aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-xxxx.c7
-rw-r--r--drivers/scsi/Kconfig17
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aha152x.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.reg185
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c15
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c12
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped567
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped1723
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg124
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c7
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped875
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped1165
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y10
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c3
-rw-r--r--drivers/scsi/atari_dma_emul.c468
-rw-r--r--drivers/scsi/atari_scsi.c27
-rw-r--r--drivers/scsi/ch.c6
-rw-r--r--drivers/scsi/constants.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c10
-rw-r--r--drivers/scsi/dpt_i2o.c6
-rw-r--r--drivers/scsi/esp_scsi.h3
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/gdth.c60
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/gdth_proc.c66
-rw-r--r--drivers/scsi/gdth_proc.h3
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ide-scsi.c260
-rw-r--r--drivers/scsi/ipr.c20
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c44
-rw-r--r--drivers/scsi/libiscsi.c202
-rw-r--r--drivers/scsi/libsas/sas_ata.c10
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c30
-rw-r--r--drivers/scsi/lpfc/lpfc.h96
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c1375
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h51
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c400
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c1712
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c241
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h183
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c924
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c624
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h163
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c514
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1715
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c168
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h4
-rw-r--r--drivers/scsi/megaraid.c11
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c18
-rw-r--r--drivers/scsi/ncr53c8xx.c4
-rw-r--r--drivers/scsi/osst.c3
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c58
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c37
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c194
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c47
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c46
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h16
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h71
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c353
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c8
-rw-r--r--drivers/scsi/qlogicpti.c146
-rw-r--r--drivers/scsi/qlogicpti.h2
-rw-r--r--drivers/scsi/scsi.c111
-rw-r--r--drivers/scsi/scsi_error.c163
-rw-r--r--drivers/scsi/scsi_ioctl.c6
-rw-r--r--drivers/scsi/scsi_lib.c245
-rw-r--r--drivers/scsi/scsi_netlink.c520
-rw-r--r--drivers/scsi/scsi_priv.h8
-rw-r--r--drivers/scsi/scsi_proc.c8
-rw-r--r--drivers/scsi/scsi_scan.c21
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/scsi_tgt_lib.c8
-rw-r--r--drivers/scsi/scsi_transport_fc.c107
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c23
-rw-r--r--drivers/scsi/scsi_transport_spi.c4
-rw-r--r--drivers/scsi/sd.c193
-rw-r--r--drivers/scsi/sd.h21
-rw-r--r--drivers/scsi/sd_dif.c42
-rw-r--r--drivers/scsi/sg.c680
-rw-r--r--drivers/scsi/sr.c47
-rw-r--r--drivers/scsi/sr_vendor.c12
-rw-r--r--drivers/scsi/st.c18
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/scsi/sun_esp.c267
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/tmscsim.c4
117 files changed, 10501 insertions, 7555 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b92c19bb6876..5311317c2e4c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1924,12 +1924,9 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
1924 (cmd->sc_data_direction == DMA_FROM_DEVICE || 1924 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1925 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { 1925 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1926 if (scsi_sg_count(cmd) == 1) { 1926 if (scsi_sg_count(cmd) == 1) {
1927 unsigned long flags;
1928 void *buf = tw_dev->generic_buffer_virt[request_id]; 1927 void *buf = tw_dev->generic_buffer_virt[request_id];
1929 1928
1930 local_irq_save(flags);
1931 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); 1929 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1932 local_irq_restore(flags);
1933 } 1930 }
1934 } 1931 }
1935} /* End twa_scsiop_execute_scsi_complete() */ 1932} /* End twa_scsiop_execute_scsi_complete() */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a0537f09aa21..c03f1d2c9e2e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1466,12 +1466,7 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
1466static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id, 1466static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
1467 void *data, unsigned int len) 1467 void *data, unsigned int len)
1468{ 1468{
1469 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1469 scsi_sg_copy_from_buffer(tw_dev->srb[request_id], data, len);
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 scsi_sg_copy_from_buffer(cmd, data, len);
1474 local_irq_restore(flags);
1475} 1470}
1476 1471
1477/* This function is called by the isr to complete an inquiry command */ 1472/* This function is called by the isr to complete an inquiry command */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e0322b1c1ea..403ecad48d4b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
1325 To compile this driver as a module, choose M here: the 1325 To compile this driver as a module, choose M here: the
1326 module will be called qlogicfas. 1326 module will be called qlogicfas.
1327 1327
1328config SCSI_QLOGIC_FC_FIRMWARE
1329 bool "Include loadable firmware in driver"
1330 depends on SCSI_QLOGIC_FC
1331 help
1332 Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
1333 expanded LUN addressing and FcTape (FCP-2) support, in the
1334 qlogicfc driver. This is required on some platforms.
1335
1336config SCSI_QLOGIC_1280 1328config SCSI_QLOGIC_1280
1337 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support" 1329 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
1338 depends on PCI && SCSI 1330 depends on PCI && SCSI
@@ -1648,6 +1640,7 @@ config ATARI_SCSI
1648 tristate "Atari native SCSI support" 1640 tristate "Atari native SCSI support"
1649 depends on ATARI && SCSI 1641 depends on ATARI && SCSI
1650 select SCSI_SPI_ATTRS 1642 select SCSI_SPI_ATTRS
1643 select NVRAM
1651 ---help--- 1644 ---help---
1652 If you have an Atari with built-in NCR5380 SCSI controller (TT, 1645 If you have an Atari with built-in NCR5380 SCSI controller (TT,
1653 Falcon, ...) say Y to get it supported. Of course also, if you have 1646 Falcon, ...) say Y to get it supported. Of course also, if you have
@@ -1678,14 +1671,6 @@ config ATARI_SCSI_RESET_BOOT
1678 boot process fractionally longer but may assist recovery from errors 1671 boot process fractionally longer but may assist recovery from errors
1679 that leave the devices with SCSI operations partway completed. 1672 that leave the devices with SCSI operations partway completed.
1680 1673
1681config TT_DMA_EMUL
1682 bool "Hades SCSI DMA emulator"
1683 depends on ATARI_SCSI && HADES
1684 help
1685 This option enables code which emulates the TT SCSI DMA chip on the
1686 Hades. This increases the SCSI transfer rates at least ten times
1687 compared to PIO transfers.
1688
1689config MAC_SCSI 1674config MAC_SCSI
1690 bool "Macintosh NCR5380 SCSI" 1675 bool "Macintosh NCR5380 SCSI"
1691 depends on MAC && SCSI=y 1676 depends on MAC && SCSI=y
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
1139 srbcmd->id = cpu_to_le32(scmd_id(cmd)); 1139 srbcmd->id = cpu_to_le32(scmd_id(cmd));
1140 srbcmd->lun = cpu_to_le32(cmd->device->lun); 1140 srbcmd->lun = cpu_to_le32(cmd->device->lun);
1141 srbcmd->flags = cpu_to_le32(flag); 1141 srbcmd->flags = cpu_to_le32(flag);
1142 timeout = cmd->timeout_per_command/HZ; 1142 timeout = cmd->request->timeout/HZ;
1143 if (timeout == 0) 1143 if (timeout == 0)
1144 timeout = 1; 1144 timeout = 1;
1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds 1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index b5a868d85eb4..1e5478abd90e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -337,7 +337,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
337#else 337#else
338#define IRQ_MIN 9 338#define IRQ_MIN 9
339#if defined(__PPC) 339#if defined(__PPC)
340#define IRQ_MAX (NR_IRQS-1) 340#define IRQ_MAX (nr_irqs-1)
341#else 341#else
342#define IRQ_MAX 12 342#define IRQ_MAX 12
343#endif 343#endif
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index cca16fc5b4ad..0666c22ab55b 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -80,6 +80,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
80 } 80 }
81 81
82/* 82/*
83 * Registers marked "dont_generate_debug_code" are not (yet) referenced
84 * from the driver code, and this keyword inhibit generation
85 * of debug code for them.
86 *
87 * REG_PRETTY_PRINT config will complain if dont_generate_debug_code
88 * is added to the register which is referenced in the driver.
89 * Unreferenced register with no dont_generate_debug_code will result
90 * in dead code. No warning is issued.
91 */
92
93/*
83 * Mode Pointer 94 * Mode Pointer
84 * Controls which of the 5, 512byte, address spaces should be used 95 * Controls which of the 5, 512byte, address spaces should be used
85 * as the source and destination of any register accesses in our 96 * as the source and destination of any register accesses in our
@@ -91,6 +102,7 @@ register MODE_PTR {
91 field DST_MODE 0x70 102 field DST_MODE 0x70
92 field SRC_MODE 0x07 103 field SRC_MODE 0x07
93 mode_pointer 104 mode_pointer
105 dont_generate_debug_code
94} 106}
95 107
96const SRC_MODE_SHIFT 0 108const SRC_MODE_SHIFT 0
@@ -190,6 +202,7 @@ register SEQINTCODE {
190 SAW_HWERR, 202 SAW_HWERR,
191 BAD_SCB_STATUS 203 BAD_SCB_STATUS
192 } 204 }
205 dont_generate_debug_code
193} 206}
194 207
195/* 208/*
@@ -207,6 +220,7 @@ register CLRINT {
207 field CLRSEQINT 0x04 220 field CLRSEQINT 0x04
208 field CLRCMDINT 0x02 221 field CLRCMDINT 0x02
209 field CLRSPLTINT 0x01 222 field CLRSPLTINT 0x01
223 dont_generate_debug_code
210} 224}
211 225
212/* 226/*
@@ -222,6 +236,7 @@ register ERROR {
222 field SQPARERR 0x08 236 field SQPARERR 0x08
223 field ILLOPCODE 0x04 237 field ILLOPCODE 0x04
224 field DSCTMOUT 0x02 238 field DSCTMOUT 0x02
239 dont_generate_debug_code
225} 240}
226 241
227/* 242/*
@@ -255,6 +270,7 @@ register HCNTRL {
255 field INTEN 0x02 270 field INTEN 0x02
256 field CHIPRST 0x01 271 field CHIPRST 0x01
257 field CHIPRSTACK 0x01 272 field CHIPRSTACK 0x01
273 dont_generate_debug_code
258} 274}
259 275
260/* 276/*
@@ -265,6 +281,7 @@ register HNSCB_QOFF {
265 access_mode RW 281 access_mode RW
266 size 2 282 size 2
267 count 2 283 count 2
284 dont_generate_debug_code
268} 285}
269 286
270/* 287/*
@@ -274,6 +291,7 @@ register HESCB_QOFF {
274 address 0x008 291 address 0x008
275 access_mode RW 292 access_mode RW
276 count 2 293 count 2
294 dont_generate_debug_code
277} 295}
278 296
279/* 297/*
@@ -311,6 +329,7 @@ register CLRSEQINTSTAT {
311 field CLRSEQ_SCSIINT 0x04 329 field CLRSEQ_SCSIINT 0x04
312 field CLRSEQ_PCIINT 0x02 330 field CLRSEQ_PCIINT 0x02
313 field CLRSEQ_SPLTINT 0x01 331 field CLRSEQ_SPLTINT 0x01
332 dont_generate_debug_code
314} 333}
315 334
316/* 335/*
@@ -320,6 +339,7 @@ register SWTIMER {
320 address 0x00E 339 address 0x00E
321 access_mode RW 340 access_mode RW
322 size 2 341 size 2
342 dont_generate_debug_code
323} 343}
324 344
325/* 345/*
@@ -330,6 +350,7 @@ register SNSCB_QOFF {
330 access_mode RW 350 access_mode RW
331 size 2 351 size 2
332 modes M_CCHAN 352 modes M_CCHAN
353 dont_generate_debug_code
333} 354}
334 355
335/* 356/*
@@ -340,6 +361,7 @@ register SESCB_QOFF {
340 count 2 361 count 2
341 access_mode RW 362 access_mode RW
342 modes M_CCHAN 363 modes M_CCHAN
364 dont_generate_debug_code
343} 365}
344 366
345/* 367/*
@@ -350,6 +372,7 @@ register SDSCB_QOFF {
350 access_mode RW 372 access_mode RW
351 modes M_CCHAN 373 modes M_CCHAN
352 size 2 374 size 2
375 dont_generate_debug_code
353} 376}
354 377
355/* 378/*
@@ -378,6 +401,7 @@ register QOFF_CTLSTA {
378 SCB_QSIZE_8192, 401 SCB_QSIZE_8192,
379 SCB_QSIZE_16384 402 SCB_QSIZE_16384
380 } 403 }
404 dont_generate_debug_code
381} 405}
382 406
383/* 407/*
@@ -431,6 +455,7 @@ register DSCOMMAND0 {
431 field EXTREQLCK 0x10 /* External Request Lock */ 455 field EXTREQLCK 0x10 /* External Request Lock */
432 field DISABLE_TWATE 0x02 /* Rev B or greater */ 456 field DISABLE_TWATE 0x02 /* Rev B or greater */
433 field CIOPARCKEN 0x01 /* Internal bus parity error enable */ 457 field CIOPARCKEN 0x01 /* Internal bus parity error enable */
458 dont_generate_debug_code
434} 459}
435 460
436/* 461/*
@@ -459,6 +484,7 @@ register SG_CACHE_PRE {
459 field SG_ADDR_MASK 0xf8 484 field SG_ADDR_MASK 0xf8
460 field ODD_SEG 0x04 485 field ODD_SEG 0x04
461 field LAST_SEG 0x02 486 field LAST_SEG 0x02
487 dont_generate_debug_code
462} 488}
463 489
464register SG_CACHE_SHADOW { 490register SG_CACHE_SHADOW {
@@ -491,6 +517,7 @@ register HADDR {
491 access_mode RW 517 access_mode RW
492 size 8 518 size 8
493 modes M_DFF0, M_DFF1 519 modes M_DFF0, M_DFF1
520 dont_generate_debug_code
494} 521}
495 522
496/* 523/*
@@ -522,6 +549,7 @@ register HCNT {
522 access_mode RW 549 access_mode RW
523 size 3 550 size 3
524 modes M_DFF0, M_DFF1 551 modes M_DFF0, M_DFF1
552 dont_generate_debug_code
525} 553}
526 554
527/* 555/*
@@ -551,6 +579,7 @@ register SGHADDR {
551 access_mode RW 579 access_mode RW
552 size 8 580 size 8
553 modes M_DFF0, M_DFF1 581 modes M_DFF0, M_DFF1
582 dont_generate_debug_code
554} 583}
555 584
556/* 585/*
@@ -561,6 +590,7 @@ register SCBHADDR {
561 access_mode RW 590 access_mode RW
562 size 8 591 size 8
563 modes M_CCHAN 592 modes M_CCHAN
593 dont_generate_debug_code
564} 594}
565 595
566/* 596/*
@@ -570,6 +600,7 @@ register SGHCNT {
570 address 0x084 600 address 0x084
571 access_mode RW 601 access_mode RW
572 modes M_DFF0, M_DFF1 602 modes M_DFF0, M_DFF1
603 dont_generate_debug_code
573} 604}
574 605
575/* 606/*
@@ -579,6 +610,7 @@ register SCBHCNT {
579 address 0x084 610 address 0x084
580 access_mode RW 611 access_mode RW
581 modes M_CCHAN 612 modes M_CCHAN
613 dont_generate_debug_code
582} 614}
583 615
584/* 616/*
@@ -609,6 +641,7 @@ register DFF_THRSH {
609 RD_DFTHRSH_90, 641 RD_DFTHRSH_90,
610 RD_DFTHRSH_MAX 642 RD_DFTHRSH_MAX
611 } 643 }
644 dont_generate_debug_code
612} 645}
613 646
614/* 647/*
@@ -817,6 +850,7 @@ register PCIXCTL {
817 field SRSPDPEEN 0x04 850 field SRSPDPEEN 0x04
818 field TSCSERREN 0x02 851 field TSCSERREN 0x02
819 field CMPABCDIS 0x01 852 field CMPABCDIS 0x01
853 dont_generate_debug_code
820} 854}
821 855
822/* 856/*
@@ -863,6 +897,7 @@ register DCHSPLTSTAT0 {
863 field RXOVRUN 0x04 897 field RXOVRUN 0x04
864 field RXSCEMSG 0x02 898 field RXSCEMSG 0x02
865 field RXSPLTRSP 0x01 899 field RXSPLTRSP 0x01
900 dont_generate_debug_code
866} 901}
867 902
868/* 903/*
@@ -908,6 +943,7 @@ register DCHSPLTSTAT1 {
908 modes M_DFF0, M_DFF1 943 modes M_DFF0, M_DFF1
909 count 2 944 count 2
910 field RXDATABUCKET 0x01 945 field RXDATABUCKET 0x01
946 dont_generate_debug_code
911} 947}
912 948
913/* 949/*
@@ -1069,6 +1105,7 @@ register SGSPLTSTAT0 {
1069 field RXOVRUN 0x04 1105 field RXOVRUN 0x04
1070 field RXSCEMSG 0x02 1106 field RXSCEMSG 0x02
1071 field RXSPLTRSP 0x01 1107 field RXSPLTRSP 0x01
1108 dont_generate_debug_code
1072} 1109}
1073 1110
1074/* 1111/*
@@ -1080,6 +1117,7 @@ register SGSPLTSTAT1 {
1080 modes M_DFF0, M_DFF1 1117 modes M_DFF0, M_DFF1
1081 count 2 1118 count 2
1082 field RXDATABUCKET 0x01 1119 field RXDATABUCKET 0x01
1120 dont_generate_debug_code
1083} 1121}
1084 1122
1085/* 1123/*
@@ -1091,6 +1129,7 @@ register SFUNCT {
1091 modes M_CFG 1129 modes M_CFG
1092 field TEST_GROUP 0xF0 1130 field TEST_GROUP 0xF0
1093 field TEST_NUM 0x0F 1131 field TEST_NUM 0x0F
1132 dont_generate_debug_code
1094} 1133}
1095 1134
1096/* 1135/*
@@ -1109,6 +1148,7 @@ register DF0PCISTAT {
1109 field RDPERR 0x04 1148 field RDPERR 0x04
1110 field TWATERR 0x02 1149 field TWATERR 0x02
1111 field DPR 0x01 1150 field DPR 0x01
1151 dont_generate_debug_code
1112} 1152}
1113 1153
1114/* 1154/*
@@ -1204,6 +1244,7 @@ register TARGPCISTAT {
1204 field SSE 0x40 1244 field SSE 0x40
1205 field STA 0x08 1245 field STA 0x08
1206 field TWATERR 0x02 1246 field TWATERR 0x02
1247 dont_generate_debug_code
1207} 1248}
1208 1249
1209/* 1250/*
@@ -1216,6 +1257,7 @@ register LQIN {
1216 size 20 1257 size 20
1217 count 2 1258 count 2
1218 modes M_DFF0, M_DFF1, M_SCSI 1259 modes M_DFF0, M_DFF1, M_SCSI
1260 dont_generate_debug_code
1219} 1261}
1220 1262
1221/* 1263/*
@@ -1247,6 +1289,7 @@ register LUNPTR {
1247 access_mode RW 1289 access_mode RW
1248 modes M_CFG 1290 modes M_CFG
1249 count 2 1291 count 2
1292 dont_generate_debug_code
1250} 1293}
1251 1294
1252/* 1295/*
@@ -1278,6 +1321,7 @@ register CMDLENPTR {
1278 access_mode RW 1321 access_mode RW
1279 modes M_CFG 1322 modes M_CFG
1280 count 1 1323 count 1
1324 dont_generate_debug_code
1281} 1325}
1282 1326
1283/* 1327/*
@@ -1290,6 +1334,7 @@ register ATTRPTR {
1290 access_mode RW 1334 access_mode RW
1291 modes M_CFG 1335 modes M_CFG
1292 count 1 1336 count 1
1337 dont_generate_debug_code
1293} 1338}
1294 1339
1295/* 1340/*
@@ -1302,6 +1347,7 @@ register FLAGPTR {
1302 access_mode RW 1347 access_mode RW
1303 modes M_CFG 1348 modes M_CFG
1304 count 1 1349 count 1
1350 dont_generate_debug_code
1305} 1351}
1306 1352
1307/* 1353/*
@@ -1313,6 +1359,7 @@ register CMDPTR {
1313 access_mode RW 1359 access_mode RW
1314 modes M_CFG 1360 modes M_CFG
1315 count 1 1361 count 1
1362 dont_generate_debug_code
1316} 1363}
1317 1364
1318/* 1365/*
@@ -1324,6 +1371,7 @@ register QNEXTPTR {
1324 access_mode RW 1371 access_mode RW
1325 modes M_CFG 1372 modes M_CFG
1326 count 1 1373 count 1
1374 dont_generate_debug_code
1327} 1375}
1328 1376
1329/* 1377/*
@@ -1347,6 +1395,7 @@ register ABRTBYTEPTR {
1347 access_mode RW 1395 access_mode RW
1348 modes M_CFG 1396 modes M_CFG
1349 count 1 1397 count 1
1398 dont_generate_debug_code
1350} 1399}
1351 1400
1352/* 1401/*
@@ -1358,6 +1407,7 @@ register ABRTBITPTR {
1358 access_mode RW 1407 access_mode RW
1359 modes M_CFG 1408 modes M_CFG
1360 count 1 1409 count 1
1410 dont_generate_debug_code
1361} 1411}
1362 1412
1363/* 1413/*
@@ -1398,6 +1448,7 @@ register LUNLEN {
1398 count 2 1448 count 2
1399 mask ILUNLEN 0x0F 1449 mask ILUNLEN 0x0F
1400 mask TLUNLEN 0xF0 1450 mask TLUNLEN 0xF0
1451 dont_generate_debug_code
1401} 1452}
1402const LUNLEN_SINGLE_LEVEL_LUN 0xF 1453const LUNLEN_SINGLE_LEVEL_LUN 0xF
1403 1454
@@ -1410,6 +1461,7 @@ register CDBLIMIT {
1410 access_mode RW 1461 access_mode RW
1411 modes M_CFG 1462 modes M_CFG
1412 count 1 1463 count 1
1464 dont_generate_debug_code
1413} 1465}
1414 1466
1415/* 1467/*
@@ -1422,6 +1474,7 @@ register MAXCMD {
1422 access_mode RW 1474 access_mode RW
1423 modes M_CFG 1475 modes M_CFG
1424 count 9 1476 count 9
1477 dont_generate_debug_code
1425} 1478}
1426 1479
1427/* 1480/*
@@ -1432,6 +1485,7 @@ register MAXCMDCNT {
1432 address 0x033 1485 address 0x033
1433 access_mode RW 1486 access_mode RW
1434 modes M_CFG 1487 modes M_CFG
1488 dont_generate_debug_code
1435} 1489}
1436 1490
1437/* 1491/*
@@ -1490,6 +1544,7 @@ register LQCTL1 {
1490 field PCI2PCI 0x04 1544 field PCI2PCI 0x04
1491 field SINGLECMD 0x02 1545 field SINGLECMD 0x02
1492 field ABORTPENDING 0x01 1546 field ABORTPENDING 0x01
1547 dont_generate_debug_code
1493} 1548}
1494 1549
1495/* 1550/*
@@ -1508,6 +1563,7 @@ register LQCTL2 {
1508 field LQOCONTINUE 0x04 1563 field LQOCONTINUE 0x04
1509 field LQOTOIDLE 0x02 1564 field LQOTOIDLE 0x02
1510 field LQOPAUSE 0x01 1565 field LQOPAUSE 0x01
1566 dont_generate_debug_code
1511} 1567}
1512 1568
1513/* 1569/*
@@ -1578,6 +1634,7 @@ register SXFRCTL0 {
1578 field DFPEXP 0x40 1634 field DFPEXP 0x40
1579 field BIOSCANCELEN 0x10 1635 field BIOSCANCELEN 0x10
1580 field SPIOEN 0x08 1636 field SPIOEN 0x08
1637 dont_generate_debug_code
1581} 1638}
1582 1639
1583/* 1640/*
@@ -1594,6 +1651,7 @@ register SXFRCTL1 {
1594 field ENSTIMER 0x04 1651 field ENSTIMER 0x04
1595 field ACTNEGEN 0x02 1652 field ACTNEGEN 0x02
1596 field STPWEN 0x01 1653 field STPWEN 0x01
1654 dont_generate_debug_code
1597} 1655}
1598 1656
1599/* 1657/*
@@ -1696,6 +1754,7 @@ register SCSISIGO {
1696 P_STATUS CDO|IOO, 1754 P_STATUS CDO|IOO,
1697 P_MESGIN CDO|IOO|MSGO 1755 P_MESGIN CDO|IOO|MSGO
1698 } 1756 }
1757 dont_generate_debug_code
1699} 1758}
1700 1759
1701/* 1760/*
@@ -1738,6 +1797,7 @@ register MULTARGID {
1738 modes M_CFG 1797 modes M_CFG
1739 size 2 1798 size 2
1740 count 2 1799 count 2
1800 dont_generate_debug_code
1741} 1801}
1742 1802
1743/* 1803/*
@@ -1774,6 +1834,7 @@ register SCSIDAT {
1774 access_mode RW 1834 access_mode RW
1775 modes M_DFF0, M_DFF1, M_SCSI 1835 modes M_DFF0, M_DFF1, M_SCSI
1776 size 2 1836 size 2
1837 dont_generate_debug_code
1777} 1838}
1778 1839
1779/* 1840/*
@@ -1796,6 +1857,7 @@ register TARGIDIN {
1796 count 2 1857 count 2
1797 field CLKOUT 0x80 1858 field CLKOUT 0x80
1798 field TARGID 0x0F 1859 field TARGID 0x0F
1860 dont_generate_debug_code
1799} 1861}
1800 1862
1801/* 1863/*
@@ -1825,6 +1887,7 @@ register SBLKCTL {
1825 field ENAB40 0x08 /* LVD transceiver active */ 1887 field ENAB40 0x08 /* LVD transceiver active */
1826 field ENAB20 0x04 /* SE/HVD transceiver active */ 1888 field ENAB20 0x04 /* SE/HVD transceiver active */
1827 field SELWIDE 0x02 1889 field SELWIDE 0x02
1890 dont_generate_debug_code
1828} 1891}
1829 1892
1830/* 1893/*
@@ -1842,6 +1905,7 @@ register OPTIONMODE {
1842 field ENDGFORMCHK 0x04 1905 field ENDGFORMCHK 0x04
1843 field AUTO_MSGOUT_DE 0x02 1906 field AUTO_MSGOUT_DE 0x02
1844 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE 1907 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE
1908 dont_generate_debug_code
1845} 1909}
1846 1910
1847/* 1911/*
@@ -1876,6 +1940,7 @@ register CLRSINT0 {
1876 field CLROVERRUN 0x04 1940 field CLROVERRUN 0x04
1877 field CLRSPIORDY 0x02 1941 field CLRSPIORDY 0x02
1878 field CLRARBDO 0x01 1942 field CLRARBDO 0x01
1943 dont_generate_debug_code
1879} 1944}
1880 1945
1881/* 1946/*
@@ -1929,6 +1994,7 @@ register CLRSINT1 {
1929 field CLRSCSIPERR 0x04 1994 field CLRSCSIPERR 0x04
1930 field CLRSTRB2FAST 0x02 1995 field CLRSTRB2FAST 0x02
1931 field CLRREQINIT 0x01 1996 field CLRREQINIT 0x01
1997 dont_generate_debug_code
1932} 1998}
1933 1999
1934/* 2000/*
@@ -1962,6 +2028,7 @@ register CLRSINT2 {
1962 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */ 2028 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */
1963 field CLRSDONE 0x02 /* Modes 0 and 1 only */ 2029 field CLRSDONE 0x02 /* Modes 0 and 1 only */
1964 field CLRDMADONE 0x01 /* Modes 0 and 1 only */ 2030 field CLRDMADONE 0x01 /* Modes 0 and 1 only */
2031 dont_generate_debug_code
1965} 2032}
1966 2033
1967/* 2034/*
@@ -2002,6 +2069,7 @@ register LQISTATE {
2002 access_mode RO 2069 access_mode RO
2003 modes M_CFG 2070 modes M_CFG
2004 count 6 2071 count 6
2072 dont_generate_debug_code
2005} 2073}
2006 2074
2007/* 2075/*
@@ -2022,6 +2090,7 @@ register LQOSTATE {
2022 access_mode RO 2090 access_mode RO
2023 modes M_CFG 2091 modes M_CFG
2024 count 2 2092 count 2
2093 dont_generate_debug_code
2025} 2094}
2026 2095
2027/* 2096/*
@@ -2054,6 +2123,7 @@ register CLRLQIINT0 {
2054 field CLRLQIBADLQT 0x04 2123 field CLRLQIBADLQT 0x04
2055 field CLRLQIATNLQ 0x02 2124 field CLRLQIATNLQ 0x02
2056 field CLRLQIATNCMD 0x01 2125 field CLRLQIATNCMD 0x01
2126 dont_generate_debug_code
2057} 2127}
2058 2128
2059/* 2129/*
@@ -2070,6 +2140,7 @@ register LQIMODE0 {
2070 field ENLQIBADLQT 0x04 2140 field ENLQIBADLQT 0x04
2071 field ENLQIATNLQ 0x02 2141 field ENLQIATNLQ 0x02
2072 field ENLQIATNCMD 0x01 2142 field ENLQIATNCMD 0x01
2143 dont_generate_debug_code
2073} 2144}
2074 2145
2075/* 2146/*
@@ -2106,6 +2177,7 @@ register CLRLQIINT1 {
2106 field CLRLQIBADLQI 0x04 2177 field CLRLQIBADLQI 0x04
2107 field CLRLQIOVERI_LQ 0x02 2178 field CLRLQIOVERI_LQ 0x02
2108 field CLRLQIOVERI_NLQ 0x01 2179 field CLRLQIOVERI_NLQ 0x01
2180 dont_generate_debug_code
2109} 2181}
2110 2182
2111/* 2183/*
@@ -2124,6 +2196,7 @@ register LQIMODE1 {
2124 field ENLQIBADLQI 0x04 2196 field ENLQIBADLQI 0x04
2125 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */ 2197 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */
2126 field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */ 2198 field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */
2199 dont_generate_debug_code
2127} 2200}
2128 2201
2129/* 2202/*
@@ -2165,6 +2238,7 @@ register CLRSINT3 {
2165 count 3 2238 count 3
2166 field CLRNTRAMPERR 0x02 2239 field CLRNTRAMPERR 0x02
2167 field CLROSRAMPERR 0x01 2240 field CLROSRAMPERR 0x01
2241 dont_generate_debug_code
2168} 2242}
2169 2243
2170/* 2244/*
@@ -2177,6 +2251,7 @@ register SIMODE3 {
2177 count 4 2251 count 4
2178 field ENNTRAMPERR 0x02 2252 field ENNTRAMPERR 0x02
2179 field ENOSRAMPERR 0x01 2253 field ENOSRAMPERR 0x01
2254 dont_generate_debug_code
2180} 2255}
2181 2256
2182/* 2257/*
@@ -2207,6 +2282,7 @@ register CLRLQOINT0 {
2207 field CLRLQOATNLQ 0x04 2282 field CLRLQOATNLQ 0x04
2208 field CLRLQOATNPKT 0x02 2283 field CLRLQOATNPKT 0x02
2209 field CLRLQOTCRC 0x01 2284 field CLRLQOTCRC 0x01
2285 dont_generate_debug_code
2210} 2286}
2211 2287
2212/* 2288/*
@@ -2222,6 +2298,7 @@ register LQOMODE0 {
2222 field ENLQOATNLQ 0x04 2298 field ENLQOATNLQ 0x04
2223 field ENLQOATNPKT 0x02 2299 field ENLQOATNPKT 0x02
2224 field ENLQOTCRC 0x01 2300 field ENLQOTCRC 0x01
2301 dont_generate_debug_code
2225} 2302}
2226 2303
2227/* 2304/*
@@ -2251,6 +2328,7 @@ register CLRLQOINT1 {
2251 field CLRLQOBADQAS 0x04 2328 field CLRLQOBADQAS 0x04
2252 field CLRLQOBUSFREE 0x02 2329 field CLRLQOBUSFREE 0x02
2253 field CLRLQOPHACHGINPKT 0x01 2330 field CLRLQOPHACHGINPKT 0x01
2331 dont_generate_debug_code
2254} 2332}
2255 2333
2256/* 2334/*
@@ -2266,6 +2344,7 @@ register LQOMODE1 {
2266 field ENLQOBADQAS 0x04 2344 field ENLQOBADQAS 0x04
2267 field ENLQOBUSFREE 0x02 2345 field ENLQOBUSFREE 0x02
2268 field ENLQOPHACHGINPKT 0x01 2346 field ENLQOPHACHGINPKT 0x01
2347 dont_generate_debug_code
2269} 2348}
2270 2349
2271/* 2350/*
@@ -2289,6 +2368,7 @@ register OS_SPACE_CNT {
2289 access_mode RO 2368 access_mode RO
2290 modes M_CFG 2369 modes M_CFG
2291 count 2 2370 count 2
2371 dont_generate_debug_code
2292} 2372}
2293 2373
2294/* 2374/*
@@ -2318,6 +2398,7 @@ register GSFIFO {
2318 access_mode RO 2398 access_mode RO
2319 size 2 2399 size 2
2320 modes M_DFF0, M_DFF1, M_SCSI 2400 modes M_DFF0, M_DFF1, M_SCSI
2401 dont_generate_debug_code
2321} 2402}
2322 2403
2323/* 2404/*
@@ -2341,6 +2422,7 @@ register NEXTSCB {
2341 access_mode RW 2422 access_mode RW
2342 size 2 2423 size 2
2343 modes M_SCSI 2424 modes M_SCSI
2425 dont_generate_debug_code
2344} 2426}
2345 2427
2346/* 2428/*
@@ -2357,6 +2439,7 @@ register LQOSCSCTL {
2357 field LQOBUSETDLY 0x40 2439 field LQOBUSETDLY 0x40
2358 field LQONOHOLDLACK 0x02 2440 field LQONOHOLDLACK 0x02
2359 field LQONOCHKOVER 0x01 2441 field LQONOCHKOVER 0x01
2442 dont_generate_debug_code
2360} 2443}
2361 2444
2362/* 2445/*
@@ -2389,6 +2472,7 @@ register CLRSEQINTSRC {
2389 field CLRCFG4TSTAT 0x04 2472 field CLRCFG4TSTAT 0x04
2390 field CLRCFG4ICMD 0x02 2473 field CLRCFG4ICMD 0x02
2391 field CLRCFG4TCMD 0x01 2474 field CLRCFG4TCMD 0x01
2475 dont_generate_debug_code
2392} 2476}
2393 2477
2394/* 2478/*
@@ -2415,6 +2499,7 @@ register CURRSCB {
2415 access_mode RW 2499 access_mode RW
2416 size 2 2500 size 2
2417 modes M_SCSI 2501 modes M_SCSI
2502 dont_generate_debug_code
2418} 2503}
2419 2504
2420/* 2505/*
@@ -2472,6 +2557,7 @@ register LASTSCB {
2472 access_mode RW 2557 access_mode RW
2473 size 2 2558 size 2
2474 modes M_SCSI 2559 modes M_SCSI
2560 dont_generate_debug_code
2475} 2561}
2476 2562
2477/* 2563/*
@@ -2494,6 +2580,7 @@ register SHADDR {
2494 access_mode RO 2580 access_mode RO
2495 size 8 2581 size 8
2496 modes M_DFF0, M_DFF1 2582 modes M_DFF0, M_DFF1
2583 dont_generate_debug_code
2497} 2584}
2498 2585
2499/* 2586/*
@@ -2513,6 +2600,7 @@ register NEGOADDR {
2513 address 0x060 2600 address 0x060
2514 access_mode RW 2601 access_mode RW
2515 modes M_SCSI 2602 modes M_SCSI
2603 dont_generate_debug_code
2516} 2604}
2517 2605
2518/* 2606/*
@@ -2523,6 +2611,7 @@ register NEGPERIOD {
2523 access_mode RW 2611 access_mode RW
2524 modes M_SCSI 2612 modes M_SCSI
2525 count 1 2613 count 1
2614 dont_generate_debug_code
2526} 2615}
2527 2616
2528/* 2617/*
@@ -2543,6 +2632,7 @@ register NEGOFFSET {
2543 access_mode RW 2632 access_mode RW
2544 modes M_SCSI 2633 modes M_SCSI
2545 count 1 2634 count 1
2635 dont_generate_debug_code
2546} 2636}
2547 2637
2548/* 2638/*
@@ -2557,6 +2647,7 @@ register NEGPPROPTS {
2557 field PPROPT_QAS 0x04 2647 field PPROPT_QAS 0x04
2558 field PPROPT_DT 0x02 2648 field PPROPT_DT 0x02
2559 field PPROPT_IUT 0x01 2649 field PPROPT_IUT 0x01
2650 dont_generate_debug_code
2560} 2651}
2561 2652
2562/* 2653/*
@@ -2573,6 +2664,7 @@ register NEGCONOPTS {
2573 field ENAUTOATNI 0x04 2664 field ENAUTOATNI 0x04
2574 field ENAUTOATNO 0x02 2665 field ENAUTOATNO 0x02
2575 field WIDEXFER 0x01 2666 field WIDEXFER 0x01
2667 dont_generate_debug_code
2576} 2668}
2577 2669
2578/* 2670/*
@@ -2583,6 +2675,7 @@ register ANNEXCOL {
2583 access_mode RW 2675 access_mode RW
2584 modes M_SCSI 2676 modes M_SCSI
2585 count 7 2677 count 7
2678 dont_generate_debug_code
2586} 2679}
2587 2680
2588/* 2681/*
@@ -2602,6 +2695,7 @@ register SCSCHKN {
2602 field DFFACTCLR 0x04 2695 field DFFACTCLR 0x04
2603 field SHVALIDSTDIS 0x02 2696 field SHVALIDSTDIS 0x02
2604 field LSTSGCLRDIS 0x01 2697 field LSTSGCLRDIS 0x01
2698 dont_generate_debug_code
2605} 2699}
2606 2700
2607const AHD_ANNEXCOL_PER_DEV0 4 2701const AHD_ANNEXCOL_PER_DEV0 4
@@ -2635,6 +2729,7 @@ register ANNEXDAT {
2635 access_mode RW 2729 access_mode RW
2636 modes M_SCSI 2730 modes M_SCSI
2637 count 3 2731 count 3
2732 dont_generate_debug_code
2638} 2733}
2639 2734
2640/* 2735/*
@@ -2645,6 +2740,7 @@ register IOWNID {
2645 address 0x067 2740 address 0x067
2646 access_mode RW 2741 access_mode RW
2647 modes M_SCSI 2742 modes M_SCSI
2743 dont_generate_debug_code
2648} 2744}
2649 2745
2650/* 2746/*
@@ -2671,6 +2767,7 @@ register TOWNID {
2671 access_mode RW 2767 access_mode RW
2672 modes M_SCSI 2768 modes M_SCSI
2673 count 2 2769 count 2
2770 dont_generate_debug_code
2674} 2771}
2675 2772
2676/* 2773/*
@@ -2702,6 +2799,7 @@ register SHCNT {
2702 access_mode RW 2799 access_mode RW
2703 size 3 2800 size 3
2704 modes M_DFF0, M_DFF1 2801 modes M_DFF0, M_DFF1
2802 dont_generate_debug_code
2705} 2803}
2706 2804
2707/* 2805/*
@@ -2789,6 +2887,7 @@ register SCBPTR {
2789 access_mode RW 2887 access_mode RW
2790 size 2 2888 size 2
2791 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI 2889 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI
2890 dont_generate_debug_code
2792} 2891}
2793 2892
2794/* 2893/*
@@ -2816,6 +2915,7 @@ register SCBAUTOPTR {
2816 field AUSCBPTR_EN 0x80 2915 field AUSCBPTR_EN 0x80
2817 field SCBPTR_ADDR 0x38 2916 field SCBPTR_ADDR 0x38
2818 field SCBPTR_OFF 0x07 2917 field SCBPTR_OFF 0x07
2918 dont_generate_debug_code
2819} 2919}
2820 2920
2821/* 2921/*
@@ -2825,6 +2925,7 @@ register CCSGADDR {
2825 address 0x0AC 2925 address 0x0AC
2826 access_mode RW 2926 access_mode RW
2827 modes M_DFF0, M_DFF1 2927 modes M_DFF0, M_DFF1
2928 dont_generate_debug_code
2828} 2929}
2829 2930
2830/* 2931/*
@@ -2834,6 +2935,7 @@ register CCSCBADDR {
2834 address 0x0AC 2935 address 0x0AC
2835 access_mode RW 2936 access_mode RW
2836 modes M_CCHAN 2937 modes M_CCHAN
2938 dont_generate_debug_code
2837} 2939}
2838 2940
2839/* 2941/*
@@ -2899,6 +3001,7 @@ register CCSGRAM {
2899 address 0x0B0 3001 address 0x0B0
2900 access_mode RW 3002 access_mode RW
2901 modes M_DFF0, M_DFF1 3003 modes M_DFF0, M_DFF1
3004 dont_generate_debug_code
2902} 3005}
2903 3006
2904/* 3007/*
@@ -2908,6 +3011,7 @@ register CCSCBRAM {
2908 address 0x0B0 3011 address 0x0B0
2909 access_mode RW 3012 access_mode RW
2910 modes M_CCHAN 3013 modes M_CCHAN
3014 dont_generate_debug_code
2911} 3015}
2912 3016
2913/* 3017/*
@@ -2958,6 +3062,7 @@ register BRDDAT {
2958 access_mode RW 3062 access_mode RW
2959 modes M_SCSI 3063 modes M_SCSI
2960 count 2 3064 count 2
3065 dont_generate_debug_code
2961} 3066}
2962 3067
2963/* 3068/*
@@ -2974,6 +3079,7 @@ register BRDCTL {
2974 field BRDEN 0x04 3079 field BRDEN 0x04
2975 field BRDRW 0x02 3080 field BRDRW 0x02
2976 field BRDSTB 0x01 3081 field BRDSTB 0x01
3082 dont_generate_debug_code
2977} 3083}
2978 3084
2979/* 3085/*
@@ -2984,6 +3090,7 @@ register SEEADR {
2984 access_mode RW 3090 access_mode RW
2985 modes M_SCSI 3091 modes M_SCSI
2986 count 4 3092 count 4
3093 dont_generate_debug_code
2987} 3094}
2988 3095
2989/* 3096/*
@@ -2995,6 +3102,7 @@ register SEEDAT {
2995 size 2 3102 size 2
2996 modes M_SCSI 3103 modes M_SCSI
2997 count 4 3104 count 4
3105 dont_generate_debug_code
2998} 3106}
2999 3107
3000/* 3108/*
@@ -3011,6 +3119,7 @@ register SEESTAT {
3011 field SEEARBACK 0x04 3119 field SEEARBACK 0x04
3012 field SEEBUSY 0x02 3120 field SEEBUSY 0x02
3013 field SEESTART 0x01 3121 field SEESTART 0x01
3122 dont_generate_debug_code
3014} 3123}
3015 3124
3016/* 3125/*
@@ -3036,6 +3145,7 @@ register SEECTL {
3036 mask SEEOP_EWDS 0x40 3145 mask SEEOP_EWDS 0x40
3037 field SEERST 0x02 3146 field SEERST 0x02
3038 field SEESTART 0x01 3147 field SEESTART 0x01
3148 dont_generate_debug_code
3039} 3149}
3040 3150
3041const SEEOP_ERAL_ADDR 0x80 3151const SEEOP_ERAL_ADDR 0x80
@@ -3050,6 +3160,7 @@ register SCBCNT {
3050 address 0x0BF 3160 address 0x0BF
3051 access_mode RW 3161 access_mode RW
3052 modes M_SCSI 3162 modes M_SCSI
3163 dont_generate_debug_code
3053} 3164}
3054 3165
3055/* 3166/*
@@ -3061,6 +3172,7 @@ register DFWADDR {
3061 access_mode RW 3172 access_mode RW
3062 size 2 3173 size 2
3063 modes M_DFF0, M_DFF1 3174 modes M_DFF0, M_DFF1
3175 dont_generate_debug_code
3064} 3176}
3065 3177
3066/* 3178/*
@@ -3087,6 +3199,7 @@ register DSPDATACTL {
3087 field DESQDIS 0x10 3199 field DESQDIS 0x10
3088 field RCVROFFSTDIS 0x04 3200 field RCVROFFSTDIS 0x04
3089 field XMITOFFSTDIS 0x02 3201 field XMITOFFSTDIS 0x02
3202 dont_generate_debug_code
3090} 3203}
3091 3204
3092/* 3205/*
@@ -3132,6 +3245,7 @@ register DFDAT {
3132 address 0x0C4 3245 address 0x0C4
3133 access_mode RW 3246 access_mode RW
3134 modes M_DFF0, M_DFF1 3247 modes M_DFF0, M_DFF1
3248 dont_generate_debug_code
3135} 3249}
3136 3250
3137/* 3251/*
@@ -3144,6 +3258,7 @@ register DSPSELECT {
3144 count 1 3258 count 1
3145 field AUTOINCEN 0x80 3259 field AUTOINCEN 0x80
3146 field DSPSEL 0x1F 3260 field DSPSEL 0x1F
3261 dont_generate_debug_code
3147} 3262}
3148 3263
3149const NUMDSPS 0x14 3264const NUMDSPS 0x14
@@ -3158,6 +3273,7 @@ register WRTBIASCTL {
3158 count 3 3273 count 3
3159 field AUTOXBCDIS 0x80 3274 field AUTOXBCDIS 0x80
3160 field XMITMANVAL 0x3F 3275 field XMITMANVAL 0x3F
3276 dont_generate_debug_code
3161} 3277}
3162 3278
3163/* 3279/*
@@ -3316,6 +3432,7 @@ register FLAGS {
3316 count 23 3432 count 23
3317 field ZERO 0x02 3433 field ZERO 0x02
3318 field CARRY 0x01 3434 field CARRY 0x01
3435 dont_generate_debug_code
3319} 3436}
3320 3437
3321/* 3438/*
@@ -3344,6 +3461,7 @@ register SEQRAM {
3344 address 0x0DA 3461 address 0x0DA
3345 access_mode RW 3462 access_mode RW
3346 count 2 3463 count 2
3464 dont_generate_debug_code
3347} 3465}
3348 3466
3349/* 3467/*
@@ -3355,6 +3473,7 @@ register PRGMCNT {
3355 access_mode RW 3473 access_mode RW
3356 size 2 3474 size 2
3357 count 5 3475 count 5
3476 dont_generate_debug_code
3358} 3477}
3359 3478
3360/* 3479/*
@@ -3364,6 +3483,7 @@ register ACCUM {
3364 address 0x0E0 3483 address 0x0E0
3365 access_mode RW 3484 access_mode RW
3366 accumulator 3485 accumulator
3486 dont_generate_debug_code
3367} 3487}
3368 3488
3369/* 3489/*
@@ -3380,6 +3500,7 @@ register SINDEX {
3380 access_mode RW 3500 access_mode RW
3381 size 2 3501 size 2
3382 sindex 3502 sindex
3503 dont_generate_debug_code
3383} 3504}
3384 3505
3385/* 3506/*
@@ -3390,6 +3511,7 @@ register DINDEX {
3390 address 0x0E4 3511 address 0x0E4
3391 access_mode RW 3512 access_mode RW
3392 size 2 3513 size 2
3514 dont_generate_debug_code
3393} 3515}
3394 3516
3395/* 3517/*
@@ -3415,6 +3537,7 @@ register ALLONES {
3415 address 0x0E8 3537 address 0x0E8
3416 access_mode RO 3538 access_mode RO
3417 allones 3539 allones
3540 dont_generate_debug_code
3418} 3541}
3419 3542
3420/* 3543/*
@@ -3425,6 +3548,7 @@ register ALLZEROS {
3425 address 0x0EA 3548 address 0x0EA
3426 access_mode RO 3549 access_mode RO
3427 allzeros 3550 allzeros
3551 dont_generate_debug_code
3428} 3552}
3429 3553
3430/* 3554/*
@@ -3435,6 +3559,7 @@ register NONE {
3435 address 0x0EA 3559 address 0x0EA
3436 access_mode WO 3560 access_mode WO
3437 none 3561 none
3562 dont_generate_debug_code
3438} 3563}
3439 3564
3440/* 3565/*
@@ -3445,6 +3570,7 @@ register NONE {
3445register SINDIR { 3570register SINDIR {
3446 address 0x0EC 3571 address 0x0EC
3447 access_mode RO 3572 access_mode RO
3573 dont_generate_debug_code
3448} 3574}
3449 3575
3450/* 3576/*
@@ -3455,6 +3581,7 @@ register SINDIR {
3455register DINDIR { 3581register DINDIR {
3456 address 0x0ED 3582 address 0x0ED
3457 access_mode WO 3583 access_mode WO
3584 dont_generate_debug_code
3458} 3585}
3459 3586
3460/* 3587/*
@@ -3479,6 +3606,7 @@ register FUNCTION1 {
3479register STACK { 3606register STACK {
3480 address 0x0F2 3607 address 0x0F2
3481 access_mode RW 3608 access_mode RW
3609 dont_generate_debug_code
3482} 3610}
3483 3611
3484/* 3612/*
@@ -3491,6 +3619,7 @@ register INTVEC1_ADDR {
3491 size 2 3619 size 2
3492 modes M_CFG 3620 modes M_CFG
3493 count 1 3621 count 1
3622 dont_generate_debug_code
3494} 3623}
3495 3624
3496/* 3625/*
@@ -3503,6 +3632,7 @@ register CURADDR {
3503 size 2 3632 size 2
3504 modes M_SCSI 3633 modes M_SCSI
3505 count 2 3634 count 2
3635 dont_generate_debug_code
3506} 3636}
3507 3637
3508/* 3638/*
@@ -3515,6 +3645,7 @@ register INTVEC2_ADDR {
3515 size 2 3645 size 2
3516 modes M_CFG 3646 modes M_CFG
3517 count 1 3647 count 1
3648 dont_generate_debug_code
3518} 3649}
3519 3650
3520/* 3651/*
@@ -3543,12 +3674,14 @@ scratch_ram {
3543 modes 0, 1, 2, 3 3674 modes 0, 1, 2, 3
3544 REG0 { 3675 REG0 {
3545 size 2 3676 size 2
3677 dont_generate_debug_code
3546 } 3678 }
3547 REG1 { 3679 REG1 {
3548 size 2 3680 size 2
3549 } 3681 }
3550 REG_ISR { 3682 REG_ISR {
3551 size 2 3683 size 2
3684 dont_generate_debug_code
3552 } 3685 }
3553 SG_STATE { 3686 SG_STATE {
3554 size 1 3687 size 1
@@ -3572,9 +3705,11 @@ scratch_ram {
3572 modes 0, 1, 2, 3 3705 modes 0, 1, 2, 3
3573 LONGJMP_ADDR { 3706 LONGJMP_ADDR {
3574 size 2 3707 size 2
3708 dont_generate_debug_code
3575 } 3709 }
3576 ACCUM_SAVE { 3710 ACCUM_SAVE {
3577 size 1 3711 size 1
3712 dont_generate_debug_code
3578 } 3713 }
3579} 3714}
3580 3715
@@ -3591,18 +3726,22 @@ scratch_ram {
3591 */ 3726 */
3592 WAITING_SCB_TAILS { 3727 WAITING_SCB_TAILS {
3593 size 32 3728 size 32
3729 dont_generate_debug_code
3594 } 3730 }
3595 WAITING_TID_HEAD { 3731 WAITING_TID_HEAD {
3596 size 2 3732 size 2
3733 dont_generate_debug_code
3597 } 3734 }
3598 WAITING_TID_TAIL { 3735 WAITING_TID_TAIL {
3599 size 2 3736 size 2
3737 dont_generate_debug_code
3600 } 3738 }
3601 /* 3739 /*
3602 * SCBID of the next SCB in the new SCB queue. 3740 * SCBID of the next SCB in the new SCB queue.
3603 */ 3741 */
3604 NEXT_QUEUED_SCB_ADDR { 3742 NEXT_QUEUED_SCB_ADDR {
3605 size 4 3743 size 4
3744 dont_generate_debug_code
3606 } 3745 }
3607 /* 3746 /*
3608 * head of list of SCBs that have 3747 * head of list of SCBs that have
@@ -3611,6 +3750,7 @@ scratch_ram {
3611 */ 3750 */
3612 COMPLETE_SCB_HEAD { 3751 COMPLETE_SCB_HEAD {
3613 size 2 3752 size 2
3753 dont_generate_debug_code
3614 } 3754 }
3615 /* 3755 /*
3616 * The list of completed SCBs in 3756 * The list of completed SCBs in
@@ -3618,6 +3758,7 @@ scratch_ram {
3618 */ 3758 */
3619 COMPLETE_SCB_DMAINPROG_HEAD { 3759 COMPLETE_SCB_DMAINPROG_HEAD {
3620 size 2 3760 size 2
3761 dont_generate_debug_code
3621 } 3762 }
3622 /* 3763 /*
3623 * head of list of SCBs that have 3764 * head of list of SCBs that have
@@ -3626,6 +3767,7 @@ scratch_ram {
3626 */ 3767 */
3627 COMPLETE_DMA_SCB_HEAD { 3768 COMPLETE_DMA_SCB_HEAD {
3628 size 2 3769 size 2
3770 dont_generate_debug_code
3629 } 3771 }
3630 /* 3772 /*
3631 * tail of list of SCBs that have 3773 * tail of list of SCBs that have
@@ -3634,6 +3776,7 @@ scratch_ram {
3634 */ 3776 */
3635 COMPLETE_DMA_SCB_TAIL { 3777 COMPLETE_DMA_SCB_TAIL {
3636 size 2 3778 size 2
3779 dont_generate_debug_code
3637 } 3780 }
3638 /* 3781 /*
3639 * head of list of SCBs that have 3782 * head of list of SCBs that have
@@ -3643,6 +3786,7 @@ scratch_ram {
3643 */ 3786 */
3644 COMPLETE_ON_QFREEZE_HEAD { 3787 COMPLETE_ON_QFREEZE_HEAD {
3645 size 2 3788 size 2
3789 dont_generate_debug_code
3646 } 3790 }
3647 /* 3791 /*
3648 * Counting semaphore to prevent new select-outs 3792 * Counting semaphore to prevent new select-outs
@@ -3667,6 +3811,7 @@ scratch_ram {
3667 */ 3811 */
3668 MSG_OUT { 3812 MSG_OUT {
3669 size 1 3813 size 1
3814 dont_generate_debug_code
3670 } 3815 }
3671 /* Parameters for DMA Logic */ 3816 /* Parameters for DMA Logic */
3672 DMAPARAMS { 3817 DMAPARAMS {
@@ -3682,6 +3827,7 @@ scratch_ram {
3682 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ 3827 field DIRECTION 0x04 /* Set indicates PCI->SCSI */
3683 field FIFOFLUSH 0x02 3828 field FIFOFLUSH 0x02
3684 field FIFORESET 0x01 3829 field FIFORESET 0x01
3830 dont_generate_debug_code
3685 } 3831 }
3686 SEQ_FLAGS { 3832 SEQ_FLAGS {
3687 size 1 3833 size 1
@@ -3703,9 +3849,11 @@ scratch_ram {
3703 */ 3849 */
3704 SAVED_SCSIID { 3850 SAVED_SCSIID {
3705 size 1 3851 size 1
3852 dont_generate_debug_code
3706 } 3853 }
3707 SAVED_LUN { 3854 SAVED_LUN {
3708 size 1 3855 size 1
3856 dont_generate_debug_code
3709 } 3857 }
3710 /* 3858 /*
3711 * The last bus phase as seen by the sequencer. 3859 * The last bus phase as seen by the sequencer.
@@ -3733,6 +3881,7 @@ scratch_ram {
3733 */ 3881 */
3734 QOUTFIFO_ENTRY_VALID_TAG { 3882 QOUTFIFO_ENTRY_VALID_TAG {
3735 size 1 3883 size 1
3884 dont_generate_debug_code
3736 } 3885 }
3737 /* 3886 /*
3738 * Kernel and sequencer offsets into the queue of 3887 * Kernel and sequencer offsets into the queue of
@@ -3742,10 +3891,12 @@ scratch_ram {
3742 KERNEL_TQINPOS { 3891 KERNEL_TQINPOS {
3743 size 1 3892 size 1
3744 count 1 3893 count 1
3894 dont_generate_debug_code
3745 } 3895 }
3746 TQINPOS { 3896 TQINPOS {
3747 size 1 3897 size 1
3748 count 8 3898 count 8
3899 dont_generate_debug_code
3749 } 3900 }
3750 /* 3901 /*
3751 * Base address of our shared data with the kernel driver in host 3902 * Base address of our shared data with the kernel driver in host
@@ -3754,6 +3905,7 @@ scratch_ram {
3754 */ 3905 */
3755 SHARED_DATA_ADDR { 3906 SHARED_DATA_ADDR {
3756 size 4 3907 size 4
3908 dont_generate_debug_code
3757 } 3909 }
3758 /* 3910 /*
3759 * Pointer to location in host memory for next 3911 * Pointer to location in host memory for next
@@ -3761,6 +3913,7 @@ scratch_ram {
3761 */ 3913 */
3762 QOUTFIFO_NEXT_ADDR { 3914 QOUTFIFO_NEXT_ADDR {
3763 size 4 3915 size 4
3916 dont_generate_debug_code
3764 } 3917 }
3765 ARG_1 { 3918 ARG_1 {
3766 size 1 3919 size 1
@@ -3773,11 +3926,13 @@ scratch_ram {
3773 mask CONT_MSG_LOOP_READ 0x03 3926 mask CONT_MSG_LOOP_READ 0x03
3774 mask CONT_MSG_LOOP_TARG 0x02 3927 mask CONT_MSG_LOOP_TARG 0x02
3775 alias RETURN_1 3928 alias RETURN_1
3929 dont_generate_debug_code
3776 } 3930 }
3777 ARG_2 { 3931 ARG_2 {
3778 size 1 3932 size 1
3779 count 1 3933 count 1
3780 alias RETURN_2 3934 alias RETURN_2
3935 dont_generate_debug_code
3781 } 3936 }
3782 3937
3783 /* 3938 /*
@@ -3785,6 +3940,7 @@ scratch_ram {
3785 */ 3940 */
3786 LAST_MSG { 3941 LAST_MSG {
3787 size 1 3942 size 1
3943 dont_generate_debug_code
3788 } 3944 }
3789 3945
3790 /* 3946 /*
@@ -3801,6 +3957,7 @@ scratch_ram {
3801 field MANUALP 0x0C 3957 field MANUALP 0x0C
3802 field ENAUTOATNP 0x02 3958 field ENAUTOATNP 0x02
3803 field ALTSTIM 0x01 3959 field ALTSTIM 0x01
3960 dont_generate_debug_code
3804 } 3961 }
3805 3962
3806 /* 3963 /*
@@ -3809,6 +3966,7 @@ scratch_ram {
3809 INITIATOR_TAG { 3966 INITIATOR_TAG {
3810 size 1 3967 size 1
3811 count 1 3968 count 1
3969 dont_generate_debug_code
3812 } 3970 }
3813 3971
3814 SEQ_FLAGS2 { 3972 SEQ_FLAGS2 {
@@ -3820,6 +3978,7 @@ scratch_ram {
3820 3978
3821 ALLOCFIFO_SCBPTR { 3979 ALLOCFIFO_SCBPTR {
3822 size 2 3980 size 2
3981 dont_generate_debug_code
3823 } 3982 }
3824 3983
3825 /* 3984 /*
@@ -3829,6 +3988,7 @@ scratch_ram {
3829 */ 3988 */
3830 INT_COALESCING_TIMER { 3989 INT_COALESCING_TIMER {
3831 size 2 3990 size 2
3991 dont_generate_debug_code
3832 } 3992 }
3833 3993
3834 /* 3994 /*
@@ -3838,6 +3998,7 @@ scratch_ram {
3838 */ 3998 */
3839 INT_COALESCING_MAXCMDS { 3999 INT_COALESCING_MAXCMDS {
3840 size 1 4000 size 1
4001 dont_generate_debug_code
3841 } 4002 }
3842 4003
3843 /* 4004 /*
@@ -3846,6 +4007,7 @@ scratch_ram {
3846 */ 4007 */
3847 INT_COALESCING_MINCMDS { 4008 INT_COALESCING_MINCMDS {
3848 size 1 4009 size 1
4010 dont_generate_debug_code
3849 } 4011 }
3850 4012
3851 /* 4013 /*
@@ -3853,6 +4015,7 @@ scratch_ram {
3853 */ 4015 */
3854 CMDS_PENDING { 4016 CMDS_PENDING {
3855 size 2 4017 size 2
4018 dont_generate_debug_code
3856 } 4019 }
3857 4020
3858 /* 4021 /*
@@ -3860,6 +4023,7 @@ scratch_ram {
3860 */ 4023 */
3861 INT_COALESCING_CMDCOUNT { 4024 INT_COALESCING_CMDCOUNT {
3862 size 1 4025 size 1
4026 dont_generate_debug_code
3863 } 4027 }
3864 4028
3865 /* 4029 /*
@@ -3868,6 +4032,7 @@ scratch_ram {
3868 */ 4032 */
3869 LOCAL_HS_MAILBOX { 4033 LOCAL_HS_MAILBOX {
3870 size 1 4034 size 1
4035 dont_generate_debug_code
3871 } 4036 }
3872 /* 4037 /*
3873 * Target-mode CDB type to CDB length table used 4038 * Target-mode CDB type to CDB length table used
@@ -3876,6 +4041,7 @@ scratch_ram {
3876 CMDSIZE_TABLE { 4041 CMDSIZE_TABLE {
3877 size 8 4042 size 8
3878 count 8 4043 count 8
4044 dont_generate_debug_code
3879 } 4045 }
3880 /* 4046 /*
3881 * When an SCB with the MK_MESSAGE flag is 4047 * When an SCB with the MK_MESSAGE flag is
@@ -3908,25 +4074,31 @@ scb {
3908 size 4 4074 size 4
3909 alias SCB_CDB_STORE 4075 alias SCB_CDB_STORE
3910 alias SCB_HOST_CDB_PTR 4076 alias SCB_HOST_CDB_PTR
4077 dont_generate_debug_code
3911 } 4078 }
3912 SCB_RESIDUAL_SGPTR { 4079 SCB_RESIDUAL_SGPTR {
3913 size 4 4080 size 4
3914 field SG_ADDR_MASK 0xf8 /* In the last byte */ 4081 field SG_ADDR_MASK 0xf8 /* In the last byte */
3915 field SG_OVERRUN_RESID 0x02 /* In the first byte */ 4082 field SG_OVERRUN_RESID 0x02 /* In the first byte */
3916 field SG_LIST_NULL 0x01 /* In the first byte */ 4083 field SG_LIST_NULL 0x01 /* In the first byte */
4084 dont_generate_debug_code
3917 } 4085 }
3918 SCB_SCSI_STATUS { 4086 SCB_SCSI_STATUS {
3919 size 1 4087 size 1
3920 alias SCB_HOST_CDB_LEN 4088 alias SCB_HOST_CDB_LEN
4089 dont_generate_debug_code
3921 } 4090 }
3922 SCB_TARGET_PHASES { 4091 SCB_TARGET_PHASES {
3923 size 1 4092 size 1
4093 dont_generate_debug_code
3924 } 4094 }
3925 SCB_TARGET_DATA_DIR { 4095 SCB_TARGET_DATA_DIR {
3926 size 1 4096 size 1
4097 dont_generate_debug_code
3927 } 4098 }
3928 SCB_TARGET_ITAG { 4099 SCB_TARGET_ITAG {
3929 size 1 4100 size 1
4101 dont_generate_debug_code
3930 } 4102 }
3931 SCB_SENSE_BUSADDR { 4103 SCB_SENSE_BUSADDR {
3932 /* 4104 /*
@@ -3936,10 +4108,12 @@ scb {
3936 */ 4108 */
3937 size 4 4109 size 4
3938 alias SCB_NEXT_COMPLETE 4110 alias SCB_NEXT_COMPLETE
4111 dont_generate_debug_code
3939 } 4112 }
3940 SCB_TAG { 4113 SCB_TAG {
3941 alias SCB_FIFO_USE_COUNT 4114 alias SCB_FIFO_USE_COUNT
3942 size 2 4115 size 2
4116 dont_generate_debug_code
3943 } 4117 }
3944 SCB_CONTROL { 4118 SCB_CONTROL {
3945 size 1 4119 size 1
@@ -3959,6 +4133,7 @@ scb {
3959 SCB_LUN { 4133 SCB_LUN {
3960 size 1 4134 size 1
3961 field LID 0xff 4135 field LID 0xff
4136 dont_generate_debug_code
3962 } 4137 }
3963 SCB_TASK_ATTRIBUTE { 4138 SCB_TASK_ATTRIBUTE {
3964 size 1 4139 size 1
@@ -3967,16 +4142,20 @@ scb {
3967 * ignore wide residue message handling. 4142 * ignore wide residue message handling.
3968 */ 4143 */
3969 field SCB_XFERLEN_ODD 0x01 4144 field SCB_XFERLEN_ODD 0x01
4145 dont_generate_debug_code
3970 } 4146 }
3971 SCB_CDB_LEN { 4147 SCB_CDB_LEN {
3972 size 1 4148 size 1
3973 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ 4149 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */
4150 dont_generate_debug_code
3974 } 4151 }
3975 SCB_TASK_MANAGEMENT { 4152 SCB_TASK_MANAGEMENT {
3976 size 1 4153 size 1
4154 dont_generate_debug_code
3977 } 4155 }
3978 SCB_DATAPTR { 4156 SCB_DATAPTR {
3979 size 8 4157 size 8
4158 dont_generate_debug_code
3980 } 4159 }
3981 SCB_DATACNT { 4160 SCB_DATACNT {
3982 /* 4161 /*
@@ -3986,22 +4165,27 @@ scb {
3986 size 4 4165 size 4
3987 field SG_LAST_SEG 0x80 /* In the fourth byte */ 4166 field SG_LAST_SEG 0x80 /* In the fourth byte */
3988 field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ 4167 field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
4168 dont_generate_debug_code
3989 } 4169 }
3990 SCB_SGPTR { 4170 SCB_SGPTR {
3991 size 4 4171 size 4
3992 field SG_STATUS_VALID 0x04 /* In the first byte */ 4172 field SG_STATUS_VALID 0x04 /* In the first byte */
3993 field SG_FULL_RESID 0x02 /* In the first byte */ 4173 field SG_FULL_RESID 0x02 /* In the first byte */
3994 field SG_LIST_NULL 0x01 /* In the first byte */ 4174 field SG_LIST_NULL 0x01 /* In the first byte */
4175 dont_generate_debug_code
3995 } 4176 }
3996 SCB_BUSADDR { 4177 SCB_BUSADDR {
3997 size 4 4178 size 4
4179 dont_generate_debug_code
3998 } 4180 }
3999 SCB_NEXT { 4181 SCB_NEXT {
4000 alias SCB_NEXT_SCB_BUSADDR 4182 alias SCB_NEXT_SCB_BUSADDR
4001 size 2 4183 size 2
4184 dont_generate_debug_code
4002 } 4185 }
4003 SCB_NEXT2 { 4186 SCB_NEXT2 {
4004 size 2 4187 size 2
4188 dont_generate_debug_code
4005 } 4189 }
4006 SCB_SPARE { 4190 SCB_SPARE {
4007 size 8 4191 size 8
@@ -4009,6 +4193,7 @@ scb {
4009 } 4193 }
4010 SCB_DISCONNECTED_LISTS { 4194 SCB_DISCONNECTED_LISTS {
4011 size 8 4195 size 8
4196 dont_generate_debug_code
4012 } 4197 }
4013} 4198}
4014 4199
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 55508b0fcec4..bdad54ec088c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2472,8 +2472,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2473 ahd_outb(ahd, CLRLQOINT1, 0); 2473 ahd_outb(ahd, CLRLQOINT1, 0);
2474 } else if ((status & SELTO) != 0) { 2474 } else if ((status & SELTO) != 0) {
2475 u_int scbid;
2476
2477 /* Stop the selection */ 2475 /* Stop the selection */
2478 ahd_outb(ahd, SCSISEQ0, 0); 2476 ahd_outb(ahd, SCSISEQ0, 0);
2479 2477
@@ -2583,9 +2581,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2583 case BUSFREE_DFF0: 2581 case BUSFREE_DFF0:
2584 case BUSFREE_DFF1: 2582 case BUSFREE_DFF1:
2585 { 2583 {
2586 u_int scbid;
2587 struct scb *scb;
2588
2589 mode = busfreetime == BUSFREE_DFF0 2584 mode = busfreetime == BUSFREE_DFF0
2590 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; 2585 ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
2591 ahd_set_modes(ahd, mode, mode); 2586 ahd_set_modes(ahd, mode, mode);
@@ -3689,7 +3684,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
3689 * by the capabilities of the bus connectivity of and sync settings for 3684 * by the capabilities of the bus connectivity of and sync settings for
3690 * the target. 3685 * the target.
3691 */ 3686 */
3692void 3687static void
3693ahd_devlimited_syncrate(struct ahd_softc *ahd, 3688ahd_devlimited_syncrate(struct ahd_softc *ahd,
3694 struct ahd_initiator_tinfo *tinfo, 3689 struct ahd_initiator_tinfo *tinfo,
3695 u_int *period, u_int *ppr_options, role_t role) 3690 u_int *period, u_int *ppr_options, role_t role)
@@ -4136,7 +4131,7 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4136 4131
4137 /* 4132 /*
4138 * Harpoon2A assumed that there would be a 4133 * Harpoon2A assumed that there would be a
4139 * fallback rate between 160MHz and 80Mhz, 4134 * fallback rate between 160MHz and 80MHz,
4140 * so 7 is used as the period factor rather 4135 * so 7 is used as the period factor rather
4141 * than 8 for 160MHz. 4136 * than 8 for 160MHz.
4142 */ 4137 */
@@ -8708,7 +8703,7 @@ ahd_reset_current_bus(struct ahd_softc *ahd)
8708int 8703int
8709ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) 8704ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8710{ 8705{
8711 struct ahd_devinfo devinfo; 8706 struct ahd_devinfo caminfo;
8712 u_int initiator; 8707 u_int initiator;
8713 u_int target; 8708 u_int target;
8714 u_int max_scsiid; 8709 u_int max_scsiid;
@@ -8729,7 +8724,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8729 8724
8730 ahd->pending_device = NULL; 8725 ahd->pending_device = NULL;
8731 8726
8732 ahd_compile_devinfo(&devinfo, 8727 ahd_compile_devinfo(&caminfo,
8733 CAM_TARGET_WILDCARD, 8728 CAM_TARGET_WILDCARD,
8734 CAM_TARGET_WILDCARD, 8729 CAM_TARGET_WILDCARD,
8735 CAM_LUN_WILDCARD, 8730 CAM_LUN_WILDCARD,
@@ -8868,7 +8863,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8868 } 8863 }
8869 8864
8870 /* Notify the XPT that a bus reset occurred */ 8865 /* Notify the XPT that a bus reset occurred */
8871 ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, 8866 ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
8872 CAM_LUN_WILDCARD, AC_BUS_RESET); 8867 CAM_LUN_WILDCARD, AC_BUS_RESET);
8873 8868
8874 ahd_restart(ahd); 8869 ahd_restart(ahd);
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c25b6adffbf9..a734d77e880e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -223,10 +223,10 @@ static const char *pci_bus_modes[] =
223 "PCI bus mode unknown", 223 "PCI bus mode unknown",
224 "PCI bus mode unknown", 224 "PCI bus mode unknown",
225 "PCI bus mode unknown", 225 "PCI bus mode unknown",
226 "PCI-X 101-133Mhz", 226 "PCI-X 101-133MHz",
227 "PCI-X 67-100Mhz", 227 "PCI-X 67-100MHz",
228 "PCI-X 50-66Mhz", 228 "PCI-X 50-66MHz",
229 "PCI 33 or 66Mhz" 229 "PCI 33 or 66MHz"
230}; 230};
231 231
232#define TESTMODE 0x00000800ul 232#define TESTMODE 0x00000800ul
@@ -337,8 +337,6 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
337 * 64bit bus (PCI64BIT set in devconfig). 337 * 64bit bus (PCI64BIT set in devconfig).
338 */ 338 */
339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { 339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
340 uint32_t devconfig;
341
342 if (bootverbose) 340 if (bootverbose)
343 printf("%s: Enabling 39Bit Addressing\n", 341 printf("%s: Enabling 39Bit Addressing\n",
344 ahd_name(ahd)); 342 ahd_name(ahd));
@@ -483,8 +481,6 @@ ahd_pci_test_register_access(struct ahd_softc *ahd)
483 goto fail; 481 goto fail;
484 482
485 if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { 483 if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
486 u_int targpcistat;
487
488 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 484 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
489 targpcistat = ahd_inb(ahd, TARGPCISTAT); 485 targpcistat = ahd_inb(ahd, TARGPCISTAT);
490 if ((targpcistat & STA) != 0) 486 if ((targpcistat & STA) != 0)
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index c21ceab8e913..cdcead071ef6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -34,13 +34,6 @@ ahd_reg_print_t ahd_seqintcode_print;
34#endif 34#endif
35 35
36#if AIC_DEBUG_REGISTERS 36#if AIC_DEBUG_REGISTERS
37ahd_reg_print_t ahd_clrint_print;
38#else
39#define ahd_clrint_print(regvalue, cur_col, wrap) \
40 ahd_print_register(NULL, 0, "CLRINT", 0x03, regvalue, cur_col, wrap)
41#endif
42
43#if AIC_DEBUG_REGISTERS
44ahd_reg_print_t ahd_error_print; 37ahd_reg_print_t ahd_error_print;
45#else 38#else
46#define ahd_error_print(regvalue, cur_col, wrap) \ 39#define ahd_error_print(regvalue, cur_col, wrap) \
@@ -48,20 +41,6 @@ ahd_reg_print_t ahd_error_print;
48#endif 41#endif
49 42
50#if AIC_DEBUG_REGISTERS 43#if AIC_DEBUG_REGISTERS
51ahd_reg_print_t ahd_hcntrl_print;
52#else
53#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
54 ahd_print_register(NULL, 0, "HCNTRL", 0x05, regvalue, cur_col, wrap)
55#endif
56
57#if AIC_DEBUG_REGISTERS
58ahd_reg_print_t ahd_hnscb_qoff_print;
59#else
60#define ahd_hnscb_qoff_print(regvalue, cur_col, wrap) \
61 ahd_print_register(NULL, 0, "HNSCB_QOFF", 0x06, regvalue, cur_col, wrap)
62#endif
63
64#if AIC_DEBUG_REGISTERS
65ahd_reg_print_t ahd_hescb_qoff_print; 44ahd_reg_print_t ahd_hescb_qoff_print;
66#else 45#else
67#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \ 46#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
@@ -97,13 +76,6 @@ ahd_reg_print_t ahd_swtimer_print;
97#endif 76#endif
98 77
99#if AIC_DEBUG_REGISTERS 78#if AIC_DEBUG_REGISTERS
100ahd_reg_print_t ahd_snscb_qoff_print;
101#else
102#define ahd_snscb_qoff_print(regvalue, cur_col, wrap) \
103 ahd_print_register(NULL, 0, "SNSCB_QOFF", 0x10, regvalue, cur_col, wrap)
104#endif
105
106#if AIC_DEBUG_REGISTERS
107ahd_reg_print_t ahd_sescb_qoff_print; 79ahd_reg_print_t ahd_sescb_qoff_print;
108#else 80#else
109#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \ 81#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
@@ -111,20 +83,6 @@ ahd_reg_print_t ahd_sescb_qoff_print;
111#endif 83#endif
112 84
113#if AIC_DEBUG_REGISTERS 85#if AIC_DEBUG_REGISTERS
114ahd_reg_print_t ahd_sdscb_qoff_print;
115#else
116#define ahd_sdscb_qoff_print(regvalue, cur_col, wrap) \
117 ahd_print_register(NULL, 0, "SDSCB_QOFF", 0x14, regvalue, cur_col, wrap)
118#endif
119
120#if AIC_DEBUG_REGISTERS
121ahd_reg_print_t ahd_qoff_ctlsta_print;
122#else
123#define ahd_qoff_ctlsta_print(regvalue, cur_col, wrap) \
124 ahd_print_register(NULL, 0, "QOFF_CTLSTA", 0x16, regvalue, cur_col, wrap)
125#endif
126
127#if AIC_DEBUG_REGISTERS
128ahd_reg_print_t ahd_intctl_print; 86ahd_reg_print_t ahd_intctl_print;
129#else 87#else
130#define ahd_intctl_print(regvalue, cur_col, wrap) \ 88#define ahd_intctl_print(regvalue, cur_col, wrap) \
@@ -139,13 +97,6 @@ ahd_reg_print_t ahd_dfcntrl_print;
139#endif 97#endif
140 98
141#if AIC_DEBUG_REGISTERS 99#if AIC_DEBUG_REGISTERS
142ahd_reg_print_t ahd_dscommand0_print;
143#else
144#define ahd_dscommand0_print(regvalue, cur_col, wrap) \
145 ahd_print_register(NULL, 0, "DSCOMMAND0", 0x19, regvalue, cur_col, wrap)
146#endif
147
148#if AIC_DEBUG_REGISTERS
149ahd_reg_print_t ahd_dfstatus_print; 100ahd_reg_print_t ahd_dfstatus_print;
150#else 101#else
151#define ahd_dfstatus_print(regvalue, cur_col, wrap) \ 102#define ahd_dfstatus_print(regvalue, cur_col, wrap) \
@@ -160,13 +111,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
160#endif 111#endif
161 112
162#if AIC_DEBUG_REGISTERS 113#if AIC_DEBUG_REGISTERS
163ahd_reg_print_t ahd_sg_cache_pre_print;
164#else
165#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
166 ahd_print_register(NULL, 0, "SG_CACHE_PRE", 0x1b, regvalue, cur_col, wrap)
167#endif
168
169#if AIC_DEBUG_REGISTERS
170ahd_reg_print_t ahd_lqin_print; 114ahd_reg_print_t ahd_lqin_print;
171#else 115#else
172#define ahd_lqin_print(regvalue, cur_col, wrap) \ 116#define ahd_lqin_print(regvalue, cur_col, wrap) \
@@ -293,13 +237,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
293#endif 237#endif
294 238
295#if AIC_DEBUG_REGISTERS 239#if AIC_DEBUG_REGISTERS
296ahd_reg_print_t ahd_sxfrctl1_print;
297#else
298#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
299 ahd_print_register(NULL, 0, "SXFRCTL1", 0x3d, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahd_reg_print_t ahd_dffstat_print; 240ahd_reg_print_t ahd_dffstat_print;
304#else 241#else
305#define ahd_dffstat_print(regvalue, cur_col, wrap) \ 242#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -314,13 +251,6 @@ ahd_reg_print_t ahd_multargid_print;
314#endif 251#endif
315 252
316#if AIC_DEBUG_REGISTERS 253#if AIC_DEBUG_REGISTERS
317ahd_reg_print_t ahd_scsisigo_print;
318#else
319#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
320 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
321#endif
322
323#if AIC_DEBUG_REGISTERS
324ahd_reg_print_t ahd_scsisigi_print; 254ahd_reg_print_t ahd_scsisigi_print;
325#else 255#else
326#define ahd_scsisigi_print(regvalue, cur_col, wrap) \ 256#define ahd_scsisigi_print(regvalue, cur_col, wrap) \
@@ -363,13 +293,6 @@ ahd_reg_print_t ahd_selid_print;
363#endif 293#endif
364 294
365#if AIC_DEBUG_REGISTERS 295#if AIC_DEBUG_REGISTERS
366ahd_reg_print_t ahd_optionmode_print;
367#else
368#define ahd_optionmode_print(regvalue, cur_col, wrap) \
369 ahd_print_register(NULL, 0, "OPTIONMODE", 0x4a, regvalue, cur_col, wrap)
370#endif
371
372#if AIC_DEBUG_REGISTERS
373ahd_reg_print_t ahd_sblkctl_print; 296ahd_reg_print_t ahd_sblkctl_print;
374#else 297#else
375#define ahd_sblkctl_print(regvalue, cur_col, wrap) \ 298#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
@@ -391,13 +314,6 @@ ahd_reg_print_t ahd_simode0_print;
391#endif 314#endif
392 315
393#if AIC_DEBUG_REGISTERS 316#if AIC_DEBUG_REGISTERS
394ahd_reg_print_t ahd_clrsint0_print;
395#else
396#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
397 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
398#endif
399
400#if AIC_DEBUG_REGISTERS
401ahd_reg_print_t ahd_sstat1_print; 317ahd_reg_print_t ahd_sstat1_print;
402#else 318#else
403#define ahd_sstat1_print(regvalue, cur_col, wrap) \ 319#define ahd_sstat1_print(regvalue, cur_col, wrap) \
@@ -405,13 +321,6 @@ ahd_reg_print_t ahd_sstat1_print;
405#endif 321#endif
406 322
407#if AIC_DEBUG_REGISTERS 323#if AIC_DEBUG_REGISTERS
408ahd_reg_print_t ahd_clrsint1_print;
409#else
410#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
411 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
412#endif
413
414#if AIC_DEBUG_REGISTERS
415ahd_reg_print_t ahd_sstat2_print; 324ahd_reg_print_t ahd_sstat2_print;
416#else 325#else
417#define ahd_sstat2_print(regvalue, cur_col, wrap) \ 326#define ahd_sstat2_print(regvalue, cur_col, wrap) \
@@ -461,17 +370,17 @@ ahd_reg_print_t ahd_lqistat0_print;
461#endif 370#endif
462 371
463#if AIC_DEBUG_REGISTERS 372#if AIC_DEBUG_REGISTERS
464ahd_reg_print_t ahd_lqimode0_print; 373ahd_reg_print_t ahd_clrlqiint0_print;
465#else 374#else
466#define ahd_lqimode0_print(regvalue, cur_col, wrap) \ 375#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
467 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap) 376 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
468#endif 377#endif
469 378
470#if AIC_DEBUG_REGISTERS 379#if AIC_DEBUG_REGISTERS
471ahd_reg_print_t ahd_clrlqiint0_print; 380ahd_reg_print_t ahd_lqimode0_print;
472#else 381#else
473#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \ 382#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
474 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap) 383 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
475#endif 384#endif
476 385
477#if AIC_DEBUG_REGISTERS 386#if AIC_DEBUG_REGISTERS
@@ -629,17 +538,17 @@ ahd_reg_print_t ahd_seqintsrc_print;
629#endif 538#endif
630 539
631#if AIC_DEBUG_REGISTERS 540#if AIC_DEBUG_REGISTERS
632ahd_reg_print_t ahd_seqimode_print; 541ahd_reg_print_t ahd_currscb_print;
633#else 542#else
634#define ahd_seqimode_print(regvalue, cur_col, wrap) \ 543#define ahd_currscb_print(regvalue, cur_col, wrap) \
635 ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap) 544 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
636#endif 545#endif
637 546
638#if AIC_DEBUG_REGISTERS 547#if AIC_DEBUG_REGISTERS
639ahd_reg_print_t ahd_currscb_print; 548ahd_reg_print_t ahd_seqimode_print;
640#else 549#else
641#define ahd_currscb_print(regvalue, cur_col, wrap) \ 550#define ahd_seqimode_print(regvalue, cur_col, wrap) \
642 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap) 551 ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
643#endif 552#endif
644 553
645#if AIC_DEBUG_REGISTERS 554#if AIC_DEBUG_REGISTERS
@@ -657,13 +566,6 @@ ahd_reg_print_t ahd_lastscb_print;
657#endif 566#endif
658 567
659#if AIC_DEBUG_REGISTERS 568#if AIC_DEBUG_REGISTERS
660ahd_reg_print_t ahd_shaddr_print;
661#else
662#define ahd_shaddr_print(regvalue, cur_col, wrap) \
663 ahd_print_register(NULL, 0, "SHADDR", 0x60, regvalue, cur_col, wrap)
664#endif
665
666#if AIC_DEBUG_REGISTERS
667ahd_reg_print_t ahd_negoaddr_print; 569ahd_reg_print_t ahd_negoaddr_print;
668#else 570#else
669#define ahd_negoaddr_print(regvalue, cur_col, wrap) \ 571#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
@@ -748,27 +650,6 @@ ahd_reg_print_t ahd_seloid_print;
748#endif 650#endif
749 651
750#if AIC_DEBUG_REGISTERS 652#if AIC_DEBUG_REGISTERS
751ahd_reg_print_t ahd_haddr_print;
752#else
753#define ahd_haddr_print(regvalue, cur_col, wrap) \
754 ahd_print_register(NULL, 0, "HADDR", 0x70, regvalue, cur_col, wrap)
755#endif
756
757#if AIC_DEBUG_REGISTERS
758ahd_reg_print_t ahd_hcnt_print;
759#else
760#define ahd_hcnt_print(regvalue, cur_col, wrap) \
761 ahd_print_register(NULL, 0, "HCNT", 0x78, regvalue, cur_col, wrap)
762#endif
763
764#if AIC_DEBUG_REGISTERS
765ahd_reg_print_t ahd_sghaddr_print;
766#else
767#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
768 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
769#endif
770
771#if AIC_DEBUG_REGISTERS
772ahd_reg_print_t ahd_scbhaddr_print; 653ahd_reg_print_t ahd_scbhaddr_print;
773#else 654#else
774#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \ 655#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
@@ -776,10 +657,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
776#endif 657#endif
777 658
778#if AIC_DEBUG_REGISTERS 659#if AIC_DEBUG_REGISTERS
779ahd_reg_print_t ahd_sghcnt_print; 660ahd_reg_print_t ahd_sghaddr_print;
780#else 661#else
781#define ahd_sghcnt_print(regvalue, cur_col, wrap) \ 662#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
782 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap) 663 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
783#endif 664#endif
784 665
785#if AIC_DEBUG_REGISTERS 666#if AIC_DEBUG_REGISTERS
@@ -790,10 +671,10 @@ ahd_reg_print_t ahd_scbhcnt_print;
790#endif 671#endif
791 672
792#if AIC_DEBUG_REGISTERS 673#if AIC_DEBUG_REGISTERS
793ahd_reg_print_t ahd_dff_thrsh_print; 674ahd_reg_print_t ahd_sghcnt_print;
794#else 675#else
795#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \ 676#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
796 ahd_print_register(NULL, 0, "DFF_THRSH", 0x88, regvalue, cur_col, wrap) 677 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
797#endif 678#endif
798 679
799#if AIC_DEBUG_REGISTERS 680#if AIC_DEBUG_REGISTERS
@@ -867,13 +748,6 @@ ahd_reg_print_t ahd_targpcistat_print;
867#endif 748#endif
868 749
869#if AIC_DEBUG_REGISTERS 750#if AIC_DEBUG_REGISTERS
870ahd_reg_print_t ahd_scbptr_print;
871#else
872#define ahd_scbptr_print(regvalue, cur_col, wrap) \
873 ahd_print_register(NULL, 0, "SCBPTR", 0xa8, regvalue, cur_col, wrap)
874#endif
875
876#if AIC_DEBUG_REGISTERS
877ahd_reg_print_t ahd_scbautoptr_print; 751ahd_reg_print_t ahd_scbautoptr_print;
878#else 752#else
879#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \ 753#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -881,13 +755,6 @@ ahd_reg_print_t ahd_scbautoptr_print;
881#endif 755#endif
882 756
883#if AIC_DEBUG_REGISTERS 757#if AIC_DEBUG_REGISTERS
884ahd_reg_print_t ahd_ccsgaddr_print;
885#else
886#define ahd_ccsgaddr_print(regvalue, cur_col, wrap) \
887 ahd_print_register(NULL, 0, "CCSGADDR", 0xac, regvalue, cur_col, wrap)
888#endif
889
890#if AIC_DEBUG_REGISTERS
891ahd_reg_print_t ahd_ccscbaddr_print; 758ahd_reg_print_t ahd_ccscbaddr_print;
892#else 759#else
893#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \ 760#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -909,13 +776,6 @@ ahd_reg_print_t ahd_ccsgctl_print;
909#endif 776#endif
910 777
911#if AIC_DEBUG_REGISTERS 778#if AIC_DEBUG_REGISTERS
912ahd_reg_print_t ahd_ccsgram_print;
913#else
914#define ahd_ccsgram_print(regvalue, cur_col, wrap) \
915 ahd_print_register(NULL, 0, "CCSGRAM", 0xb0, regvalue, cur_col, wrap)
916#endif
917
918#if AIC_DEBUG_REGISTERS
919ahd_reg_print_t ahd_ccscbram_print; 779ahd_reg_print_t ahd_ccscbram_print;
920#else 780#else
921#define ahd_ccscbram_print(regvalue, cur_col, wrap) \ 781#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -930,13 +790,6 @@ ahd_reg_print_t ahd_brddat_print;
930#endif 790#endif
931 791
932#if AIC_DEBUG_REGISTERS 792#if AIC_DEBUG_REGISTERS
933ahd_reg_print_t ahd_brdctl_print;
934#else
935#define ahd_brdctl_print(regvalue, cur_col, wrap) \
936 ahd_print_register(NULL, 0, "BRDCTL", 0xb9, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahd_reg_print_t ahd_seeadr_print; 793ahd_reg_print_t ahd_seeadr_print;
941#else 794#else
942#define ahd_seeadr_print(regvalue, cur_col, wrap) \ 795#define ahd_seeadr_print(regvalue, cur_col, wrap) \
@@ -972,13 +825,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
972#endif 825#endif
973 826
974#if AIC_DEBUG_REGISTERS 827#if AIC_DEBUG_REGISTERS
975ahd_reg_print_t ahd_dfdat_print;
976#else
977#define ahd_dfdat_print(regvalue, cur_col, wrap) \
978 ahd_print_register(NULL, 0, "DFDAT", 0xc4, regvalue, cur_col, wrap)
979#endif
980
981#if AIC_DEBUG_REGISTERS
982ahd_reg_print_t ahd_dspselect_print; 828ahd_reg_print_t ahd_dspselect_print;
983#else 829#else
984#define ahd_dspselect_print(regvalue, cur_col, wrap) \ 830#define ahd_dspselect_print(regvalue, cur_col, wrap) \
@@ -1000,13 +846,6 @@ ahd_reg_print_t ahd_seqctl0_print;
1000#endif 846#endif
1001 847
1002#if AIC_DEBUG_REGISTERS 848#if AIC_DEBUG_REGISTERS
1003ahd_reg_print_t ahd_flags_print;
1004#else
1005#define ahd_flags_print(regvalue, cur_col, wrap) \
1006 ahd_print_register(NULL, 0, "FLAGS", 0xd8, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahd_reg_print_t ahd_seqintctl_print; 849ahd_reg_print_t ahd_seqintctl_print;
1011#else 850#else
1012#define ahd_seqintctl_print(regvalue, cur_col, wrap) \ 851#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
@@ -1014,13 +853,6 @@ ahd_reg_print_t ahd_seqintctl_print;
1014#endif 853#endif
1015 854
1016#if AIC_DEBUG_REGISTERS 855#if AIC_DEBUG_REGISTERS
1017ahd_reg_print_t ahd_seqram_print;
1018#else
1019#define ahd_seqram_print(regvalue, cur_col, wrap) \
1020 ahd_print_register(NULL, 0, "SEQRAM", 0xda, regvalue, cur_col, wrap)
1021#endif
1022
1023#if AIC_DEBUG_REGISTERS
1024ahd_reg_print_t ahd_prgmcnt_print; 856ahd_reg_print_t ahd_prgmcnt_print;
1025#else 857#else
1026#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \ 858#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
@@ -1028,41 +860,6 @@ ahd_reg_print_t ahd_prgmcnt_print;
1028#endif 860#endif
1029 861
1030#if AIC_DEBUG_REGISTERS 862#if AIC_DEBUG_REGISTERS
1031ahd_reg_print_t ahd_accum_print;
1032#else
1033#define ahd_accum_print(regvalue, cur_col, wrap) \
1034 ahd_print_register(NULL, 0, "ACCUM", 0xe0, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahd_reg_print_t ahd_sindex_print;
1039#else
1040#define ahd_sindex_print(regvalue, cur_col, wrap) \
1041 ahd_print_register(NULL, 0, "SINDEX", 0xe2, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahd_reg_print_t ahd_dindex_print;
1046#else
1047#define ahd_dindex_print(regvalue, cur_col, wrap) \
1048 ahd_print_register(NULL, 0, "DINDEX", 0xe4, regvalue, cur_col, wrap)
1049#endif
1050
1051#if AIC_DEBUG_REGISTERS
1052ahd_reg_print_t ahd_allones_print;
1053#else
1054#define ahd_allones_print(regvalue, cur_col, wrap) \
1055 ahd_print_register(NULL, 0, "ALLONES", 0xe8, regvalue, cur_col, wrap)
1056#endif
1057
1058#if AIC_DEBUG_REGISTERS
1059ahd_reg_print_t ahd_allzeros_print;
1060#else
1061#define ahd_allzeros_print(regvalue, cur_col, wrap) \
1062 ahd_print_register(NULL, 0, "ALLZEROS", 0xea, regvalue, cur_col, wrap)
1063#endif
1064
1065#if AIC_DEBUG_REGISTERS
1066ahd_reg_print_t ahd_none_print; 863ahd_reg_print_t ahd_none_print;
1067#else 864#else
1068#define ahd_none_print(regvalue, cur_col, wrap) \ 865#define ahd_none_print(regvalue, cur_col, wrap) \
@@ -1070,27 +867,6 @@ ahd_reg_print_t ahd_none_print;
1070#endif 867#endif
1071 868
1072#if AIC_DEBUG_REGISTERS 869#if AIC_DEBUG_REGISTERS
1073ahd_reg_print_t ahd_sindir_print;
1074#else
1075#define ahd_sindir_print(regvalue, cur_col, wrap) \
1076 ahd_print_register(NULL, 0, "SINDIR", 0xec, regvalue, cur_col, wrap)
1077#endif
1078
1079#if AIC_DEBUG_REGISTERS
1080ahd_reg_print_t ahd_dindir_print;
1081#else
1082#define ahd_dindir_print(regvalue, cur_col, wrap) \
1083 ahd_print_register(NULL, 0, "DINDIR", 0xed, regvalue, cur_col, wrap)
1084#endif
1085
1086#if AIC_DEBUG_REGISTERS
1087ahd_reg_print_t ahd_stack_print;
1088#else
1089#define ahd_stack_print(regvalue, cur_col, wrap) \
1090 ahd_print_register(NULL, 0, "STACK", 0xf2, regvalue, cur_col, wrap)
1091#endif
1092
1093#if AIC_DEBUG_REGISTERS
1094ahd_reg_print_t ahd_intvec1_addr_print; 870ahd_reg_print_t ahd_intvec1_addr_print;
1095#else 871#else
1096#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \ 872#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
@@ -1126,17 +902,17 @@ ahd_reg_print_t ahd_accum_save_print;
1126#endif 902#endif
1127 903
1128#if AIC_DEBUG_REGISTERS 904#if AIC_DEBUG_REGISTERS
1129ahd_reg_print_t ahd_sram_base_print; 905ahd_reg_print_t ahd_waiting_scb_tails_print;
1130#else 906#else
1131#define ahd_sram_base_print(regvalue, cur_col, wrap) \ 907#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1132 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) 908 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1133#endif 909#endif
1134 910
1135#if AIC_DEBUG_REGISTERS 911#if AIC_DEBUG_REGISTERS
1136ahd_reg_print_t ahd_waiting_scb_tails_print; 912ahd_reg_print_t ahd_sram_base_print;
1137#else 913#else
1138#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \ 914#define ahd_sram_base_print(regvalue, cur_col, wrap) \
1139 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap) 915 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
1140#endif 916#endif
1141 917
1142#if AIC_DEBUG_REGISTERS 918#if AIC_DEBUG_REGISTERS
@@ -1224,13 +1000,6 @@ ahd_reg_print_t ahd_msg_out_print;
1224#endif 1000#endif
1225 1001
1226#if AIC_DEBUG_REGISTERS 1002#if AIC_DEBUG_REGISTERS
1227ahd_reg_print_t ahd_dmaparams_print;
1228#else
1229#define ahd_dmaparams_print(regvalue, cur_col, wrap) \
1230 ahd_print_register(NULL, 0, "DMAPARAMS", 0x138, regvalue, cur_col, wrap)
1231#endif
1232
1233#if AIC_DEBUG_REGISTERS
1234ahd_reg_print_t ahd_seq_flags_print; 1003ahd_reg_print_t ahd_seq_flags_print;
1235#else 1004#else
1236#define ahd_seq_flags_print(regvalue, cur_col, wrap) \ 1005#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
@@ -1238,20 +1007,6 @@ ahd_reg_print_t ahd_seq_flags_print;
1238#endif 1007#endif
1239 1008
1240#if AIC_DEBUG_REGISTERS 1009#if AIC_DEBUG_REGISTERS
1241ahd_reg_print_t ahd_saved_scsiid_print;
1242#else
1243#define ahd_saved_scsiid_print(regvalue, cur_col, wrap) \
1244 ahd_print_register(NULL, 0, "SAVED_SCSIID", 0x13a, regvalue, cur_col, wrap)
1245#endif
1246
1247#if AIC_DEBUG_REGISTERS
1248ahd_reg_print_t ahd_saved_lun_print;
1249#else
1250#define ahd_saved_lun_print(regvalue, cur_col, wrap) \
1251 ahd_print_register(NULL, 0, "SAVED_LUN", 0x13b, regvalue, cur_col, wrap)
1252#endif
1253
1254#if AIC_DEBUG_REGISTERS
1255ahd_reg_print_t ahd_lastphase_print; 1010ahd_reg_print_t ahd_lastphase_print;
1256#else 1011#else
1257#define ahd_lastphase_print(regvalue, cur_col, wrap) \ 1012#define ahd_lastphase_print(regvalue, cur_col, wrap) \
@@ -1273,20 +1028,6 @@ ahd_reg_print_t ahd_kernel_tqinpos_print;
1273#endif 1028#endif
1274 1029
1275#if AIC_DEBUG_REGISTERS 1030#if AIC_DEBUG_REGISTERS
1276ahd_reg_print_t ahd_tqinpos_print;
1277#else
1278#define ahd_tqinpos_print(regvalue, cur_col, wrap) \
1279 ahd_print_register(NULL, 0, "TQINPOS", 0x13f, regvalue, cur_col, wrap)
1280#endif
1281
1282#if AIC_DEBUG_REGISTERS
1283ahd_reg_print_t ahd_shared_data_addr_print;
1284#else
1285#define ahd_shared_data_addr_print(regvalue, cur_col, wrap) \
1286 ahd_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x140, regvalue, cur_col, wrap)
1287#endif
1288
1289#if AIC_DEBUG_REGISTERS
1290ahd_reg_print_t ahd_qoutfifo_next_addr_print; 1031ahd_reg_print_t ahd_qoutfifo_next_addr_print;
1291#else 1032#else
1292#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \ 1033#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
@@ -1294,20 +1035,6 @@ ahd_reg_print_t ahd_qoutfifo_next_addr_print;
1294#endif 1035#endif
1295 1036
1296#if AIC_DEBUG_REGISTERS 1037#if AIC_DEBUG_REGISTERS
1297ahd_reg_print_t ahd_arg_1_print;
1298#else
1299#define ahd_arg_1_print(regvalue, cur_col, wrap) \
1300 ahd_print_register(NULL, 0, "ARG_1", 0x148, regvalue, cur_col, wrap)
1301#endif
1302
1303#if AIC_DEBUG_REGISTERS
1304ahd_reg_print_t ahd_arg_2_print;
1305#else
1306#define ahd_arg_2_print(regvalue, cur_col, wrap) \
1307 ahd_print_register(NULL, 0, "ARG_2", 0x149, regvalue, cur_col, wrap)
1308#endif
1309
1310#if AIC_DEBUG_REGISTERS
1311ahd_reg_print_t ahd_last_msg_print; 1038ahd_reg_print_t ahd_last_msg_print;
1312#else 1039#else
1313#define ahd_last_msg_print(regvalue, cur_col, wrap) \ 1040#define ahd_last_msg_print(regvalue, cur_col, wrap) \
@@ -1406,13 +1133,6 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
1406#endif 1133#endif
1407 1134
1408#if AIC_DEBUG_REGISTERS 1135#if AIC_DEBUG_REGISTERS
1409ahd_reg_print_t ahd_scb_residual_datacnt_print;
1410#else
1411#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
1412 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
1413#endif
1414
1415#if AIC_DEBUG_REGISTERS
1416ahd_reg_print_t ahd_scb_base_print; 1136ahd_reg_print_t ahd_scb_base_print;
1417#else 1137#else
1418#define ahd_scb_base_print(regvalue, cur_col, wrap) \ 1138#define ahd_scb_base_print(regvalue, cur_col, wrap) \
@@ -1420,17 +1140,10 @@ ahd_reg_print_t ahd_scb_base_print;
1420#endif 1140#endif
1421 1141
1422#if AIC_DEBUG_REGISTERS 1142#if AIC_DEBUG_REGISTERS
1423ahd_reg_print_t ahd_scb_residual_sgptr_print; 1143ahd_reg_print_t ahd_scb_residual_datacnt_print;
1424#else
1425#define ahd_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
1426 ahd_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0x184, regvalue, cur_col, wrap)
1427#endif
1428
1429#if AIC_DEBUG_REGISTERS
1430ahd_reg_print_t ahd_scb_scsi_status_print;
1431#else 1144#else
1432#define ahd_scb_scsi_status_print(regvalue, cur_col, wrap) \ 1145#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
1433 ahd_print_register(NULL, 0, "SCB_SCSI_STATUS", 0x188, regvalue, cur_col, wrap) 1146 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
1434#endif 1147#endif
1435 1148
1436#if AIC_DEBUG_REGISTERS 1149#if AIC_DEBUG_REGISTERS
@@ -1476,13 +1189,6 @@ ahd_reg_print_t ahd_scb_task_attribute_print;
1476#endif 1189#endif
1477 1190
1478#if AIC_DEBUG_REGISTERS 1191#if AIC_DEBUG_REGISTERS
1479ahd_reg_print_t ahd_scb_cdb_len_print;
1480#else
1481#define ahd_scb_cdb_len_print(regvalue, cur_col, wrap) \
1482 ahd_print_register(NULL, 0, "SCB_CDB_LEN", 0x196, regvalue, cur_col, wrap)
1483#endif
1484
1485#if AIC_DEBUG_REGISTERS
1486ahd_reg_print_t ahd_scb_task_management_print; 1192ahd_reg_print_t ahd_scb_task_management_print;
1487#else 1193#else
1488#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \ 1194#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
@@ -1518,13 +1224,6 @@ ahd_reg_print_t ahd_scb_busaddr_print;
1518#endif 1224#endif
1519 1225
1520#if AIC_DEBUG_REGISTERS 1226#if AIC_DEBUG_REGISTERS
1521ahd_reg_print_t ahd_scb_next_print;
1522#else
1523#define ahd_scb_next_print(regvalue, cur_col, wrap) \
1524 ahd_print_register(NULL, 0, "SCB_NEXT", 0x1ac, regvalue, cur_col, wrap)
1525#endif
1526
1527#if AIC_DEBUG_REGISTERS
1528ahd_reg_print_t ahd_scb_next2_print; 1227ahd_reg_print_t ahd_scb_next2_print;
1529#else 1228#else
1530#define ahd_scb_next2_print(regvalue, cur_col, wrap) \ 1229#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
@@ -1717,10 +1416,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1717 1416
1718#define SG_CACHE_PRE 0x1b 1417#define SG_CACHE_PRE 0x1b
1719 1418
1720#define TYPEPTR 0x20
1721
1722#define LQIN 0x20 1419#define LQIN 0x20
1723 1420
1421#define TYPEPTR 0x20
1422
1724#define TAGPTR 0x21 1423#define TAGPTR 0x21
1725 1424
1726#define LUNPTR 0x22 1425#define LUNPTR 0x22
@@ -1780,6 +1479,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1780#define SINGLECMD 0x02 1479#define SINGLECMD 0x02
1781#define ABORTPENDING 0x01 1480#define ABORTPENDING 0x01
1782 1481
1482#define SCSBIST0 0x39
1483#define GSBISTERR 0x40
1484#define GSBISTDONE 0x20
1485#define GSBISTRUN 0x10
1486#define OSBISTERR 0x04
1487#define OSBISTDONE 0x02
1488#define OSBISTRUN 0x01
1489
1783#define LQCTL2 0x39 1490#define LQCTL2 0x39
1784#define LQIRETRY 0x80 1491#define LQIRETRY 0x80
1785#define LQICONTINUE 0x40 1492#define LQICONTINUE 0x40
@@ -1790,13 +1497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1790#define LQOTOIDLE 0x02 1497#define LQOTOIDLE 0x02
1791#define LQOPAUSE 0x01 1498#define LQOPAUSE 0x01
1792 1499
1793#define SCSBIST0 0x39 1500#define SCSBIST1 0x3a
1794#define GSBISTERR 0x40 1501#define NTBISTERR 0x04
1795#define GSBISTDONE 0x20 1502#define NTBISTDONE 0x02
1796#define GSBISTRUN 0x10 1503#define NTBISTRUN 0x01
1797#define OSBISTERR 0x04
1798#define OSBISTDONE 0x02
1799#define OSBISTRUN 0x01
1800 1504
1801#define SCSISEQ0 0x3a 1505#define SCSISEQ0 0x3a
1802#define TEMODEO 0x80 1506#define TEMODEO 0x80
@@ -1805,15 +1509,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1805#define FORCEBUSFREE 0x10 1509#define FORCEBUSFREE 0x10
1806#define SCSIRSTO 0x01 1510#define SCSIRSTO 0x01
1807 1511
1808#define SCSBIST1 0x3a
1809#define NTBISTERR 0x04
1810#define NTBISTDONE 0x02
1811#define NTBISTRUN 0x01
1812
1813#define SCSISEQ1 0x3b 1512#define SCSISEQ1 0x3b
1814 1513
1815#define BUSINITID 0x3c
1816
1817#define SXFRCTL0 0x3c 1514#define SXFRCTL0 0x3c
1818#define DFON 0x80 1515#define DFON 0x80
1819#define DFPEXP 0x40 1516#define DFPEXP 0x40
@@ -1822,6 +1519,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1822 1519
1823#define DLCOUNT 0x3c 1520#define DLCOUNT 0x3c
1824 1521
1522#define BUSINITID 0x3c
1523
1825#define SXFRCTL1 0x3d 1524#define SXFRCTL1 0x3d
1826#define BITBUCKET 0x80 1525#define BITBUCKET 0x80
1827#define ENSACHK 0x40 1526#define ENSACHK 0x40
@@ -1846,8 +1545,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1846#define CURRFIFO_1 0x01 1545#define CURRFIFO_1 0x01
1847#define CURRFIFO_0 0x00 1546#define CURRFIFO_0 0x00
1848 1547
1849#define MULTARGID 0x40
1850
1851#define SCSISIGO 0x40 1548#define SCSISIGO 0x40
1852#define CDO 0x80 1549#define CDO 0x80
1853#define IOO 0x40 1550#define IOO 0x40
@@ -1858,6 +1555,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1858#define REQO 0x02 1555#define REQO 0x02
1859#define ACKO 0x01 1556#define ACKO 0x01
1860 1557
1558#define MULTARGID 0x40
1559
1861#define SCSISIGI 0x41 1560#define SCSISIGI 0x41
1862#define ATNI 0x10 1561#define ATNI 0x10
1863#define SELI 0x08 1562#define SELI 0x08
@@ -1904,6 +1603,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1904#define ENAB20 0x04 1603#define ENAB20 0x04
1905#define SELWIDE 0x02 1604#define SELWIDE 0x02
1906 1605
1606#define CLRSINT0 0x4b
1607#define CLRSELDO 0x40
1608#define CLRSELDI 0x20
1609#define CLRSELINGO 0x10
1610#define CLRIOERR 0x08
1611#define CLROVERRUN 0x04
1612#define CLRSPIORDY 0x02
1613#define CLRARBDO 0x01
1614
1907#define SSTAT0 0x4b 1615#define SSTAT0 0x4b
1908#define TARGET 0x80 1616#define TARGET 0x80
1909#define SELDO 0x40 1617#define SELDO 0x40
@@ -1923,14 +1631,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1923#define ENSPIORDY 0x02 1631#define ENSPIORDY 0x02
1924#define ENARBDO 0x01 1632#define ENARBDO 0x01
1925 1633
1926#define CLRSINT0 0x4b 1634#define CLRSINT1 0x4c
1927#define CLRSELDO 0x40 1635#define CLRSELTIMEO 0x80
1928#define CLRSELDI 0x20 1636#define CLRATNO 0x40
1929#define CLRSELINGO 0x10 1637#define CLRSCSIRSTI 0x20
1930#define CLRIOERR 0x08 1638#define CLRBUSFREE 0x08
1931#define CLROVERRUN 0x04 1639#define CLRSCSIPERR 0x04
1932#define CLRSPIORDY 0x02 1640#define CLRSTRB2FAST 0x02
1933#define CLRARBDO 0x01 1641#define CLRREQINIT 0x01
1934 1642
1935#define SSTAT1 0x4c 1643#define SSTAT1 0x4c
1936#define SELTO 0x80 1644#define SELTO 0x80
@@ -1942,15 +1650,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1942#define STRB2FAST 0x02 1650#define STRB2FAST 0x02
1943#define REQINIT 0x01 1651#define REQINIT 0x01
1944 1652
1945#define CLRSINT1 0x4c
1946#define CLRSELTIMEO 0x80
1947#define CLRATNO 0x40
1948#define CLRSCSIRSTI 0x20
1949#define CLRBUSFREE 0x08
1950#define CLRSCSIPERR 0x04
1951#define CLRSTRB2FAST 0x02
1952#define CLRREQINIT 0x01
1953
1954#define SSTAT2 0x4d 1653#define SSTAT2 0x4d
1955#define BUSFREETIME 0xc0 1654#define BUSFREETIME 0xc0
1956#define NONPACKREQ 0x20 1655#define NONPACKREQ 0x20
@@ -1998,14 +1697,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1998#define LQIATNLQ 0x02 1697#define LQIATNLQ 0x02
1999#define LQIATNCMD 0x01 1698#define LQIATNCMD 0x01
2000 1699
2001#define LQIMODE0 0x50
2002#define ENLQIATNQASK 0x20
2003#define ENLQICRCT1 0x10
2004#define ENLQICRCT2 0x08
2005#define ENLQIBADLQT 0x04
2006#define ENLQIATNLQ 0x02
2007#define ENLQIATNCMD 0x01
2008
2009#define CLRLQIINT0 0x50 1700#define CLRLQIINT0 0x50
2010#define CLRLQIATNQAS 0x20 1701#define CLRLQIATNQAS 0x20
2011#define CLRLQICRCT1 0x10 1702#define CLRLQICRCT1 0x10
@@ -2014,6 +1705,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2014#define CLRLQIATNLQ 0x02 1705#define CLRLQIATNLQ 0x02
2015#define CLRLQIATNCMD 0x01 1706#define CLRLQIATNCMD 0x01
2016 1707
1708#define LQIMODE0 0x50
1709#define ENLQIATNQASK 0x20
1710#define ENLQICRCT1 0x10
1711#define ENLQICRCT2 0x08
1712#define ENLQIBADLQT 0x04
1713#define ENLQIATNLQ 0x02
1714#define ENLQIATNCMD 0x01
1715
2017#define LQIMODE1 0x51 1716#define LQIMODE1 0x51
2018#define ENLQIPHASE_LQ 0x80 1717#define ENLQIPHASE_LQ 0x80
2019#define ENLQIPHASE_NLQ 0x40 1718#define ENLQIPHASE_NLQ 0x40
@@ -2160,6 +1859,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2160#define CFG4ICMD 0x02 1859#define CFG4ICMD 0x02
2161#define CFG4TCMD 0x01 1860#define CFG4TCMD 0x01
2162 1861
1862#define CURRSCB 0x5c
1863
2163#define SEQIMODE 0x5c 1864#define SEQIMODE 0x5c
2164#define ENCTXTDONE 0x40 1865#define ENCTXTDONE 0x40
2165#define ENSAVEPTRS 0x20 1866#define ENSAVEPTRS 0x20
@@ -2169,8 +1870,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2169#define ENCFG4ICMD 0x02 1870#define ENCFG4ICMD 0x02
2170#define ENCFG4TCMD 0x01 1871#define ENCFG4TCMD 0x01
2171 1872
2172#define CURRSCB 0x5c
2173
2174#define MDFFSTAT 0x5d 1873#define MDFFSTAT 0x5d
2175#define SHCNTNEGATIVE 0x40 1874#define SHCNTNEGATIVE 0x40
2176#define SHCNTMINUS1 0x20 1875#define SHCNTMINUS1 0x20
@@ -2185,29 +1884,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2185 1884
2186#define DFFTAG 0x5e 1885#define DFFTAG 0x5e
2187 1886
1887#define LASTSCB 0x5e
1888
2188#define SCSITEST 0x5e 1889#define SCSITEST 0x5e
2189#define CNTRTEST 0x08 1890#define CNTRTEST 0x08
2190#define SEL_TXPLL_DEBUG 0x04 1891#define SEL_TXPLL_DEBUG 0x04
2191 1892
2192#define LASTSCB 0x5e
2193
2194#define IOPDNCTL 0x5f 1893#define IOPDNCTL 0x5f
2195#define DISABLE_OE 0x80 1894#define DISABLE_OE 0x80
2196#define PDN_IDIST 0x04 1895#define PDN_IDIST 0x04
2197#define PDN_DIFFSENSE 0x01 1896#define PDN_DIFFSENSE 0x01
2198 1897
2199#define DGRPCRCI 0x60
2200
2201#define SHADDR 0x60 1898#define SHADDR 0x60
2202 1899
2203#define NEGOADDR 0x60 1900#define NEGOADDR 0x60
2204 1901
2205#define NEGPERIOD 0x61 1902#define DGRPCRCI 0x60
2206 1903
2207#define NEGOFFSET 0x62 1904#define NEGPERIOD 0x61
2208 1905
2209#define PACKCRCI 0x62 1906#define PACKCRCI 0x62
2210 1907
1908#define NEGOFFSET 0x62
1909
2211#define NEGPPROPTS 0x63 1910#define NEGPPROPTS 0x63
2212#define PPROPT_PACE 0x08 1911#define PPROPT_PACE 0x08
2213#define PPROPT_QAS 0x04 1912#define PPROPT_QAS 0x04
@@ -2253,8 +1952,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2253 1952
2254#define SELOID 0x6b 1953#define SELOID 0x6b
2255 1954
2256#define FAIRNESS 0x6c
2257
2258#define PLL400CTL0 0x6c 1955#define PLL400CTL0 0x6c
2259#define PLL_VCOSEL 0x80 1956#define PLL_VCOSEL 0x80
2260#define PLL_PWDN 0x40 1957#define PLL_PWDN 0x40
@@ -2264,6 +1961,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2264#define PLL_DLPF 0x02 1961#define PLL_DLPF 0x02
2265#define PLL_ENFBM 0x01 1962#define PLL_ENFBM 0x01
2266 1963
1964#define FAIRNESS 0x6c
1965
2267#define PLL400CTL1 0x6d 1966#define PLL400CTL1 0x6d
2268#define PLL_CNTEN 0x80 1967#define PLL_CNTEN 0x80
2269#define PLL_CNTCLR 0x40 1968#define PLL_CNTCLR 0x40
@@ -2275,25 +1974,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2275 1974
2276#define HADDR 0x70 1975#define HADDR 0x70
2277 1976
2278#define HODMAADR 0x70
2279
2280#define PLLDELAY 0x70 1977#define PLLDELAY 0x70
2281#define SPLIT_DROP_REQ 0x80 1978#define SPLIT_DROP_REQ 0x80
2282 1979
2283#define HCNT 0x78 1980#define HODMAADR 0x70
2284 1981
2285#define HODMACNT 0x78 1982#define HODMACNT 0x78
2286 1983
2287#define HODMAEN 0x7a 1984#define HCNT 0x78
2288 1985
2289#define SGHADDR 0x7c 1986#define HODMAEN 0x7a
2290 1987
2291#define SCBHADDR 0x7c 1988#define SCBHADDR 0x7c
2292 1989
2293#define SGHCNT 0x84 1990#define SGHADDR 0x7c
2294 1991
2295#define SCBHCNT 0x84 1992#define SCBHCNT 0x84
2296 1993
1994#define SGHCNT 0x84
1995
2297#define DFF_THRSH 0x88 1996#define DFF_THRSH 0x88
2298#define WR_DFTHRSH 0x70 1997#define WR_DFTHRSH 0x70
2299#define RD_DFTHRSH 0x07 1998#define RD_DFTHRSH 0x07
@@ -2326,10 +2025,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2326 2025
2327#define CMCRXMSG0 0x90 2026#define CMCRXMSG0 0x90
2328 2027
2329#define OVLYRXMSG0 0x90
2330
2331#define DCHRXMSG0 0x90
2332
2333#define ROENABLE 0x90 2028#define ROENABLE 0x90
2334#define MSIROEN 0x20 2029#define MSIROEN 0x20
2335#define OVLYROEN 0x10 2030#define OVLYROEN 0x10
@@ -2338,11 +2033,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2338#define DCH1ROEN 0x02 2033#define DCH1ROEN 0x02
2339#define DCH0ROEN 0x01 2034#define DCH0ROEN 0x01
2340 2035
2341#define OVLYRXMSG1 0x91 2036#define OVLYRXMSG0 0x90
2342 2037
2343#define CMCRXMSG1 0x91 2038#define DCHRXMSG0 0x90
2344 2039
2345#define DCHRXMSG1 0x91 2040#define OVLYRXMSG1 0x91
2346 2041
2347#define NSENABLE 0x91 2042#define NSENABLE 0x91
2348#define MSINSEN 0x20 2043#define MSINSEN 0x20
@@ -2352,6 +2047,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2352#define DCH1NSEN 0x02 2047#define DCH1NSEN 0x02
2353#define DCH0NSEN 0x01 2048#define DCH0NSEN 0x01
2354 2049
2050#define CMCRXMSG1 0x91
2051
2052#define DCHRXMSG1 0x91
2053
2355#define DCHRXMSG2 0x92 2054#define DCHRXMSG2 0x92
2356 2055
2357#define CMCRXMSG2 0x92 2056#define CMCRXMSG2 0x92
@@ -2375,24 +2074,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2375#define TSCSERREN 0x02 2074#define TSCSERREN 0x02
2376#define CMPABCDIS 0x01 2075#define CMPABCDIS 0x01
2377 2076
2378#define CMCSEQBCNT 0x94
2379
2380#define OVLYSEQBCNT 0x94 2077#define OVLYSEQBCNT 0x94
2381 2078
2382#define DCHSEQBCNT 0x94 2079#define DCHSEQBCNT 0x94
2383 2080
2081#define CMCSEQBCNT 0x94
2082
2083#define CMCSPLTSTAT0 0x96
2084
2384#define DCHSPLTSTAT0 0x96 2085#define DCHSPLTSTAT0 0x96
2385 2086
2386#define OVLYSPLTSTAT0 0x96 2087#define OVLYSPLTSTAT0 0x96
2387 2088
2388#define CMCSPLTSTAT0 0x96 2089#define CMCSPLTSTAT1 0x97
2389 2090
2390#define OVLYSPLTSTAT1 0x97 2091#define OVLYSPLTSTAT1 0x97
2391 2092
2392#define DCHSPLTSTAT1 0x97 2093#define DCHSPLTSTAT1 0x97
2393 2094
2394#define CMCSPLTSTAT1 0x97
2395
2396#define SGRXMSG0 0x98 2095#define SGRXMSG0 0x98
2397#define CDNUM 0xf8 2096#define CDNUM 0xf8
2398#define CFNUM 0x07 2097#define CFNUM 0x07
@@ -2420,15 +2119,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2420#define TAG_NUM 0x1f 2119#define TAG_NUM 0x1f
2421#define RLXORD 0x10 2120#define RLXORD 0x10
2422 2121
2122#define SGSEQBCNT 0x9c
2123
2423#define SLVSPLTOUTATTR0 0x9c 2124#define SLVSPLTOUTATTR0 0x9c
2424#define LOWER_BCNT 0xff 2125#define LOWER_BCNT 0xff
2425 2126
2426#define SGSEQBCNT 0x9c
2427
2428#define SLVSPLTOUTATTR1 0x9d 2127#define SLVSPLTOUTATTR1 0x9d
2429#define CMPLT_DNUM 0xf8 2128#define CMPLT_DNUM 0xf8
2430#define CMPLT_FNUM 0x07 2129#define CMPLT_FNUM 0x07
2431 2130
2131#define SLVSPLTOUTATTR2 0x9e
2132#define CMPLT_BNUM 0xff
2133
2432#define SGSPLTSTAT0 0x9e 2134#define SGSPLTSTAT0 0x9e
2433#define STAETERM 0x80 2135#define STAETERM 0x80
2434#define SCBCERR 0x40 2136#define SCBCERR 0x40
@@ -2439,9 +2141,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2439#define RXSCEMSG 0x02 2141#define RXSCEMSG 0x02
2440#define RXSPLTRSP 0x01 2142#define RXSPLTRSP 0x01
2441 2143
2442#define SLVSPLTOUTATTR2 0x9e
2443#define CMPLT_BNUM 0xff
2444
2445#define SGSPLTSTAT1 0x9f 2144#define SGSPLTSTAT1 0x9f
2446#define RXDATABUCKET 0x01 2145#define RXDATABUCKET 0x01
2447 2146
@@ -2497,10 +2196,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2497 2196
2498#define CCSGADDR 0xac 2197#define CCSGADDR 0xac
2499 2198
2500#define CCSCBADDR 0xac
2501
2502#define CCSCBADR_BK 0xac 2199#define CCSCBADR_BK 0xac
2503 2200
2201#define CCSCBADDR 0xac
2202
2504#define CMC_RAMBIST 0xad 2203#define CMC_RAMBIST 0xad
2505#define SG_ELEMENT_SIZE 0x80 2204#define SG_ELEMENT_SIZE 0x80
2506#define SCBRAMBIST_FAIL 0x40 2205#define SCBRAMBIST_FAIL 0x40
@@ -2554,9 +2253,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2554#define SEEDAT 0xbc 2253#define SEEDAT 0xbc
2555 2254
2556#define SEECTL 0xbe 2255#define SEECTL 0xbe
2557#define SEEOP_EWDS 0x40
2558#define SEEOP_WALL 0x40 2256#define SEEOP_WALL 0x40
2559#define SEEOP_EWEN 0x40 2257#define SEEOP_EWEN 0x40
2258#define SEEOP_EWDS 0x40
2560#define SEEOPCODE 0x70 2259#define SEEOPCODE 0x70
2561#define SEERST 0x02 2260#define SEERST 0x02
2562#define SEESTART 0x01 2261#define SEESTART 0x01
@@ -2573,25 +2272,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2573 2272
2574#define SCBCNT 0xbf 2273#define SCBCNT 0xbf
2575 2274
2275#define DFWADDR 0xc0
2276
2576#define DSPFLTRCTL 0xc0 2277#define DSPFLTRCTL 0xc0
2577#define FLTRDISABLE 0x20 2278#define FLTRDISABLE 0x20
2578#define EDGESENSE 0x10 2279#define EDGESENSE 0x10
2579#define DSPFCNTSEL 0x0f 2280#define DSPFCNTSEL 0x0f
2580 2281
2581#define DFWADDR 0xc0
2582
2583#define DSPDATACTL 0xc1 2282#define DSPDATACTL 0xc1
2584#define BYPASSENAB 0x80 2283#define BYPASSENAB 0x80
2585#define DESQDIS 0x10 2284#define DESQDIS 0x10
2586#define RCVROFFSTDIS 0x04 2285#define RCVROFFSTDIS 0x04
2587#define XMITOFFSTDIS 0x02 2286#define XMITOFFSTDIS 0x02
2588 2287
2288#define DFRADDR 0xc2
2289
2589#define DSPREQCTL 0xc2 2290#define DSPREQCTL 0xc2
2590#define MANREQCTL 0xc0 2291#define MANREQCTL 0xc0
2591#define MANREQDLY 0x3f 2292#define MANREQDLY 0x3f
2592 2293
2593#define DFRADDR 0xc2
2594
2595#define DSPACKCTL 0xc3 2294#define DSPACKCTL 0xc3
2596#define MANACKCTL 0xc0 2295#define MANACKCTL 0xc0
2597#define MANACKDLY 0x3f 2296#define MANACKDLY 0x3f
@@ -2612,14 +2311,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2612 2311
2613#define WRTBIASCALC 0xc7 2312#define WRTBIASCALC 0xc7
2614 2313
2615#define DFPTRS 0xc8
2616
2617#define RCVRBIASCALC 0xc8 2314#define RCVRBIASCALC 0xc8
2618 2315
2619#define DFBKPTR 0xc9 2316#define DFPTRS 0xc8
2620 2317
2621#define SKEWCALC 0xc9 2318#define SKEWCALC 0xc9
2622 2319
2320#define DFBKPTR 0xc9
2321
2623#define DFDBCTL 0xcb 2322#define DFDBCTL 0xcb
2624#define DFF_CIO_WR_RDY 0x20 2323#define DFF_CIO_WR_RDY 0x20
2625#define DFF_CIO_RD_RDY 0x10 2324#define DFF_CIO_RD_RDY 0x10
@@ -2704,12 +2403,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2704 2403
2705#define ACCUM_SAVE 0xfa 2404#define ACCUM_SAVE 0xfa
2706 2405
2406#define WAITING_SCB_TAILS 0x100
2407
2707#define AHD_PCI_CONFIG_BASE 0x100 2408#define AHD_PCI_CONFIG_BASE 0x100
2708 2409
2709#define SRAM_BASE 0x100 2410#define SRAM_BASE 0x100
2710 2411
2711#define WAITING_SCB_TAILS 0x100
2712
2713#define WAITING_TID_HEAD 0x120 2412#define WAITING_TID_HEAD 0x120
2714 2413
2715#define WAITING_TID_TAIL 0x122 2414#define WAITING_TID_TAIL 0x122
@@ -2738,8 +2437,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2738#define PRELOADEN 0x80 2437#define PRELOADEN 0x80
2739#define WIDEODD 0x40 2438#define WIDEODD 0x40
2740#define SCSIEN 0x20 2439#define SCSIEN 0x20
2741#define SDMAENACK 0x10
2742#define SDMAEN 0x10 2440#define SDMAEN 0x10
2441#define SDMAENACK 0x10
2743#define HDMAEN 0x08 2442#define HDMAEN 0x08
2744#define HDMAENACK 0x08 2443#define HDMAENACK 0x08
2745#define DIRECTION 0x04 2444#define DIRECTION 0x04
@@ -2837,12 +2536,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2837 2536
2838#define MK_MESSAGE_SCSIID 0x162 2537#define MK_MESSAGE_SCSIID 0x162
2839 2538
2539#define SCB_BASE 0x180
2540
2840#define SCB_RESIDUAL_DATACNT 0x180 2541#define SCB_RESIDUAL_DATACNT 0x180
2841#define SCB_CDB_STORE 0x180 2542#define SCB_CDB_STORE 0x180
2842#define SCB_HOST_CDB_PTR 0x180 2543#define SCB_HOST_CDB_PTR 0x180
2843 2544
2844#define SCB_BASE 0x180
2845
2846#define SCB_RESIDUAL_SGPTR 0x184 2545#define SCB_RESIDUAL_SGPTR 0x184
2847#define SG_ADDR_MASK 0xf8 2546#define SG_ADDR_MASK 0xf8
2848#define SG_OVERRUN_RESID 0x02 2547#define SG_OVERRUN_RESID 0x02
@@ -2910,17 +2609,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2910#define SCB_DISCONNECTED_LISTS 0x1b8 2609#define SCB_DISCONNECTED_LISTS 0x1b8
2911 2610
2912 2611
2913#define CMD_GROUP_CODE_SHIFT 0x05
2914#define STIMESEL_MIN 0x18
2915#define STIMESEL_SHIFT 0x03
2916#define INVALID_ADDR 0x80
2917#define AHD_PRECOMP_MASK 0x07
2918#define TARGET_DATA_IN 0x01
2919#define CCSCBADDR_MAX 0x80
2920#define NUMDSPS 0x14
2921#define SEEOP_EWEN_ADDR 0xc0
2922#define AHD_ANNEXCOL_PER_DEV0 0x04
2923#define DST_MODE_SHIFT 0x04
2924#define AHD_TIMER_MAX_US 0x18ffe7 2612#define AHD_TIMER_MAX_US 0x18ffe7
2925#define AHD_TIMER_MAX_TICKS 0xffff 2613#define AHD_TIMER_MAX_TICKS 0xffff
2926#define AHD_SENSE_BUFSIZE 0x100 2614#define AHD_SENSE_BUFSIZE 0x100
@@ -2955,32 +2643,43 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2955#define LUNLEN_SINGLE_LEVEL_LUN 0x0f 2643#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
2956#define NVRAM_SCB_OFFSET 0x2c 2644#define NVRAM_SCB_OFFSET 0x2c
2957#define STATUS_PKT_SENSE 0xff 2645#define STATUS_PKT_SENSE 0xff
2646#define CMD_GROUP_CODE_SHIFT 0x05
2958#define MAX_OFFSET_PACED_BUG 0x7f 2647#define MAX_OFFSET_PACED_BUG 0x7f
2959#define STIMESEL_BUG_ADJ 0x08 2648#define STIMESEL_BUG_ADJ 0x08
2649#define STIMESEL_MIN 0x18
2650#define STIMESEL_SHIFT 0x03
2960#define CCSGRAM_MAXSEGS 0x10 2651#define CCSGRAM_MAXSEGS 0x10
2652#define INVALID_ADDR 0x80
2961#define SEEOP_ERAL_ADDR 0x80 2653#define SEEOP_ERAL_ADDR 0x80
2962#define AHD_SLEWRATE_DEF_REVB 0x08 2654#define AHD_SLEWRATE_DEF_REVB 0x08
2963#define AHD_PRECOMP_CUTBACK_17 0x04 2655#define AHD_PRECOMP_CUTBACK_17 0x04
2656#define AHD_PRECOMP_MASK 0x07
2964#define SRC_MODE_SHIFT 0x00 2657#define SRC_MODE_SHIFT 0x00
2965#define PKT_OVERRUN_BUFSIZE 0x200 2658#define PKT_OVERRUN_BUFSIZE 0x200
2966#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 2659#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
2660#define TARGET_DATA_IN 0x01
2967#define HOST_MSG 0xff 2661#define HOST_MSG 0xff
2968#define MAX_OFFSET 0xfe 2662#define MAX_OFFSET 0xfe
2969#define BUS_16_BIT 0x01 2663#define BUS_16_BIT 0x01
2664#define CCSCBADDR_MAX 0x80
2665#define NUMDSPS 0x14
2666#define SEEOP_EWEN_ADDR 0xc0
2667#define AHD_ANNEXCOL_PER_DEV0 0x04
2668#define DST_MODE_SHIFT 0x04
2970 2669
2971 2670
2972/* Downloaded Constant Definitions */ 2671/* Downloaded Constant Definitions */
2973#define SG_SIZEOF 0x04
2974#define SG_PREFETCH_ALIGN_MASK 0x02
2975#define SG_PREFETCH_CNT_LIMIT 0x01
2976#define CACHELINE_MASK 0x07 2672#define CACHELINE_MASK 0x07
2977#define SCB_TRANSFER_SIZE 0x06 2673#define SCB_TRANSFER_SIZE 0x06
2978#define PKT_OVERRUN_BUFOFFSET 0x05 2674#define PKT_OVERRUN_BUFOFFSET 0x05
2675#define SG_SIZEOF 0x04
2979#define SG_PREFETCH_ADDR_MASK 0x03 2676#define SG_PREFETCH_ADDR_MASK 0x03
2677#define SG_PREFETCH_ALIGN_MASK 0x02
2678#define SG_PREFETCH_CNT_LIMIT 0x01
2980#define SG_PREFETCH_CNT 0x00 2679#define SG_PREFETCH_CNT 0x00
2981#define DOWNLOAD_CONST_COUNT 0x08 2680#define DOWNLOAD_CONST_COUNT 0x08
2982 2681
2983 2682
2984/* Exported Labels */ 2683/* Exported Labels */
2985#define LABEL_timer_isr 0x28b
2986#define LABEL_seq_isr 0x28f 2684#define LABEL_seq_isr 0x28f
2685#define LABEL_timer_isr 0x28b
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index c4c8a96bf5a3..f5ea715d6ac3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,18 +8,6 @@
8 8
9#include "aic79xx_osm.h" 9#include "aic79xx_osm.h"
10 10
11static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
12 { "SRC_MODE", 0x07, 0x07 },
13 { "DST_MODE", 0x70, 0x70 }
14};
15
16int
17ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
18{
19 return (ahd_print_register(MODE_PTR_parse_table, 2, "MODE_PTR",
20 0x00, regvalue, cur_col, wrap));
21}
22
23static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = { 11static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
24 { "SPLTINT", 0x01, 0x01 }, 12 { "SPLTINT", 0x01, 0x01 },
25 { "CMDCMPLT", 0x02, 0x02 }, 13 { "CMDCMPLT", 0x02, 0x02 },
@@ -39,110 +27,6 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
39 0x01, regvalue, cur_col, wrap)); 27 0x01, regvalue, cur_col, wrap));
40} 28}
41 29
42static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
43 { "NO_SEQINT", 0x00, 0xff },
44 { "BAD_PHASE", 0x01, 0xff },
45 { "SEND_REJECT", 0x02, 0xff },
46 { "PROTO_VIOLATION", 0x03, 0xff },
47 { "NO_MATCH", 0x04, 0xff },
48 { "IGN_WIDE_RES", 0x05, 0xff },
49 { "PDATA_REINIT", 0x06, 0xff },
50 { "HOST_MSG_LOOP", 0x07, 0xff },
51 { "BAD_STATUS", 0x08, 0xff },
52 { "DATA_OVERRUN", 0x09, 0xff },
53 { "MKMSG_FAILED", 0x0a, 0xff },
54 { "MISSED_BUSFREE", 0x0b, 0xff },
55 { "DUMP_CARD_STATE", 0x0c, 0xff },
56 { "ILLEGAL_PHASE", 0x0d, 0xff },
57 { "INVALID_SEQINT", 0x0e, 0xff },
58 { "CFG4ISTAT_INTR", 0x0f, 0xff },
59 { "STATUS_OVERRUN", 0x10, 0xff },
60 { "CFG4OVERRUN", 0x11, 0xff },
61 { "ENTERING_NONPACK", 0x12, 0xff },
62 { "TASKMGMT_FUNC_COMPLETE",0x13, 0xff },
63 { "TASKMGMT_CMD_CMPLT_OKAY",0x14, 0xff },
64 { "TRACEPOINT0", 0x15, 0xff },
65 { "TRACEPOINT1", 0x16, 0xff },
66 { "TRACEPOINT2", 0x17, 0xff },
67 { "TRACEPOINT3", 0x18, 0xff },
68 { "SAW_HWERR", 0x19, 0xff },
69 { "BAD_SCB_STATUS", 0x1a, 0xff }
70};
71
72int
73ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
74{
75 return (ahd_print_register(SEQINTCODE_parse_table, 27, "SEQINTCODE",
76 0x02, regvalue, cur_col, wrap));
77}
78
79static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
80 { "CLRSPLTINT", 0x01, 0x01 },
81 { "CLRCMDINT", 0x02, 0x02 },
82 { "CLRSEQINT", 0x04, 0x04 },
83 { "CLRSCSIINT", 0x08, 0x08 },
84 { "CLRPCIINT", 0x10, 0x10 },
85 { "CLRSWTMINT", 0x20, 0x20 },
86 { "CLRBRKADRINT", 0x40, 0x40 },
87 { "CLRHWERRINT", 0x80, 0x80 }
88};
89
90int
91ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
92{
93 return (ahd_print_register(CLRINT_parse_table, 8, "CLRINT",
94 0x03, regvalue, cur_col, wrap));
95}
96
97static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
98 { "DSCTMOUT", 0x02, 0x02 },
99 { "ILLOPCODE", 0x04, 0x04 },
100 { "SQPARERR", 0x08, 0x08 },
101 { "DPARERR", 0x10, 0x10 },
102 { "MPARERR", 0x20, 0x20 },
103 { "CIOACCESFAIL", 0x40, 0x40 },
104 { "CIOPARERR", 0x80, 0x80 }
105};
106
107int
108ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
109{
110 return (ahd_print_register(ERROR_parse_table, 7, "ERROR",
111 0x04, regvalue, cur_col, wrap));
112}
113
114static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
115 { "CHIPRST", 0x01, 0x01 },
116 { "CHIPRSTACK", 0x01, 0x01 },
117 { "INTEN", 0x02, 0x02 },
118 { "PAUSE", 0x04, 0x04 },
119 { "SWTIMER_START_B", 0x08, 0x08 },
120 { "SWINT", 0x10, 0x10 },
121 { "POWRDN", 0x40, 0x40 },
122 { "SEQ_RESET", 0x80, 0x80 }
123};
124
125int
126ahd_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
127{
128 return (ahd_print_register(HCNTRL_parse_table, 8, "HCNTRL",
129 0x05, regvalue, cur_col, wrap));
130}
131
132int
133ahd_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
134{
135 return (ahd_print_register(NULL, 0, "HNSCB_QOFF",
136 0x06, regvalue, cur_col, wrap));
137}
138
139int
140ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
141{
142 return (ahd_print_register(NULL, 0, "HESCB_QOFF",
143 0x08, regvalue, cur_col, wrap));
144}
145
146static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 30static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
147 { "ENINT_COALESCE", 0x40, 0x40 }, 31 { "ENINT_COALESCE", 0x40, 0x40 },
148 { "HOST_TQINPOS", 0x80, 0x80 } 32 { "HOST_TQINPOS", 0x80, 0x80 }
@@ -170,77 +54,6 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
170 0x0c, regvalue, cur_col, wrap)); 54 0x0c, regvalue, cur_col, wrap));
171} 55}
172 56
173static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
174 { "CLRSEQ_SPLTINT", 0x01, 0x01 },
175 { "CLRSEQ_PCIINT", 0x02, 0x02 },
176 { "CLRSEQ_SCSIINT", 0x04, 0x04 },
177 { "CLRSEQ_SEQINT", 0x08, 0x08 },
178 { "CLRSEQ_SWTMRTO", 0x10, 0x10 }
179};
180
181int
182ahd_clrseqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
183{
184 return (ahd_print_register(CLRSEQINTSTAT_parse_table, 5, "CLRSEQINTSTAT",
185 0x0c, regvalue, cur_col, wrap));
186}
187
188int
189ahd_swtimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
190{
191 return (ahd_print_register(NULL, 0, "SWTIMER",
192 0x0e, regvalue, cur_col, wrap));
193}
194
195int
196ahd_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
197{
198 return (ahd_print_register(NULL, 0, "SNSCB_QOFF",
199 0x10, regvalue, cur_col, wrap));
200}
201
202int
203ahd_sescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
204{
205 return (ahd_print_register(NULL, 0, "SESCB_QOFF",
206 0x12, regvalue, cur_col, wrap));
207}
208
209int
210ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
211{
212 return (ahd_print_register(NULL, 0, "SDSCB_QOFF",
213 0x14, regvalue, cur_col, wrap));
214}
215
216static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
217 { "SCB_QSIZE_4", 0x00, 0x0f },
218 { "SCB_QSIZE_8", 0x01, 0x0f },
219 { "SCB_QSIZE_16", 0x02, 0x0f },
220 { "SCB_QSIZE_32", 0x03, 0x0f },
221 { "SCB_QSIZE_64", 0x04, 0x0f },
222 { "SCB_QSIZE_128", 0x05, 0x0f },
223 { "SCB_QSIZE_256", 0x06, 0x0f },
224 { "SCB_QSIZE_512", 0x07, 0x0f },
225 { "SCB_QSIZE_1024", 0x08, 0x0f },
226 { "SCB_QSIZE_2048", 0x09, 0x0f },
227 { "SCB_QSIZE_4096", 0x0a, 0x0f },
228 { "SCB_QSIZE_8192", 0x0b, 0x0f },
229 { "SCB_QSIZE_16384", 0x0c, 0x0f },
230 { "SCB_QSIZE", 0x0f, 0x0f },
231 { "HS_MAILBOX_ACT", 0x10, 0x10 },
232 { "SDSCB_ROLLOVR", 0x20, 0x20 },
233 { "NEW_SCB_AVAIL", 0x40, 0x40 },
234 { "EMPTY_SCB_AVAIL", 0x80, 0x80 }
235};
236
237int
238ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
239{
240 return (ahd_print_register(QOFF_CTLSTA_parse_table, 18, "QOFF_CTLSTA",
241 0x16, regvalue, cur_col, wrap));
242}
243
244static const ahd_reg_parse_entry_t INTCTL_parse_table[] = { 57static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
245 { "SPLTINTEN", 0x01, 0x01 }, 58 { "SPLTINTEN", 0x01, 0x01 },
246 { "SEQINTEN", 0x02, 0x02 }, 59 { "SEQINTEN", 0x02, 0x02 },
@@ -280,22 +93,6 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
280 0x19, regvalue, cur_col, wrap)); 93 0x19, regvalue, cur_col, wrap));
281} 94}
282 95
283static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
284 { "CIOPARCKEN", 0x01, 0x01 },
285 { "DISABLE_TWATE", 0x02, 0x02 },
286 { "EXTREQLCK", 0x10, 0x10 },
287 { "MPARCKEN", 0x20, 0x20 },
288 { "DPARCKEN", 0x40, 0x40 },
289 { "CACHETHEN", 0x80, 0x80 }
290};
291
292int
293ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
294{
295 return (ahd_print_register(DSCOMMAND0_parse_table, 6, "DSCOMMAND0",
296 0x19, regvalue, cur_col, wrap));
297}
298
299static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = { 96static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
300 { "FIFOEMP", 0x01, 0x01 }, 97 { "FIFOEMP", 0x01, 0x01 },
301 { "FIFOFULL", 0x02, 0x02 }, 98 { "FIFOFULL", 0x02, 0x02 },
@@ -327,146 +124,6 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
327 0x1b, regvalue, cur_col, wrap)); 124 0x1b, regvalue, cur_col, wrap));
328} 125}
329 126
330static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
331 { "LAST_SEG", 0x02, 0x02 },
332 { "ODD_SEG", 0x04, 0x04 },
333 { "SG_ADDR_MASK", 0xf8, 0xf8 }
334};
335
336int
337ahd_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
338{
339 return (ahd_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
340 0x1b, regvalue, cur_col, wrap));
341}
342
343int
344ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
345{
346 return (ahd_print_register(NULL, 0, "LQIN",
347 0x20, regvalue, cur_col, wrap));
348}
349
350int
351ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
352{
353 return (ahd_print_register(NULL, 0, "LUNPTR",
354 0x22, regvalue, cur_col, wrap));
355}
356
357int
358ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
359{
360 return (ahd_print_register(NULL, 0, "CMDLENPTR",
361 0x25, regvalue, cur_col, wrap));
362}
363
364int
365ahd_attrptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
366{
367 return (ahd_print_register(NULL, 0, "ATTRPTR",
368 0x26, regvalue, cur_col, wrap));
369}
370
371int
372ahd_flagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
373{
374 return (ahd_print_register(NULL, 0, "FLAGPTR",
375 0x27, regvalue, cur_col, wrap));
376}
377
378int
379ahd_cmdptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
380{
381 return (ahd_print_register(NULL, 0, "CMDPTR",
382 0x28, regvalue, cur_col, wrap));
383}
384
385int
386ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
387{
388 return (ahd_print_register(NULL, 0, "QNEXTPTR",
389 0x29, regvalue, cur_col, wrap));
390}
391
392int
393ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
394{
395 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
396 0x2b, regvalue, cur_col, wrap));
397}
398
399int
400ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
401{
402 return (ahd_print_register(NULL, 0, "ABRTBITPTR",
403 0x2c, regvalue, cur_col, wrap));
404}
405
406static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
407 { "ILUNLEN", 0x0f, 0x0f },
408 { "TLUNLEN", 0xf0, 0xf0 }
409};
410
411int
412ahd_lunlen_print(u_int regvalue, u_int *cur_col, u_int wrap)
413{
414 return (ahd_print_register(LUNLEN_parse_table, 2, "LUNLEN",
415 0x30, regvalue, cur_col, wrap));
416}
417
418int
419ahd_cdblimit_print(u_int regvalue, u_int *cur_col, u_int wrap)
420{
421 return (ahd_print_register(NULL, 0, "CDBLIMIT",
422 0x31, regvalue, cur_col, wrap));
423}
424
425int
426ahd_maxcmd_print(u_int regvalue, u_int *cur_col, u_int wrap)
427{
428 return (ahd_print_register(NULL, 0, "MAXCMD",
429 0x32, regvalue, cur_col, wrap));
430}
431
432int
433ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
434{
435 return (ahd_print_register(NULL, 0, "MAXCMDCNT",
436 0x33, regvalue, cur_col, wrap));
437}
438
439static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
440 { "ABORTPENDING", 0x01, 0x01 },
441 { "SINGLECMD", 0x02, 0x02 },
442 { "PCI2PCI", 0x04, 0x04 }
443};
444
445int
446ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
447{
448 return (ahd_print_register(LQCTL1_parse_table, 3, "LQCTL1",
449 0x38, regvalue, cur_col, wrap));
450}
451
452static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
453 { "LQOPAUSE", 0x01, 0x01 },
454 { "LQOTOIDLE", 0x02, 0x02 },
455 { "LQOCONTINUE", 0x04, 0x04 },
456 { "LQORETRY", 0x08, 0x08 },
457 { "LQIPAUSE", 0x10, 0x10 },
458 { "LQITOIDLE", 0x20, 0x20 },
459 { "LQICONTINUE", 0x40, 0x40 },
460 { "LQIRETRY", 0x80, 0x80 }
461};
462
463int
464ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
465{
466 return (ahd_print_register(LQCTL2_parse_table, 8, "LQCTL2",
467 0x39, regvalue, cur_col, wrap));
468}
469
470static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = { 127static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
471 { "SCSIRSTO", 0x01, 0x01 }, 128 { "SCSIRSTO", 0x01, 0x01 },
472 { "FORCEBUSFREE", 0x10, 0x10 }, 129 { "FORCEBUSFREE", 0x10, 0x10 },
@@ -498,37 +155,6 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
498 0x3b, regvalue, cur_col, wrap)); 155 0x3b, regvalue, cur_col, wrap));
499} 156}
500 157
501static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
502 { "SPIOEN", 0x08, 0x08 },
503 { "BIOSCANCELEN", 0x10, 0x10 },
504 { "DFPEXP", 0x40, 0x40 },
505 { "DFON", 0x80, 0x80 }
506};
507
508int
509ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
510{
511 return (ahd_print_register(SXFRCTL0_parse_table, 4, "SXFRCTL0",
512 0x3c, regvalue, cur_col, wrap));
513}
514
515static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
516 { "STPWEN", 0x01, 0x01 },
517 { "ACTNEGEN", 0x02, 0x02 },
518 { "ENSTIMER", 0x04, 0x04 },
519 { "STIMESEL", 0x18, 0x18 },
520 { "ENSPCHK", 0x20, 0x20 },
521 { "ENSACHK", 0x40, 0x40 },
522 { "BITBUCKET", 0x80, 0x80 }
523};
524
525int
526ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
527{
528 return (ahd_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
529 0x3d, regvalue, cur_col, wrap));
530}
531
532static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = { 158static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
533 { "CURRFIFO_0", 0x00, 0x03 }, 159 { "CURRFIFO_0", 0x00, 0x03 },
534 { "CURRFIFO_1", 0x01, 0x03 }, 160 { "CURRFIFO_1", 0x01, 0x03 },
@@ -545,40 +171,6 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
545 0x3f, regvalue, cur_col, wrap)); 171 0x3f, regvalue, cur_col, wrap));
546} 172}
547 173
548int
549ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
550{
551 return (ahd_print_register(NULL, 0, "MULTARGID",
552 0x40, regvalue, cur_col, wrap));
553}
554
555static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
556 { "P_DATAOUT", 0x00, 0xe0 },
557 { "P_DATAOUT_DT", 0x20, 0xe0 },
558 { "P_DATAIN", 0x40, 0xe0 },
559 { "P_DATAIN_DT", 0x60, 0xe0 },
560 { "P_COMMAND", 0x80, 0xe0 },
561 { "P_MESGOUT", 0xa0, 0xe0 },
562 { "P_STATUS", 0xc0, 0xe0 },
563 { "P_MESGIN", 0xe0, 0xe0 },
564 { "ACKO", 0x01, 0x01 },
565 { "REQO", 0x02, 0x02 },
566 { "BSYO", 0x04, 0x04 },
567 { "SELO", 0x08, 0x08 },
568 { "ATNO", 0x10, 0x10 },
569 { "MSGO", 0x20, 0x20 },
570 { "IOO", 0x40, 0x40 },
571 { "CDO", 0x80, 0x80 },
572 { "PHASE_MASK", 0xe0, 0xe0 }
573};
574
575int
576ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
577{
578 return (ahd_print_register(SCSISIGO_parse_table, 17, "SCSISIGO",
579 0x40, regvalue, cur_col, wrap));
580}
581
582static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = { 174static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
583 { "P_DATAOUT", 0x00, 0xe0 }, 175 { "P_DATAOUT", 0x00, 0xe0 },
584 { "P_DATAOUT_DT", 0x20, 0xe0 }, 176 { "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -624,31 +216,12 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
624} 216}
625 217
626int 218int
627ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
628{
629 return (ahd_print_register(NULL, 0, "SCSIDAT",
630 0x44, regvalue, cur_col, wrap));
631}
632
633int
634ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap) 219ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
635{ 220{
636 return (ahd_print_register(NULL, 0, "SCSIBUS", 221 return (ahd_print_register(NULL, 0, "SCSIBUS",
637 0x46, regvalue, cur_col, wrap)); 222 0x46, regvalue, cur_col, wrap));
638} 223}
639 224
640static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
641 { "TARGID", 0x0f, 0x0f },
642 { "CLKOUT", 0x80, 0x80 }
643};
644
645int
646ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
647{
648 return (ahd_print_register(TARGIDIN_parse_table, 2, "TARGIDIN",
649 0x48, regvalue, cur_col, wrap));
650}
651
652static const ahd_reg_parse_entry_t SELID_parse_table[] = { 225static const ahd_reg_parse_entry_t SELID_parse_table[] = {
653 { "ONEBIT", 0x08, 0x08 }, 226 { "ONEBIT", 0x08, 0x08 },
654 { "SELID_MASK", 0xf0, 0xf0 } 227 { "SELID_MASK", 0xf0, 0xf0 }
@@ -661,38 +234,6 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
661 0x49, regvalue, cur_col, wrap)); 234 0x49, regvalue, cur_col, wrap));
662} 235}
663 236
664static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
665 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
666 { "ENDGFORMCHK", 0x04, 0x04 },
667 { "BUSFREEREV", 0x10, 0x10 },
668 { "BIASCANCTL", 0x20, 0x20 },
669 { "AUTOACKEN", 0x40, 0x40 },
670 { "BIOSCANCTL", 0x80, 0x80 },
671 { "OPTIONMODE_DEFAULTS",0x02, 0x02 }
672};
673
674int
675ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
676{
677 return (ahd_print_register(OPTIONMODE_parse_table, 7, "OPTIONMODE",
678 0x4a, regvalue, cur_col, wrap));
679}
680
681static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
682 { "SELWIDE", 0x02, 0x02 },
683 { "ENAB20", 0x04, 0x04 },
684 { "ENAB40", 0x08, 0x08 },
685 { "DIAGLEDON", 0x40, 0x40 },
686 { "DIAGLEDEN", 0x80, 0x80 }
687};
688
689int
690ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
691{
692 return (ahd_print_register(SBLKCTL_parse_table, 5, "SBLKCTL",
693 0x4a, regvalue, cur_col, wrap));
694}
695
696static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = { 237static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
697 { "ARBDO", 0x01, 0x01 }, 238 { "ARBDO", 0x01, 0x01 },
698 { "SPIORDY", 0x02, 0x02 }, 239 { "SPIORDY", 0x02, 0x02 },
@@ -728,23 +269,6 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
728 0x4b, regvalue, cur_col, wrap)); 269 0x4b, regvalue, cur_col, wrap));
729} 270}
730 271
731static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
732 { "CLRARBDO", 0x01, 0x01 },
733 { "CLRSPIORDY", 0x02, 0x02 },
734 { "CLROVERRUN", 0x04, 0x04 },
735 { "CLRIOERR", 0x08, 0x08 },
736 { "CLRSELINGO", 0x10, 0x10 },
737 { "CLRSELDI", 0x20, 0x20 },
738 { "CLRSELDO", 0x40, 0x40 }
739};
740
741int
742ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
743{
744 return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
745 0x4b, regvalue, cur_col, wrap));
746}
747
748static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = { 272static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
749 { "REQINIT", 0x01, 0x01 }, 273 { "REQINIT", 0x01, 0x01 },
750 { "STRB2FAST", 0x02, 0x02 }, 274 { "STRB2FAST", 0x02, 0x02 },
@@ -763,23 +287,6 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
763 0x4c, regvalue, cur_col, wrap)); 287 0x4c, regvalue, cur_col, wrap));
764} 288}
765 289
766static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
767 { "CLRREQINIT", 0x01, 0x01 },
768 { "CLRSTRB2FAST", 0x02, 0x02 },
769 { "CLRSCSIPERR", 0x04, 0x04 },
770 { "CLRBUSFREE", 0x08, 0x08 },
771 { "CLRSCSIRSTI", 0x20, 0x20 },
772 { "CLRATNO", 0x40, 0x40 },
773 { "CLRSELTIMEO", 0x80, 0x80 }
774};
775
776int
777ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
778{
779 return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
780 0x4c, regvalue, cur_col, wrap));
781}
782
783static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = { 290static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
784 { "BUSFREE_LQO", 0x40, 0xc0 }, 291 { "BUSFREE_LQO", 0x40, 0xc0 },
785 { "BUSFREE_DFF0", 0x80, 0xc0 }, 292 { "BUSFREE_DFF0", 0x80, 0xc0 },
@@ -800,20 +307,6 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
800 0x4d, regvalue, cur_col, wrap)); 307 0x4d, regvalue, cur_col, wrap));
801} 308}
802 309
803static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
804 { "CLRDMADONE", 0x01, 0x01 },
805 { "CLRSDONE", 0x02, 0x02 },
806 { "CLRWIDE_RES", 0x04, 0x04 },
807 { "CLRNONPACKREQ", 0x20, 0x20 }
808};
809
810int
811ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
812{
813 return (ahd_print_register(CLRSINT2_parse_table, 4, "CLRSINT2",
814 0x4d, regvalue, cur_col, wrap));
815}
816
817static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = { 310static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
818 { "DTERR", 0x01, 0x01 }, 311 { "DTERR", 0x01, 0x01 },
819 { "DGFORMERR", 0x02, 0x02 }, 312 { "DGFORMERR", 0x02, 0x02 },
@@ -833,26 +326,12 @@ ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap)
833} 326}
834 327
835int 328int
836ahd_lqistate_print(u_int regvalue, u_int *cur_col, u_int wrap)
837{
838 return (ahd_print_register(NULL, 0, "LQISTATE",
839 0x4e, regvalue, cur_col, wrap));
840}
841
842int
843ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 329ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
844{ 330{
845 return (ahd_print_register(NULL, 0, "SOFFCNT", 331 return (ahd_print_register(NULL, 0, "SOFFCNT",
846 0x4f, regvalue, cur_col, wrap)); 332 0x4f, regvalue, cur_col, wrap));
847} 333}
848 334
849int
850ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
851{
852 return (ahd_print_register(NULL, 0, "LQOSTATE",
853 0x4f, regvalue, cur_col, wrap));
854}
855
856static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = { 335static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
857 { "LQIATNCMD", 0x01, 0x01 }, 336 { "LQIATNCMD", 0x01, 0x01 },
858 { "LQIATNLQ", 0x02, 0x02 }, 337 { "LQIATNLQ", 0x02, 0x02 },
@@ -869,56 +348,6 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
869 0x50, regvalue, cur_col, wrap)); 348 0x50, regvalue, cur_col, wrap));
870} 349}
871 350
872static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
873 { "ENLQIATNCMD", 0x01, 0x01 },
874 { "ENLQIATNLQ", 0x02, 0x02 },
875 { "ENLQIBADLQT", 0x04, 0x04 },
876 { "ENLQICRCT2", 0x08, 0x08 },
877 { "ENLQICRCT1", 0x10, 0x10 },
878 { "ENLQIATNQASK", 0x20, 0x20 }
879};
880
881int
882ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
883{
884 return (ahd_print_register(LQIMODE0_parse_table, 6, "LQIMODE0",
885 0x50, regvalue, cur_col, wrap));
886}
887
888static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
889 { "CLRLQIATNCMD", 0x01, 0x01 },
890 { "CLRLQIATNLQ", 0x02, 0x02 },
891 { "CLRLQIBADLQT", 0x04, 0x04 },
892 { "CLRLQICRCT2", 0x08, 0x08 },
893 { "CLRLQICRCT1", 0x10, 0x10 },
894 { "CLRLQIATNQAS", 0x20, 0x20 }
895};
896
897int
898ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
899{
900 return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
901 0x50, regvalue, cur_col, wrap));
902}
903
904static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
905 { "ENLQIOVERI_NLQ", 0x01, 0x01 },
906 { "ENLQIOVERI_LQ", 0x02, 0x02 },
907 { "ENLQIBADLQI", 0x04, 0x04 },
908 { "ENLQICRCI_NLQ", 0x08, 0x08 },
909 { "ENLQICRCI_LQ", 0x10, 0x10 },
910 { "ENLIQABORT", 0x20, 0x20 },
911 { "ENLQIPHASE_NLQ", 0x40, 0x40 },
912 { "ENLQIPHASE_LQ", 0x80, 0x80 }
913};
914
915int
916ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
917{
918 return (ahd_print_register(LQIMODE1_parse_table, 8, "LQIMODE1",
919 0x51, regvalue, cur_col, wrap));
920}
921
922static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = { 351static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
923 { "LQIOVERI_NLQ", 0x01, 0x01 }, 352 { "LQIOVERI_NLQ", 0x01, 0x01 },
924 { "LQIOVERI_LQ", 0x02, 0x02 }, 353 { "LQIOVERI_LQ", 0x02, 0x02 },
@@ -937,24 +366,6 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
937 0x51, regvalue, cur_col, wrap)); 366 0x51, regvalue, cur_col, wrap));
938} 367}
939 368
940static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
941 { "CLRLQIOVERI_NLQ", 0x01, 0x01 },
942 { "CLRLQIOVERI_LQ", 0x02, 0x02 },
943 { "CLRLQIBADLQI", 0x04, 0x04 },
944 { "CLRLQICRCI_NLQ", 0x08, 0x08 },
945 { "CLRLQICRCI_LQ", 0x10, 0x10 },
946 { "CLRLIQABORT", 0x20, 0x20 },
947 { "CLRLQIPHASE_NLQ", 0x40, 0x40 },
948 { "CLRLQIPHASE_LQ", 0x80, 0x80 }
949};
950
951int
952ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
953{
954 return (ahd_print_register(CLRLQIINT1_parse_table, 8, "CLRLQIINT1",
955 0x51, regvalue, cur_col, wrap));
956}
957
958static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = { 369static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
959 { "LQIGSAVAIL", 0x01, 0x01 }, 370 { "LQIGSAVAIL", 0x01, 0x01 },
960 { "LQISTOPCMD", 0x02, 0x02 }, 371 { "LQISTOPCMD", 0x02, 0x02 },
@@ -985,30 +396,6 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
985 0x53, regvalue, cur_col, wrap)); 396 0x53, regvalue, cur_col, wrap));
986} 397}
987 398
988static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
989 { "ENOSRAMPERR", 0x01, 0x01 },
990 { "ENNTRAMPERR", 0x02, 0x02 }
991};
992
993int
994ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
995{
996 return (ahd_print_register(SIMODE3_parse_table, 2, "SIMODE3",
997 0x53, regvalue, cur_col, wrap));
998}
999
1000static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
1001 { "CLROSRAMPERR", 0x01, 0x01 },
1002 { "CLRNTRAMPERR", 0x02, 0x02 }
1003};
1004
1005int
1006ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
1007{
1008 return (ahd_print_register(CLRSINT3_parse_table, 2, "CLRSINT3",
1009 0x53, regvalue, cur_col, wrap));
1010}
1011
1012static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = { 399static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
1013 { "LQOTCRC", 0x01, 0x01 }, 400 { "LQOTCRC", 0x01, 0x01 },
1014 { "LQOATNPKT", 0x02, 0x02 }, 401 { "LQOATNPKT", 0x02, 0x02 },
@@ -1024,51 +411,6 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1024 0x54, regvalue, cur_col, wrap)); 411 0x54, regvalue, cur_col, wrap));
1025} 412}
1026 413
1027static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
1028 { "CLRLQOTCRC", 0x01, 0x01 },
1029 { "CLRLQOATNPKT", 0x02, 0x02 },
1030 { "CLRLQOATNLQ", 0x04, 0x04 },
1031 { "CLRLQOSTOPT2", 0x08, 0x08 },
1032 { "CLRLQOTARGSCBPERR", 0x10, 0x10 }
1033};
1034
1035int
1036ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1037{
1038 return (ahd_print_register(CLRLQOINT0_parse_table, 5, "CLRLQOINT0",
1039 0x54, regvalue, cur_col, wrap));
1040}
1041
1042static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
1043 { "ENLQOTCRC", 0x01, 0x01 },
1044 { "ENLQOATNPKT", 0x02, 0x02 },
1045 { "ENLQOATNLQ", 0x04, 0x04 },
1046 { "ENLQOSTOPT2", 0x08, 0x08 },
1047 { "ENLQOTARGSCBPERR", 0x10, 0x10 }
1048};
1049
1050int
1051ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1052{
1053 return (ahd_print_register(LQOMODE0_parse_table, 5, "LQOMODE0",
1054 0x54, regvalue, cur_col, wrap));
1055}
1056
1057static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
1058 { "ENLQOPHACHGINPKT", 0x01, 0x01 },
1059 { "ENLQOBUSFREE", 0x02, 0x02 },
1060 { "ENLQOBADQAS", 0x04, 0x04 },
1061 { "ENLQOSTOPI2", 0x08, 0x08 },
1062 { "ENLQOINITSCBPERR", 0x10, 0x10 }
1063};
1064
1065int
1066ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1067{
1068 return (ahd_print_register(LQOMODE1_parse_table, 5, "LQOMODE1",
1069 0x55, regvalue, cur_col, wrap));
1070}
1071
1072static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = { 414static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
1073 { "LQOPHACHGINPKT", 0x01, 0x01 }, 415 { "LQOPHACHGINPKT", 0x01, 0x01 },
1074 { "LQOBUSFREE", 0x02, 0x02 }, 416 { "LQOBUSFREE", 0x02, 0x02 },
@@ -1084,21 +426,6 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1084 0x55, regvalue, cur_col, wrap)); 426 0x55, regvalue, cur_col, wrap));
1085} 427}
1086 428
1087static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
1088 { "CLRLQOPHACHGINPKT", 0x01, 0x01 },
1089 { "CLRLQOBUSFREE", 0x02, 0x02 },
1090 { "CLRLQOBADQAS", 0x04, 0x04 },
1091 { "CLRLQOSTOPI2", 0x08, 0x08 },
1092 { "CLRLQOINITSCBPERR", 0x10, 0x10 }
1093};
1094
1095int
1096ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1097{
1098 return (ahd_print_register(CLRLQOINT1_parse_table, 5, "CLRLQOINT1",
1099 0x55, regvalue, cur_col, wrap));
1100}
1101
1102static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = { 429static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
1103 { "LQOSTOP0", 0x01, 0x01 }, 430 { "LQOSTOP0", 0x01, 0x01 },
1104 { "LQOPHACHGOUTPKT", 0x02, 0x02 }, 431 { "LQOPHACHGOUTPKT", 0x02, 0x02 },
@@ -1113,13 +440,6 @@ ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1113 0x56, regvalue, cur_col, wrap)); 440 0x56, regvalue, cur_col, wrap));
1114} 441}
1115 442
1116int
1117ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1118{
1119 return (ahd_print_register(NULL, 0, "OS_SPACE_CNT",
1120 0x56, regvalue, cur_col, wrap));
1121}
1122
1123static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = { 443static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
1124 { "ENREQINIT", 0x01, 0x01 }, 444 { "ENREQINIT", 0x01, 0x01 },
1125 { "ENSTRB2FAST", 0x02, 0x02 }, 445 { "ENSTRB2FAST", 0x02, 0x02 },
@@ -1138,13 +458,6 @@ ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1138 0x57, regvalue, cur_col, wrap)); 458 0x57, regvalue, cur_col, wrap));
1139} 459}
1140 460
1141int
1142ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1143{
1144 return (ahd_print_register(NULL, 0, "GSFIFO",
1145 0x58, regvalue, cur_col, wrap));
1146}
1147
1148static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = { 461static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
1149 { "RSTCHN", 0x01, 0x01 }, 462 { "RSTCHN", 0x01, 0x01 },
1150 { "CLRCHN", 0x02, 0x02 }, 463 { "CLRCHN", 0x02, 0x02 },
@@ -1159,44 +472,6 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1159 0x5a, regvalue, cur_col, wrap)); 472 0x5a, regvalue, cur_col, wrap));
1160} 473}
1161 474
1162static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
1163 { "LQONOCHKOVER", 0x01, 0x01 },
1164 { "LQONOHOLDLACK", 0x02, 0x02 },
1165 { "LQOBUSETDLY", 0x40, 0x40 },
1166 { "LQOH2A_VERSION", 0x80, 0x80 }
1167};
1168
1169int
1170ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1171{
1172 return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
1173 0x5a, regvalue, cur_col, wrap));
1174}
1175
1176int
1177ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1178{
1179 return (ahd_print_register(NULL, 0, "NEXTSCB",
1180 0x5a, regvalue, cur_col, wrap));
1181}
1182
1183static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
1184 { "CLRCFG4TCMD", 0x01, 0x01 },
1185 { "CLRCFG4ICMD", 0x02, 0x02 },
1186 { "CLRCFG4TSTAT", 0x04, 0x04 },
1187 { "CLRCFG4ISTAT", 0x08, 0x08 },
1188 { "CLRCFG4DATA", 0x10, 0x10 },
1189 { "CLRSAVEPTRS", 0x20, 0x20 },
1190 { "CLRCTXTDONE", 0x40, 0x40 }
1191};
1192
1193int
1194ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
1195{
1196 return (ahd_print_register(CLRSEQINTSRC_parse_table, 7, "CLRSEQINTSRC",
1197 0x5b, regvalue, cur_col, wrap));
1198}
1199
1200static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = { 475static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
1201 { "CFG4TCMD", 0x01, 0x01 }, 476 { "CFG4TCMD", 0x01, 0x01 },
1202 { "CFG4ICMD", 0x02, 0x02 }, 477 { "CFG4ICMD", 0x02, 0x02 },
@@ -1231,13 +506,6 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
1231 0x5c, regvalue, cur_col, wrap)); 506 0x5c, regvalue, cur_col, wrap));
1232} 507}
1233 508
1234int
1235ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1236{
1237 return (ahd_print_register(NULL, 0, "CURRSCB",
1238 0x5c, regvalue, cur_col, wrap));
1239}
1240
1241static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = { 509static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
1242 { "FIFOFREE", 0x01, 0x01 }, 510 { "FIFOFREE", 0x01, 0x01 },
1243 { "DATAINFIFO", 0x02, 0x02 }, 511 { "DATAINFIFO", 0x02, 0x02 },
@@ -1256,308 +524,12 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1256} 524}
1257 525
1258int 526int
1259ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1260{
1261 return (ahd_print_register(NULL, 0, "LASTSCB",
1262 0x5e, regvalue, cur_col, wrap));
1263}
1264
1265int
1266ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1267{
1268 return (ahd_print_register(NULL, 0, "SHADDR",
1269 0x60, regvalue, cur_col, wrap));
1270}
1271
1272int
1273ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1274{
1275 return (ahd_print_register(NULL, 0, "NEGOADDR",
1276 0x60, regvalue, cur_col, wrap));
1277}
1278
1279int
1280ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
1281{
1282 return (ahd_print_register(NULL, 0, "NEGPERIOD",
1283 0x61, regvalue, cur_col, wrap));
1284}
1285
1286int
1287ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
1288{
1289 return (ahd_print_register(NULL, 0, "NEGOFFSET",
1290 0x62, regvalue, cur_col, wrap));
1291}
1292
1293static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
1294 { "PPROPT_IUT", 0x01, 0x01 },
1295 { "PPROPT_DT", 0x02, 0x02 },
1296 { "PPROPT_QAS", 0x04, 0x04 },
1297 { "PPROPT_PACE", 0x08, 0x08 }
1298};
1299
1300int
1301ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
1302{
1303 return (ahd_print_register(NEGPPROPTS_parse_table, 4, "NEGPPROPTS",
1304 0x63, regvalue, cur_col, wrap));
1305}
1306
1307static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
1308 { "WIDEXFER", 0x01, 0x01 },
1309 { "ENAUTOATNO", 0x02, 0x02 },
1310 { "ENAUTOATNI", 0x04, 0x04 },
1311 { "ENSLOWCRC", 0x08, 0x08 },
1312 { "RTI_OVRDTRN", 0x10, 0x10 },
1313 { "RTI_WRTDIS", 0x20, 0x20 },
1314 { "ENSNAPSHOT", 0x40, 0x40 }
1315};
1316
1317int
1318ahd_negconopts_print(u_int regvalue, u_int *cur_col, u_int wrap)
1319{
1320 return (ahd_print_register(NEGCONOPTS_parse_table, 7, "NEGCONOPTS",
1321 0x64, regvalue, cur_col, wrap));
1322}
1323
1324int
1325ahd_annexcol_print(u_int regvalue, u_int *cur_col, u_int wrap)
1326{
1327 return (ahd_print_register(NULL, 0, "ANNEXCOL",
1328 0x65, regvalue, cur_col, wrap));
1329}
1330
1331int
1332ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1333{
1334 return (ahd_print_register(NULL, 0, "ANNEXDAT",
1335 0x66, regvalue, cur_col, wrap));
1336}
1337
1338static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
1339 { "LSTSGCLRDIS", 0x01, 0x01 },
1340 { "SHVALIDSTDIS", 0x02, 0x02 },
1341 { "DFFACTCLR", 0x04, 0x04 },
1342 { "SDONEMSKDIS", 0x08, 0x08 },
1343 { "WIDERESEN", 0x10, 0x10 },
1344 { "CURRFIFODEF", 0x20, 0x20 },
1345 { "STSELSKIDDIS", 0x40, 0x40 },
1346 { "BIDICHKDIS", 0x80, 0x80 }
1347};
1348
1349int
1350ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
1351{
1352 return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
1353 0x66, regvalue, cur_col, wrap));
1354}
1355
1356int
1357ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1358{
1359 return (ahd_print_register(NULL, 0, "IOWNID",
1360 0x67, regvalue, cur_col, wrap));
1361}
1362
1363int
1364ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1365{
1366 return (ahd_print_register(NULL, 0, "SHCNT",
1367 0x68, regvalue, cur_col, wrap));
1368}
1369
1370int
1371ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1372{
1373 return (ahd_print_register(NULL, 0, "TOWNID",
1374 0x69, regvalue, cur_col, wrap));
1375}
1376
1377int
1378ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap) 527ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1379{ 528{
1380 return (ahd_print_register(NULL, 0, "SELOID", 529 return (ahd_print_register(NULL, 0, "SELOID",
1381 0x6b, regvalue, cur_col, wrap)); 530 0x6b, regvalue, cur_col, wrap));
1382} 531}
1383 532
1384int
1385ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1386{
1387 return (ahd_print_register(NULL, 0, "HADDR",
1388 0x70, regvalue, cur_col, wrap));
1389}
1390
1391int
1392ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1393{
1394 return (ahd_print_register(NULL, 0, "HCNT",
1395 0x78, regvalue, cur_col, wrap));
1396}
1397
1398int
1399ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1400{
1401 return (ahd_print_register(NULL, 0, "SGHADDR",
1402 0x7c, regvalue, cur_col, wrap));
1403}
1404
1405int
1406ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1407{
1408 return (ahd_print_register(NULL, 0, "SCBHADDR",
1409 0x7c, regvalue, cur_col, wrap));
1410}
1411
1412int
1413ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1414{
1415 return (ahd_print_register(NULL, 0, "SGHCNT",
1416 0x84, regvalue, cur_col, wrap));
1417}
1418
1419int
1420ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1421{
1422 return (ahd_print_register(NULL, 0, "SCBHCNT",
1423 0x84, regvalue, cur_col, wrap));
1424}
1425
1426static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1427 { "WR_DFTHRSH_MIN", 0x00, 0x70 },
1428 { "RD_DFTHRSH_MIN", 0x00, 0x07 },
1429 { "RD_DFTHRSH_25", 0x01, 0x07 },
1430 { "RD_DFTHRSH_50", 0x02, 0x07 },
1431 { "RD_DFTHRSH_63", 0x03, 0x07 },
1432 { "RD_DFTHRSH_75", 0x04, 0x07 },
1433 { "RD_DFTHRSH_85", 0x05, 0x07 },
1434 { "RD_DFTHRSH_90", 0x06, 0x07 },
1435 { "RD_DFTHRSH_MAX", 0x07, 0x07 },
1436 { "WR_DFTHRSH_25", 0x10, 0x70 },
1437 { "WR_DFTHRSH_50", 0x20, 0x70 },
1438 { "WR_DFTHRSH_63", 0x30, 0x70 },
1439 { "WR_DFTHRSH_75", 0x40, 0x70 },
1440 { "WR_DFTHRSH_85", 0x50, 0x70 },
1441 { "WR_DFTHRSH_90", 0x60, 0x70 },
1442 { "WR_DFTHRSH_MAX", 0x70, 0x70 },
1443 { "RD_DFTHRSH", 0x07, 0x07 },
1444 { "WR_DFTHRSH", 0x70, 0x70 }
1445};
1446
1447int
1448ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1449{
1450 return (ahd_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
1451 0x88, regvalue, cur_col, wrap));
1452}
1453
1454static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
1455 { "CMPABCDIS", 0x01, 0x01 },
1456 { "TSCSERREN", 0x02, 0x02 },
1457 { "SRSPDPEEN", 0x04, 0x04 },
1458 { "SPLTSTADIS", 0x08, 0x08 },
1459 { "SPLTSMADIS", 0x10, 0x10 },
1460 { "UNEXPSCIEN", 0x20, 0x20 },
1461 { "SERRPULSE", 0x80, 0x80 }
1462};
1463
1464int
1465ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1466{
1467 return (ahd_print_register(PCIXCTL_parse_table, 7, "PCIXCTL",
1468 0x93, regvalue, cur_col, wrap));
1469}
1470
1471static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
1472 { "RXSPLTRSP", 0x01, 0x01 },
1473 { "RXSCEMSG", 0x02, 0x02 },
1474 { "RXOVRUN", 0x04, 0x04 },
1475 { "CNTNOTCMPLT", 0x08, 0x08 },
1476 { "SCDATBUCKET", 0x10, 0x10 },
1477 { "SCADERR", 0x20, 0x20 },
1478 { "SCBCERR", 0x40, 0x40 },
1479 { "STAETERM", 0x80, 0x80 }
1480};
1481
1482int
1483ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1484{
1485 return (ahd_print_register(DCHSPLTSTAT0_parse_table, 8, "DCHSPLTSTAT0",
1486 0x96, regvalue, cur_col, wrap));
1487}
1488
1489static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
1490 { "RXDATABUCKET", 0x01, 0x01 }
1491};
1492
1493int
1494ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1495{
1496 return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
1497 0x97, regvalue, cur_col, wrap));
1498}
1499
1500static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
1501 { "RXSPLTRSP", 0x01, 0x01 },
1502 { "RXSCEMSG", 0x02, 0x02 },
1503 { "RXOVRUN", 0x04, 0x04 },
1504 { "CNTNOTCMPLT", 0x08, 0x08 },
1505 { "SCDATBUCKET", 0x10, 0x10 },
1506 { "SCADERR", 0x20, 0x20 },
1507 { "SCBCERR", 0x40, 0x40 },
1508 { "STAETERM", 0x80, 0x80 }
1509};
1510
1511int
1512ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1513{
1514 return (ahd_print_register(SGSPLTSTAT0_parse_table, 8, "SGSPLTSTAT0",
1515 0x9e, regvalue, cur_col, wrap));
1516}
1517
1518static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
1519 { "RXDATABUCKET", 0x01, 0x01 }
1520};
1521
1522int
1523ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1524{
1525 return (ahd_print_register(SGSPLTSTAT1_parse_table, 1, "SGSPLTSTAT1",
1526 0x9f, regvalue, cur_col, wrap));
1527}
1528
1529static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
1530 { "DPR", 0x01, 0x01 },
1531 { "TWATERR", 0x02, 0x02 },
1532 { "RDPERR", 0x04, 0x04 },
1533 { "SCAAPERR", 0x08, 0x08 },
1534 { "RTA", 0x10, 0x10 },
1535 { "RMA", 0x20, 0x20 },
1536 { "SSE", 0x40, 0x40 },
1537 { "DPE", 0x80, 0x80 }
1538};
1539
1540int
1541ahd_df0pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1542{
1543 return (ahd_print_register(DF0PCISTAT_parse_table, 8, "DF0PCISTAT",
1544 0xa0, regvalue, cur_col, wrap));
1545}
1546
1547int
1548ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1549{
1550 return (ahd_print_register(NULL, 0, "REG0",
1551 0xa0, regvalue, cur_col, wrap));
1552}
1553
1554int
1555ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1556{
1557 return (ahd_print_register(NULL, 0, "REG_ISR",
1558 0xa4, regvalue, cur_col, wrap));
1559}
1560
1561static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = { 533static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
1562 { "SEGS_AVAIL", 0x01, 0x01 }, 534 { "SEGS_AVAIL", 0x01, 0x01 },
1563 { "LOADING_NEEDED", 0x02, 0x02 }, 535 { "LOADING_NEEDED", 0x02, 0x02 },
@@ -1571,54 +543,6 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
1571 0xa6, regvalue, cur_col, wrap)); 543 0xa6, regvalue, cur_col, wrap));
1572} 544}
1573 545
1574static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
1575 { "TWATERR", 0x02, 0x02 },
1576 { "STA", 0x08, 0x08 },
1577 { "SSE", 0x40, 0x40 },
1578 { "DPE", 0x80, 0x80 }
1579};
1580
1581int
1582ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1583{
1584 return (ahd_print_register(TARGPCISTAT_parse_table, 4, "TARGPCISTAT",
1585 0xa7, regvalue, cur_col, wrap));
1586}
1587
1588int
1589ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1590{
1591 return (ahd_print_register(NULL, 0, "SCBPTR",
1592 0xa8, regvalue, cur_col, wrap));
1593}
1594
1595static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
1596 { "SCBPTR_OFF", 0x07, 0x07 },
1597 { "SCBPTR_ADDR", 0x38, 0x38 },
1598 { "AUSCBPTR_EN", 0x80, 0x80 }
1599};
1600
1601int
1602ahd_scbautoptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1603{
1604 return (ahd_print_register(SCBAUTOPTR_parse_table, 3, "SCBAUTOPTR",
1605 0xab, regvalue, cur_col, wrap));
1606}
1607
1608int
1609ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1610{
1611 return (ahd_print_register(NULL, 0, "CCSGADDR",
1612 0xac, regvalue, cur_col, wrap));
1613}
1614
1615int
1616ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1617{
1618 return (ahd_print_register(NULL, 0, "CCSCBADDR",
1619 0xac, regvalue, cur_col, wrap));
1620}
1621
1622static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = { 546static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1623 { "CCSCBRESET", 0x01, 0x01 }, 547 { "CCSCBRESET", 0x01, 0x01 },
1624 { "CCSCBDIR", 0x04, 0x04 }, 548 { "CCSCBDIR", 0x04, 0x04 },
@@ -1651,138 +575,6 @@ ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1651 0xad, regvalue, cur_col, wrap)); 575 0xad, regvalue, cur_col, wrap));
1652} 576}
1653 577
1654int
1655ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1656{
1657 return (ahd_print_register(NULL, 0, "CCSGRAM",
1658 0xb0, regvalue, cur_col, wrap));
1659}
1660
1661int
1662ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1663{
1664 return (ahd_print_register(NULL, 0, "CCSCBRAM",
1665 0xb0, regvalue, cur_col, wrap));
1666}
1667
1668int
1669ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1670{
1671 return (ahd_print_register(NULL, 0, "BRDDAT",
1672 0xb8, regvalue, cur_col, wrap));
1673}
1674
1675static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
1676 { "BRDSTB", 0x01, 0x01 },
1677 { "BRDRW", 0x02, 0x02 },
1678 { "BRDEN", 0x04, 0x04 },
1679 { "BRDADDR", 0x38, 0x38 },
1680 { "FLXARBREQ", 0x40, 0x40 },
1681 { "FLXARBACK", 0x80, 0x80 }
1682};
1683
1684int
1685ahd_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1686{
1687 return (ahd_print_register(BRDCTL_parse_table, 6, "BRDCTL",
1688 0xb9, regvalue, cur_col, wrap));
1689}
1690
1691int
1692ahd_seeadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1693{
1694 return (ahd_print_register(NULL, 0, "SEEADR",
1695 0xba, regvalue, cur_col, wrap));
1696}
1697
1698int
1699ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1700{
1701 return (ahd_print_register(NULL, 0, "SEEDAT",
1702 0xbc, regvalue, cur_col, wrap));
1703}
1704
1705static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
1706 { "SEEOP_ERAL", 0x40, 0x70 },
1707 { "SEEOP_WRITE", 0x50, 0x70 },
1708 { "SEEOP_READ", 0x60, 0x70 },
1709 { "SEEOP_ERASE", 0x70, 0x70 },
1710 { "SEESTART", 0x01, 0x01 },
1711 { "SEERST", 0x02, 0x02 },
1712 { "SEEOPCODE", 0x70, 0x70 },
1713 { "SEEOP_EWEN", 0x40, 0x40 },
1714 { "SEEOP_WALL", 0x40, 0x40 },
1715 { "SEEOP_EWDS", 0x40, 0x40 }
1716};
1717
1718int
1719ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1720{
1721 return (ahd_print_register(SEECTL_parse_table, 10, "SEECTL",
1722 0xbe, regvalue, cur_col, wrap));
1723}
1724
1725static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
1726 { "SEESTART", 0x01, 0x01 },
1727 { "SEEBUSY", 0x02, 0x02 },
1728 { "SEEARBACK", 0x04, 0x04 },
1729 { "LDALTID_L", 0x08, 0x08 },
1730 { "SEEOPCODE", 0x70, 0x70 },
1731 { "INIT_DONE", 0x80, 0x80 }
1732};
1733
1734int
1735ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1736{
1737 return (ahd_print_register(SEESTAT_parse_table, 6, "SEESTAT",
1738 0xbe, regvalue, cur_col, wrap));
1739}
1740
1741static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
1742 { "XMITOFFSTDIS", 0x02, 0x02 },
1743 { "RCVROFFSTDIS", 0x04, 0x04 },
1744 { "DESQDIS", 0x10, 0x10 },
1745 { "BYPASSENAB", 0x80, 0x80 }
1746};
1747
1748int
1749ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1750{
1751 return (ahd_print_register(DSPDATACTL_parse_table, 4, "DSPDATACTL",
1752 0xc1, regvalue, cur_col, wrap));
1753}
1754
1755int
1756ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1757{
1758 return (ahd_print_register(NULL, 0, "DFDAT",
1759 0xc4, regvalue, cur_col, wrap));
1760}
1761
1762static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
1763 { "DSPSEL", 0x1f, 0x1f },
1764 { "AUTOINCEN", 0x80, 0x80 }
1765};
1766
1767int
1768ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
1769{
1770 return (ahd_print_register(DSPSELECT_parse_table, 2, "DSPSELECT",
1771 0xc4, regvalue, cur_col, wrap));
1772}
1773
1774static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
1775 { "XMITMANVAL", 0x3f, 0x3f },
1776 { "AUTOXBCDIS", 0x80, 0x80 }
1777};
1778
1779int
1780ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1781{
1782 return (ahd_print_register(WRTBIASCTL_parse_table, 2, "WRTBIASCTL",
1783 0xc5, regvalue, cur_col, wrap));
1784}
1785
1786static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = { 578static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
1787 { "LOADRAM", 0x01, 0x01 }, 579 { "LOADRAM", 0x01, 0x01 },
1788 { "SEQRESET", 0x02, 0x02 }, 580 { "SEQRESET", 0x02, 0x02 },
@@ -1801,18 +593,6 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1801 0xd6, regvalue, cur_col, wrap)); 593 0xd6, regvalue, cur_col, wrap));
1802} 594}
1803 595
1804static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
1805 { "CARRY", 0x01, 0x01 },
1806 { "ZERO", 0x02, 0x02 }
1807};
1808
1809int
1810ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
1811{
1812 return (ahd_print_register(FLAGS_parse_table, 2, "FLAGS",
1813 0xd8, regvalue, cur_col, wrap));
1814}
1815
1816static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = { 596static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
1817 { "IRET", 0x01, 0x01 }, 597 { "IRET", 0x01, 0x01 },
1818 { "INTMASK1", 0x02, 0x02 }, 598 { "INTMASK1", 0x02, 0x02 },
@@ -1831,118 +611,6 @@ ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1831} 611}
1832 612
1833int 613int
1834ahd_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1835{
1836 return (ahd_print_register(NULL, 0, "SEQRAM",
1837 0xda, regvalue, cur_col, wrap));
1838}
1839
1840int
1841ahd_prgmcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1842{
1843 return (ahd_print_register(NULL, 0, "PRGMCNT",
1844 0xde, regvalue, cur_col, wrap));
1845}
1846
1847int
1848ahd_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
1849{
1850 return (ahd_print_register(NULL, 0, "ACCUM",
1851 0xe0, regvalue, cur_col, wrap));
1852}
1853
1854int
1855ahd_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
1856{
1857 return (ahd_print_register(NULL, 0, "SINDEX",
1858 0xe2, regvalue, cur_col, wrap));
1859}
1860
1861int
1862ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
1863{
1864 return (ahd_print_register(NULL, 0, "DINDEX",
1865 0xe4, regvalue, cur_col, wrap));
1866}
1867
1868int
1869ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
1870{
1871 return (ahd_print_register(NULL, 0, "ALLONES",
1872 0xe8, regvalue, cur_col, wrap));
1873}
1874
1875int
1876ahd_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
1877{
1878 return (ahd_print_register(NULL, 0, "ALLZEROS",
1879 0xea, regvalue, cur_col, wrap));
1880}
1881
1882int
1883ahd_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
1884{
1885 return (ahd_print_register(NULL, 0, "NONE",
1886 0xea, regvalue, cur_col, wrap));
1887}
1888
1889int
1890ahd_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1891{
1892 return (ahd_print_register(NULL, 0, "SINDIR",
1893 0xec, regvalue, cur_col, wrap));
1894}
1895
1896int
1897ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1898{
1899 return (ahd_print_register(NULL, 0, "DINDIR",
1900 0xed, regvalue, cur_col, wrap));
1901}
1902
1903int
1904ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
1905{
1906 return (ahd_print_register(NULL, 0, "STACK",
1907 0xf2, regvalue, cur_col, wrap));
1908}
1909
1910int
1911ahd_intvec1_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1912{
1913 return (ahd_print_register(NULL, 0, "INTVEC1_ADDR",
1914 0xf4, regvalue, cur_col, wrap));
1915}
1916
1917int
1918ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1919{
1920 return (ahd_print_register(NULL, 0, "CURADDR",
1921 0xf4, regvalue, cur_col, wrap));
1922}
1923
1924int
1925ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1926{
1927 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
1928 0xf6, regvalue, cur_col, wrap));
1929}
1930
1931int
1932ahd_longjmp_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1933{
1934 return (ahd_print_register(NULL, 0, "LONGJMP_ADDR",
1935 0xf8, regvalue, cur_col, wrap));
1936}
1937
1938int
1939ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
1940{
1941 return (ahd_print_register(NULL, 0, "ACCUM_SAVE",
1942 0xfa, regvalue, cur_col, wrap));
1943}
1944
1945int
1946ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 614ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1947{ 615{
1948 return (ahd_print_register(NULL, 0, "SRAM_BASE", 616 return (ahd_print_register(NULL, 0, "SRAM_BASE",
@@ -1950,69 +618,6 @@ ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1950} 618}
1951 619
1952int 620int
1953ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
1954{
1955 return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
1956 0x100, regvalue, cur_col, wrap));
1957}
1958
1959int
1960ahd_waiting_tid_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1961{
1962 return (ahd_print_register(NULL, 0, "WAITING_TID_HEAD",
1963 0x120, regvalue, cur_col, wrap));
1964}
1965
1966int
1967ahd_waiting_tid_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
1968{
1969 return (ahd_print_register(NULL, 0, "WAITING_TID_TAIL",
1970 0x122, regvalue, cur_col, wrap));
1971}
1972
1973int
1974ahd_next_queued_scb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1975{
1976 return (ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR",
1977 0x124, regvalue, cur_col, wrap));
1978}
1979
1980int
1981ahd_complete_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1982{
1983 return (ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD",
1984 0x128, regvalue, cur_col, wrap));
1985}
1986
1987int
1988ahd_complete_scb_dmainprog_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1989{
1990 return (ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD",
1991 0x12a, regvalue, cur_col, wrap));
1992}
1993
1994int
1995ahd_complete_dma_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1996{
1997 return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD",
1998 0x12c, regvalue, cur_col, wrap));
1999}
2000
2001int
2002ahd_complete_dma_scb_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
2003{
2004 return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL",
2005 0x12e, regvalue, cur_col, wrap));
2006}
2007
2008int
2009ahd_complete_on_qfreeze_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
2010{
2011 return (ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD",
2012 0x130, regvalue, cur_col, wrap));
2013}
2014
2015int
2016ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap) 621ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
2017{ 622{
2018 return (ahd_print_register(NULL, 0, "QFREEZE_COUNT", 623 return (ahd_print_register(NULL, 0, "QFREEZE_COUNT",
@@ -2033,33 +638,6 @@ ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap)
2033 0x136, regvalue, cur_col, wrap)); 638 0x136, regvalue, cur_col, wrap));
2034} 639}
2035 640
2036int
2037ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
2038{
2039 return (ahd_print_register(NULL, 0, "MSG_OUT",
2040 0x137, regvalue, cur_col, wrap));
2041}
2042
2043static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
2044 { "FIFORESET", 0x01, 0x01 },
2045 { "FIFOFLUSH", 0x02, 0x02 },
2046 { "DIRECTION", 0x04, 0x04 },
2047 { "HDMAEN", 0x08, 0x08 },
2048 { "HDMAENACK", 0x08, 0x08 },
2049 { "SDMAEN", 0x10, 0x10 },
2050 { "SDMAENACK", 0x10, 0x10 },
2051 { "SCSIEN", 0x20, 0x20 },
2052 { "WIDEODD", 0x40, 0x40 },
2053 { "PRELOADEN", 0x80, 0x80 }
2054};
2055
2056int
2057ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
2058{
2059 return (ahd_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
2060 0x138, regvalue, cur_col, wrap));
2061}
2062
2063static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 641static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
2064 { "NO_DISCONNECT", 0x01, 0x01 }, 642 { "NO_DISCONNECT", 0x01, 0x01 },
2065 { "SPHASE_PENDING", 0x02, 0x02 }, 643 { "SPHASE_PENDING", 0x02, 0x02 },
@@ -2079,20 +657,6 @@ ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
2079 0x139, regvalue, cur_col, wrap)); 657 0x139, regvalue, cur_col, wrap));
2080} 658}
2081 659
2082int
2083ahd_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2084{
2085 return (ahd_print_register(NULL, 0, "SAVED_SCSIID",
2086 0x13a, regvalue, cur_col, wrap));
2087}
2088
2089int
2090ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
2091{
2092 return (ahd_print_register(NULL, 0, "SAVED_LUN",
2093 0x13b, regvalue, cur_col, wrap));
2094}
2095
2096static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = { 660static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
2097 { "P_DATAOUT", 0x00, 0xe0 }, 661 { "P_DATAOUT", 0x00, 0xe0 },
2098 { "P_DATAOUT_DT", 0x20, 0xe0 }, 662 { "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -2116,96 +680,6 @@ ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
2116 0x13c, regvalue, cur_col, wrap)); 680 0x13c, regvalue, cur_col, wrap));
2117} 681}
2118 682
2119int
2120ahd_qoutfifo_entry_valid_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2121{
2122 return (ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG",
2123 0x13d, regvalue, cur_col, wrap));
2124}
2125
2126int
2127ahd_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
2128{
2129 return (ahd_print_register(NULL, 0, "KERNEL_TQINPOS",
2130 0x13e, regvalue, cur_col, wrap));
2131}
2132
2133int
2134ahd_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
2135{
2136 return (ahd_print_register(NULL, 0, "TQINPOS",
2137 0x13f, regvalue, cur_col, wrap));
2138}
2139
2140int
2141ahd_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2142{
2143 return (ahd_print_register(NULL, 0, "SHARED_DATA_ADDR",
2144 0x140, regvalue, cur_col, wrap));
2145}
2146
2147int
2148ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2149{
2150 return (ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR",
2151 0x144, regvalue, cur_col, wrap));
2152}
2153
2154static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
2155 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
2156 { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
2157 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
2158 { "EXIT_MSG_LOOP", 0x08, 0x08 },
2159 { "MSGOUT_PHASEMIS", 0x10, 0x10 },
2160 { "SEND_REJ", 0x20, 0x20 },
2161 { "SEND_SENSE", 0x40, 0x40 },
2162 { "SEND_MSG", 0x80, 0x80 }
2163};
2164
2165int
2166ahd_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2167{
2168 return (ahd_print_register(ARG_1_parse_table, 8, "ARG_1",
2169 0x148, regvalue, cur_col, wrap));
2170}
2171
2172int
2173ahd_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2174{
2175 return (ahd_print_register(NULL, 0, "ARG_2",
2176 0x149, regvalue, cur_col, wrap));
2177}
2178
2179int
2180ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
2181{
2182 return (ahd_print_register(NULL, 0, "LAST_MSG",
2183 0x14a, regvalue, cur_col, wrap));
2184}
2185
2186static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
2187 { "ALTSTIM", 0x01, 0x01 },
2188 { "ENAUTOATNP", 0x02, 0x02 },
2189 { "MANUALP", 0x0c, 0x0c },
2190 { "ENRSELI", 0x10, 0x10 },
2191 { "ENSELI", 0x20, 0x20 },
2192 { "MANUALCTL", 0x40, 0x40 }
2193};
2194
2195int
2196ahd_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
2197{
2198 return (ahd_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
2199 0x14b, regvalue, cur_col, wrap));
2200}
2201
2202int
2203ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2204{
2205 return (ahd_print_register(NULL, 0, "INITIATOR_TAG",
2206 0x14c, regvalue, cur_col, wrap));
2207}
2208
2209static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 683static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
2210 { "PENDING_MK_MESSAGE", 0x01, 0x01 }, 684 { "PENDING_MK_MESSAGE", 0x01, 0x01 },
2211 { "TARGET_MSG_PENDING", 0x02, 0x02 }, 685 { "TARGET_MSG_PENDING", 0x02, 0x02 },
@@ -2220,62 +694,6 @@ ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2220} 694}
2221 695
2222int 696int
2223ahd_allocfifo_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2224{
2225 return (ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR",
2226 0x14e, regvalue, cur_col, wrap));
2227}
2228
2229int
2230ahd_int_coalescing_timer_print(u_int regvalue, u_int *cur_col, u_int wrap)
2231{
2232 return (ahd_print_register(NULL, 0, "INT_COALESCING_TIMER",
2233 0x150, regvalue, cur_col, wrap));
2234}
2235
2236int
2237ahd_int_coalescing_maxcmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
2238{
2239 return (ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS",
2240 0x152, regvalue, cur_col, wrap));
2241}
2242
2243int
2244ahd_int_coalescing_mincmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
2245{
2246 return (ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS",
2247 0x153, regvalue, cur_col, wrap));
2248}
2249
2250int
2251ahd_cmds_pending_print(u_int regvalue, u_int *cur_col, u_int wrap)
2252{
2253 return (ahd_print_register(NULL, 0, "CMDS_PENDING",
2254 0x154, regvalue, cur_col, wrap));
2255}
2256
2257int
2258ahd_int_coalescing_cmdcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
2259{
2260 return (ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT",
2261 0x156, regvalue, cur_col, wrap));
2262}
2263
2264int
2265ahd_local_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
2266{
2267 return (ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX",
2268 0x157, regvalue, cur_col, wrap));
2269}
2270
2271int
2272ahd_cmdsize_table_print(u_int regvalue, u_int *cur_col, u_int wrap)
2273{
2274 return (ahd_print_register(NULL, 0, "CMDSIZE_TABLE",
2275 0x158, regvalue, cur_col, wrap));
2276}
2277
2278int
2279ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap) 697ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
2280{ 698{
2281 return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 699 return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB",
@@ -2290,53 +708,12 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2290} 708}
2291 709
2292int 710int
2293ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2294{
2295 return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
2296 0x180, regvalue, cur_col, wrap));
2297}
2298
2299int
2300ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 711ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
2301{ 712{
2302 return (ahd_print_register(NULL, 0, "SCB_BASE", 713 return (ahd_print_register(NULL, 0, "SCB_BASE",
2303 0x180, regvalue, cur_col, wrap)); 714 0x180, regvalue, cur_col, wrap));
2304} 715}
2305 716
2306static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
2307 { "SG_LIST_NULL", 0x01, 0x01 },
2308 { "SG_OVERRUN_RESID", 0x02, 0x02 },
2309 { "SG_ADDR_MASK", 0xf8, 0xf8 }
2310};
2311
2312int
2313ahd_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2314{
2315 return (ahd_print_register(SCB_RESIDUAL_SGPTR_parse_table, 3, "SCB_RESIDUAL_SGPTR",
2316 0x184, regvalue, cur_col, wrap));
2317}
2318
2319int
2320ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
2321{
2322 return (ahd_print_register(NULL, 0, "SCB_SCSI_STATUS",
2323 0x188, regvalue, cur_col, wrap));
2324}
2325
2326int
2327ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2328{
2329 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
2330 0x18c, regvalue, cur_col, wrap));
2331}
2332
2333int
2334ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2335{
2336 return (ahd_print_register(NULL, 0, "SCB_TAG",
2337 0x190, regvalue, cur_col, wrap));
2338}
2339
2340static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 717static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
2341 { "SCB_TAG_TYPE", 0x03, 0x03 }, 718 { "SCB_TAG_TYPE", 0x03, 0x03 },
2342 { "DISCONNECTED", 0x04, 0x04 }, 719 { "DISCONNECTED", 0x04, 0x04 },
@@ -2366,103 +743,3 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2366 0x193, regvalue, cur_col, wrap)); 743 0x193, regvalue, cur_col, wrap));
2367} 744}
2368 745
2369static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
2370 { "LID", 0xff, 0xff }
2371};
2372
2373int
2374ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
2375{
2376 return (ahd_print_register(SCB_LUN_parse_table, 1, "SCB_LUN",
2377 0x194, regvalue, cur_col, wrap));
2378}
2379
2380static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
2381 { "SCB_XFERLEN_ODD", 0x01, 0x01 }
2382};
2383
2384int
2385ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
2386{
2387 return (ahd_print_register(SCB_TASK_ATTRIBUTE_parse_table, 1, "SCB_TASK_ATTRIBUTE",
2388 0x195, regvalue, cur_col, wrap));
2389}
2390
2391static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
2392 { "SCB_CDB_LEN_PTR", 0x80, 0x80 }
2393};
2394
2395int
2396ahd_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
2397{
2398 return (ahd_print_register(SCB_CDB_LEN_parse_table, 1, "SCB_CDB_LEN",
2399 0x196, regvalue, cur_col, wrap));
2400}
2401
2402int
2403ahd_scb_task_management_print(u_int regvalue, u_int *cur_col, u_int wrap)
2404{
2405 return (ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT",
2406 0x197, regvalue, cur_col, wrap));
2407}
2408
2409int
2410ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2411{
2412 return (ahd_print_register(NULL, 0, "SCB_DATAPTR",
2413 0x198, regvalue, cur_col, wrap));
2414}
2415
2416static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
2417 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
2418 { "SG_LAST_SEG", 0x80, 0x80 }
2419};
2420
2421int
2422ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2423{
2424 return (ahd_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
2425 0x1a0, regvalue, cur_col, wrap));
2426}
2427
2428static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
2429 { "SG_LIST_NULL", 0x01, 0x01 },
2430 { "SG_FULL_RESID", 0x02, 0x02 },
2431 { "SG_STATUS_VALID", 0x04, 0x04 }
2432};
2433
2434int
2435ahd_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2436{
2437 return (ahd_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
2438 0x1a4, regvalue, cur_col, wrap));
2439}
2440
2441int
2442ahd_scb_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2443{
2444 return (ahd_print_register(NULL, 0, "SCB_BUSADDR",
2445 0x1a8, regvalue, cur_col, wrap));
2446}
2447
2448int
2449ahd_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
2450{
2451 return (ahd_print_register(NULL, 0, "SCB_NEXT",
2452 0x1ac, regvalue, cur_col, wrap));
2453}
2454
2455int
2456ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2457{
2458 return (ahd_print_register(NULL, 0, "SCB_NEXT2",
2459 0x1ae, regvalue, cur_col, wrap));
2460}
2461
2462int
2463ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
2464{
2465 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
2466 0x1b8, regvalue, cur_col, wrap));
2467}
2468
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index 0d2f763c3427..9a96e55da39a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -51,6 +51,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
51 */ 51 */
52 52
53/* 53/*
54 * Registers marked "dont_generate_debug_code" are not (yet) referenced
55 * from the driver code, and this keyword inhibit generation
56 * of debug code for them.
57 *
58 * REG_PRETTY_PRINT config will complain if dont_generate_debug_code
59 * is added to the register which is referenced in the driver.
60 * Unreferenced register with no dont_generate_debug_code will result
61 * in dead code. No warning is issued.
62 */
63
64/*
54 * SCSI Sequence Control (p. 3-11). 65 * SCSI Sequence Control (p. 3-11).
55 * Each bit, when set starts a specific SCSI sequence on the bus 66 * Each bit, when set starts a specific SCSI sequence on the bus
56 */ 67 */
@@ -97,6 +108,7 @@ register SXFRCTL1 {
97 field ENSTIMER 0x04 108 field ENSTIMER 0x04
98 field ACTNEGEN 0x02 109 field ACTNEGEN 0x02
99 field STPWEN 0x01 /* Powered Termination */ 110 field STPWEN 0x01 /* Powered Termination */
111 dont_generate_debug_code
100} 112}
101 113
102/* 114/*
@@ -155,6 +167,7 @@ register SCSISIGO {
155 mask P_MESGOUT CDI|MSGI 167 mask P_MESGOUT CDI|MSGI
156 mask P_STATUS CDI|IOI 168 mask P_STATUS CDI|IOI
157 mask P_MESGIN CDI|IOI|MSGI 169 mask P_MESGIN CDI|IOI|MSGI
170 dont_generate_debug_code
158} 171}
159 172
160/* 173/*
@@ -194,6 +207,7 @@ register SCSIID {
194 */ 207 */
195 alias SCSIOFFSET 208 alias SCSIOFFSET
196 mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ 209 mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */
210 dont_generate_debug_code
197} 211}
198 212
199/* 213/*
@@ -205,6 +219,7 @@ register SCSIID {
205register SCSIDATL { 219register SCSIDATL {
206 address 0x006 220 address 0x006
207 access_mode RW 221 access_mode RW
222 dont_generate_debug_code
208} 223}
209 224
210register SCSIDATH { 225register SCSIDATH {
@@ -223,6 +238,7 @@ register STCNT {
223 address 0x008 238 address 0x008
224 size 3 239 size 3
225 access_mode RW 240 access_mode RW
241 dont_generate_debug_code
226} 242}
227 243
228/* ALT_MODE registers (Ultra2 and Ultra160 chips) */ 244/* ALT_MODE registers (Ultra2 and Ultra160 chips) */
@@ -248,6 +264,7 @@ register OPTIONMODE {
248 field AUTO_MSGOUT_DE 0x02 264 field AUTO_MSGOUT_DE 0x02
249 field DIS_MSGIN_DUALEDGE 0x01 265 field DIS_MSGIN_DUALEDGE 0x01
250 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE 266 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE
267 dont_generate_debug_code
251} 268}
252 269
253/* ALT_MODE register on Ultra160 chips */ 270/* ALT_MODE register on Ultra160 chips */
@@ -256,6 +273,7 @@ register TARGCRCCNT {
256 size 2 273 size 2
257 access_mode RW 274 access_mode RW
258 count 2 275 count 2
276 dont_generate_debug_code
259} 277}
260 278
261/* 279/*
@@ -271,6 +289,7 @@ register CLRSINT0 {
271 field CLRSWRAP 0x08 289 field CLRSWRAP 0x08
272 field CLRIOERR 0x08 /* Ultra2 Only */ 290 field CLRIOERR 0x08 /* Ultra2 Only */
273 field CLRSPIORDY 0x02 291 field CLRSPIORDY 0x02
292 dont_generate_debug_code
274} 293}
275 294
276/* 295/*
@@ -306,6 +325,7 @@ register CLRSINT1 {
306 field CLRSCSIPERR 0x04 325 field CLRSCSIPERR 0x04
307 field CLRPHASECHG 0x02 326 field CLRPHASECHG 0x02
308 field CLRREQINIT 0x01 327 field CLRREQINIT 0x01
328 dont_generate_debug_code
309} 329}
310 330
311/* 331/*
@@ -360,6 +380,7 @@ register SCSIID_ULTRA2 {
360 access_mode RW 380 access_mode RW
361 mask TID 0xf0 /* Target ID mask */ 381 mask TID 0xf0 /* Target ID mask */
362 mask OID 0x0f /* Our ID mask */ 382 mask OID 0x0f /* Our ID mask */
383 dont_generate_debug_code
363} 384}
364 385
365/* 386/*
@@ -425,6 +446,7 @@ register SHADDR {
425 address 0x014 446 address 0x014
426 size 4 447 size 4
427 access_mode RO 448 access_mode RO
449 dont_generate_debug_code
428} 450}
429 451
430/* 452/*
@@ -441,6 +463,7 @@ register SELTIMER {
441 field STAGE2 0x02 463 field STAGE2 0x02
442 field STAGE1 0x01 464 field STAGE1 0x01
443 alias TARGIDIN 465 alias TARGIDIN
466 dont_generate_debug_code
444} 467}
445 468
446/* 469/*
@@ -453,6 +476,7 @@ register SELID {
453 access_mode RW 476 access_mode RW
454 mask SELID_MASK 0xf0 477 mask SELID_MASK 0xf0
455 field ONEBIT 0x08 478 field ONEBIT 0x08
479 dont_generate_debug_code
456} 480}
457 481
458register SCAMCTL { 482register SCAMCTL {
@@ -473,6 +497,7 @@ register TARGID {
473 size 2 497 size 2
474 access_mode RW 498 access_mode RW
475 count 14 499 count 14
500 dont_generate_debug_code
476} 501}
477 502
478/* 503/*
@@ -495,6 +520,7 @@ register SPIOCAP {
495 field EEPROM 0x04 /* Writable external BIOS ROM */ 520 field EEPROM 0x04 /* Writable external BIOS ROM */
496 field ROM 0x02 /* Logic for accessing external ROM */ 521 field ROM 0x02 /* Logic for accessing external ROM */
497 field SSPIOCPS 0x01 /* Termination and cable detection */ 522 field SSPIOCPS 0x01 /* Termination and cable detection */
523 dont_generate_debug_code
498} 524}
499 525
500register BRDCTL { 526register BRDCTL {
@@ -514,6 +540,7 @@ register BRDCTL {
514 field BRDDAT2 0x04 540 field BRDDAT2 0x04
515 field BRDRW_ULTRA2 0x02 541 field BRDRW_ULTRA2 0x02
516 field BRDSTB_ULTRA2 0x01 542 field BRDSTB_ULTRA2 0x01
543 dont_generate_debug_code
517} 544}
518 545
519/* 546/*
@@ -551,6 +578,7 @@ register SEECTL {
551 field SEECK 0x04 578 field SEECK 0x04
552 field SEEDO 0x02 579 field SEEDO 0x02
553 field SEEDI 0x01 580 field SEEDI 0x01
581 dont_generate_debug_code
554} 582}
555/* 583/*
556 * SCSI Block Control (p. 3-32) 584 * SCSI Block Control (p. 3-32)
@@ -601,6 +629,7 @@ register SEQRAM {
601 address 0x061 629 address 0x061
602 access_mode RW 630 access_mode RW
603 count 2 631 count 2
632 dont_generate_debug_code
604} 633}
605 634
606/* 635/*
@@ -610,6 +639,7 @@ register SEQRAM {
610register SEQADDR0 { 639register SEQADDR0 {
611 address 0x062 640 address 0x062
612 access_mode RW 641 access_mode RW
642 dont_generate_debug_code
613} 643}
614 644
615register SEQADDR1 { 645register SEQADDR1 {
@@ -617,6 +647,7 @@ register SEQADDR1 {
617 access_mode RW 647 access_mode RW
618 count 8 648 count 8
619 mask SEQADDR1_MASK 0x01 649 mask SEQADDR1_MASK 0x01
650 dont_generate_debug_code
620} 651}
621 652
622/* 653/*
@@ -627,35 +658,41 @@ register ACCUM {
627 address 0x064 658 address 0x064
628 access_mode RW 659 access_mode RW
629 accumulator 660 accumulator
661 dont_generate_debug_code
630} 662}
631 663
632register SINDEX { 664register SINDEX {
633 address 0x065 665 address 0x065
634 access_mode RW 666 access_mode RW
635 sindex 667 sindex
668 dont_generate_debug_code
636} 669}
637 670
638register DINDEX { 671register DINDEX {
639 address 0x066 672 address 0x066
640 access_mode RW 673 access_mode RW
674 dont_generate_debug_code
641} 675}
642 676
643register ALLONES { 677register ALLONES {
644 address 0x069 678 address 0x069
645 access_mode RO 679 access_mode RO
646 allones 680 allones
681 dont_generate_debug_code
647} 682}
648 683
649register ALLZEROS { 684register ALLZEROS {
650 address 0x06a 685 address 0x06a
651 access_mode RO 686 access_mode RO
652 allzeros 687 allzeros
688 dont_generate_debug_code
653} 689}
654 690
655register NONE { 691register NONE {
656 address 0x06a 692 address 0x06a
657 access_mode WO 693 access_mode WO
658 none 694 none
695 dont_generate_debug_code
659} 696}
660 697
661register FLAGS { 698register FLAGS {
@@ -664,16 +701,19 @@ register FLAGS {
664 count 18 701 count 18
665 field ZERO 0x02 702 field ZERO 0x02
666 field CARRY 0x01 703 field CARRY 0x01
704 dont_generate_debug_code
667} 705}
668 706
669register SINDIR { 707register SINDIR {
670 address 0x06c 708 address 0x06c
671 access_mode RO 709 access_mode RO
710 dont_generate_debug_code
672} 711}
673 712
674register DINDIR { 713register DINDIR {
675 address 0x06d 714 address 0x06d
676 access_mode WO 715 access_mode WO
716 dont_generate_debug_code
677} 717}
678 718
679register FUNCTION1 { 719register FUNCTION1 {
@@ -685,6 +725,7 @@ register STACK {
685 address 0x06f 725 address 0x06f
686 access_mode RO 726 access_mode RO
687 count 5 727 count 5
728 dont_generate_debug_code
688} 729}
689 730
690const STACK_SIZE 4 731const STACK_SIZE 4
@@ -716,6 +757,7 @@ register DSCOMMAND0 {
716 field RAMPS 0x04 /* External SCB RAM Present */ 757 field RAMPS 0x04 /* External SCB RAM Present */
717 field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */ 758 field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */
718 field CIOPARCKEN 0x01 /* Internal bus parity error enable */ 759 field CIOPARCKEN 0x01 /* Internal bus parity error enable */
760 dont_generate_debug_code
719} 761}
720 762
721register DSCOMMAND1 { 763register DSCOMMAND1 {
@@ -724,6 +766,7 @@ register DSCOMMAND1 {
724 mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */ 766 mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */
725 field HADDLDSEL1 0x02 /* Host Address Load Select Bits */ 767 field HADDLDSEL1 0x02 /* Host Address Load Select Bits */
726 field HADDLDSEL0 0x01 768 field HADDLDSEL0 0x01
769 dont_generate_debug_code
727} 770}
728 771
729/* 772/*
@@ -735,6 +778,7 @@ register BUSTIME {
735 count 2 778 count 2
736 mask BOFF 0xf0 779 mask BOFF 0xf0
737 mask BON 0x0f 780 mask BON 0x0f
781 dont_generate_debug_code
738} 782}
739 783
740/* 784/*
@@ -749,6 +793,7 @@ register BUSSPD {
749 mask STBON 0x07 793 mask STBON 0x07
750 mask DFTHRSH_100 0xc0 794 mask DFTHRSH_100 0xc0
751 mask DFTHRSH_75 0x80 795 mask DFTHRSH_75 0x80
796 dont_generate_debug_code
752} 797}
753 798
754/* aic7850/55/60/70/80/95 only */ 799/* aic7850/55/60/70/80/95 only */
@@ -756,6 +801,7 @@ register DSPCISTATUS {
756 address 0x086 801 address 0x086
757 count 4 802 count 4
758 mask DFTHRSH_100 0xc0 803 mask DFTHRSH_100 0xc0
804 dont_generate_debug_code
759} 805}
760 806
761/* aic7890/91/96/97 only */ 807/* aic7890/91/96/97 only */
@@ -764,6 +810,7 @@ register HS_MAILBOX {
764 mask HOST_MAILBOX 0xF0 810 mask HOST_MAILBOX 0xF0
765 mask SEQ_MAILBOX 0x0F 811 mask SEQ_MAILBOX 0x0F
766 mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ 812 mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */
813 dont_generate_debug_code
767} 814}
768 815
769const HOST_MAILBOX_SHIFT 4 816const HOST_MAILBOX_SHIFT 4
@@ -784,6 +831,7 @@ register HCNTRL {
784 field INTEN 0x02 831 field INTEN 0x02
785 field CHIPRST 0x01 832 field CHIPRST 0x01
786 field CHIPRSTACK 0x01 833 field CHIPRSTACK 0x01
834 dont_generate_debug_code
787} 835}
788 836
789/* 837/*
@@ -795,12 +843,14 @@ register HADDR {
795 address 0x088 843 address 0x088
796 size 4 844 size 4
797 access_mode RW 845 access_mode RW
846 dont_generate_debug_code
798} 847}
799 848
800register HCNT { 849register HCNT {
801 address 0x08c 850 address 0x08c
802 size 3 851 size 3
803 access_mode RW 852 access_mode RW
853 dont_generate_debug_code
804} 854}
805 855
806/* 856/*
@@ -810,6 +860,7 @@ register HCNT {
810register SCBPTR { 860register SCBPTR {
811 address 0x090 861 address 0x090
812 access_mode RW 862 access_mode RW
863 dont_generate_debug_code
813} 864}
814 865
815/* 866/*
@@ -878,6 +929,7 @@ register INTSTAT {
878 929
879 mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */ 930 mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */
880 mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) 931 mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
932 dont_generate_debug_code
881} 933}
882 934
883/* 935/*
@@ -911,6 +963,7 @@ register CLRINT {
911 field CLRSCSIINT 0x04 963 field CLRSCSIINT 0x04
912 field CLRCMDINT 0x02 964 field CLRCMDINT 0x02
913 field CLRSEQINT 0x01 965 field CLRSEQINT 0x01
966 dont_generate_debug_code
914} 967}
915 968
916register DFCNTRL { 969register DFCNTRL {
@@ -944,6 +997,7 @@ register DFSTATUS {
944register DFWADDR { 997register DFWADDR {
945 address 0x95 998 address 0x95
946 access_mode RW 999 access_mode RW
1000 dont_generate_debug_code
947} 1001}
948 1002
949register DFRADDR { 1003register DFRADDR {
@@ -954,6 +1008,7 @@ register DFRADDR {
954register DFDAT { 1008register DFDAT {
955 address 0x099 1009 address 0x099
956 access_mode RW 1010 access_mode RW
1011 dont_generate_debug_code
957} 1012}
958 1013
959/* 1014/*
@@ -967,6 +1022,7 @@ register SCBCNT {
967 count 1 1022 count 1
968 field SCBAUTO 0x80 1023 field SCBAUTO 0x80
969 mask SCBCNT_MASK 0x1f 1024 mask SCBCNT_MASK 0x1f
1025 dont_generate_debug_code
970} 1026}
971 1027
972/* 1028/*
@@ -977,6 +1033,7 @@ register QINFIFO {
977 address 0x09b 1033 address 0x09b
978 access_mode RW 1034 access_mode RW
979 count 12 1035 count 12
1036 dont_generate_debug_code
980} 1037}
981 1038
982/* 1039/*
@@ -996,6 +1053,7 @@ register QOUTFIFO {
996 address 0x09d 1053 address 0x09d
997 access_mode WO 1054 access_mode WO
998 count 7 1055 count 7
1056 dont_generate_debug_code
999} 1057}
1000 1058
1001register CRCCONTROL1 { 1059register CRCCONTROL1 {
@@ -1008,6 +1066,7 @@ register CRCCONTROL1 {
1008 field CRCREQCHKEN 0x10 1066 field CRCREQCHKEN 0x10
1009 field TARGCRCENDEN 0x08 1067 field TARGCRCENDEN 0x08
1010 field TARGCRCCNTEN 0x04 1068 field TARGCRCCNTEN 0x04
1069 dont_generate_debug_code
1011} 1070}
1012 1071
1013 1072
@@ -1040,6 +1099,7 @@ register SFUNCT {
1040 access_mode RW 1099 access_mode RW
1041 count 4 1100 count 4
1042 field ALT_MODE 0x80 1101 field ALT_MODE 0x80
1102 dont_generate_debug_code
1043} 1103}
1044 1104
1045/* 1105/*
@@ -1053,24 +1113,31 @@ scb {
1053 size 4 1113 size 4
1054 alias SCB_RESIDUAL_DATACNT 1114 alias SCB_RESIDUAL_DATACNT
1055 alias SCB_CDB_STORE 1115 alias SCB_CDB_STORE
1116 dont_generate_debug_code
1056 } 1117 }
1057 SCB_RESIDUAL_SGPTR { 1118 SCB_RESIDUAL_SGPTR {
1058 size 4 1119 size 4
1120 dont_generate_debug_code
1059 } 1121 }
1060 SCB_SCSI_STATUS { 1122 SCB_SCSI_STATUS {
1061 size 1 1123 size 1
1124 dont_generate_debug_code
1062 } 1125 }
1063 SCB_TARGET_PHASES { 1126 SCB_TARGET_PHASES {
1064 size 1 1127 size 1
1128 dont_generate_debug_code
1065 } 1129 }
1066 SCB_TARGET_DATA_DIR { 1130 SCB_TARGET_DATA_DIR {
1067 size 1 1131 size 1
1132 dont_generate_debug_code
1068 } 1133 }
1069 SCB_TARGET_ITAG { 1134 SCB_TARGET_ITAG {
1070 size 1 1135 size 1
1136 dont_generate_debug_code
1071 } 1137 }
1072 SCB_DATAPTR { 1138 SCB_DATAPTR {
1073 size 4 1139 size 4
1140 dont_generate_debug_code
1074 } 1141 }
1075 SCB_DATACNT { 1142 SCB_DATACNT {
1076 /* 1143 /*
@@ -1080,12 +1147,14 @@ scb {
1080 size 4 1147 size 4
1081 field SG_LAST_SEG 0x80 /* In the fourth byte */ 1148 field SG_LAST_SEG 0x80 /* In the fourth byte */
1082 mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ 1149 mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
1150 dont_generate_debug_code
1083 } 1151 }
1084 SCB_SGPTR { 1152 SCB_SGPTR {
1085 size 4 1153 size 4
1086 field SG_RESID_VALID 0x04 /* In the first byte */ 1154 field SG_RESID_VALID 0x04 /* In the first byte */
1087 field SG_FULL_RESID 0x02 /* In the first byte */ 1155 field SG_FULL_RESID 0x02 /* In the first byte */
1088 field SG_LIST_NULL 0x01 /* In the first byte */ 1156 field SG_LIST_NULL 0x01 /* In the first byte */
1157 dont_generate_debug_code
1089 } 1158 }
1090 SCB_CONTROL { 1159 SCB_CONTROL {
1091 size 1 1160 size 1
@@ -1115,22 +1184,27 @@ scb {
1115 } 1184 }
1116 SCB_CDB_LEN { 1185 SCB_CDB_LEN {
1117 size 1 1186 size 1
1187 dont_generate_debug_code
1118 } 1188 }
1119 SCB_SCSIRATE { 1189 SCB_SCSIRATE {
1120 size 1 1190 size 1
1191 dont_generate_debug_code
1121 } 1192 }
1122 SCB_SCSIOFFSET { 1193 SCB_SCSIOFFSET {
1123 size 1 1194 size 1
1124 count 1 1195 count 1
1196 dont_generate_debug_code
1125 } 1197 }
1126 SCB_NEXT { 1198 SCB_NEXT {
1127 size 1 1199 size 1
1200 dont_generate_debug_code
1128 } 1201 }
1129 SCB_64_SPARE { 1202 SCB_64_SPARE {
1130 size 16 1203 size 16
1131 } 1204 }
1132 SCB_64_BTT { 1205 SCB_64_BTT {
1133 size 16 1206 size 16
1207 dont_generate_debug_code
1134 } 1208 }
1135} 1209}
1136 1210
@@ -1149,6 +1223,7 @@ register SEECTL_2840 {
1149 field CS_2840 0x04 1223 field CS_2840 0x04
1150 field CK_2840 0x02 1224 field CK_2840 0x02
1151 field DO_2840 0x01 1225 field DO_2840 0x01
1226 dont_generate_debug_code
1152} 1227}
1153 1228
1154register STATUS_2840 { 1229register STATUS_2840 {
@@ -1159,6 +1234,7 @@ register STATUS_2840 {
1159 mask BIOS_SEL 0x60 1234 mask BIOS_SEL 0x60
1160 mask ADSEL 0x1e 1235 mask ADSEL 0x1e
1161 field DI_2840 0x01 1236 field DI_2840 0x01
1237 dont_generate_debug_code
1162} 1238}
1163 1239
1164/* --------------------- AIC-7870-only definitions -------------------- */ 1240/* --------------------- AIC-7870-only definitions -------------------- */
@@ -1166,18 +1242,22 @@ register STATUS_2840 {
1166register CCHADDR { 1242register CCHADDR {
1167 address 0x0E0 1243 address 0x0E0
1168 size 8 1244 size 8
1245 dont_generate_debug_code
1169} 1246}
1170 1247
1171register CCHCNT { 1248register CCHCNT {
1172 address 0x0E8 1249 address 0x0E8
1250 dont_generate_debug_code
1173} 1251}
1174 1252
1175register CCSGRAM { 1253register CCSGRAM {
1176 address 0x0E9 1254 address 0x0E9
1255 dont_generate_debug_code
1177} 1256}
1178 1257
1179register CCSGADDR { 1258register CCSGADDR {
1180 address 0x0EA 1259 address 0x0EA
1260 dont_generate_debug_code
1181} 1261}
1182 1262
1183register CCSGCTL { 1263register CCSGCTL {
@@ -1186,11 +1266,13 @@ register CCSGCTL {
1186 field CCSGEN 0x08 1266 field CCSGEN 0x08
1187 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */ 1267 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */
1188 field CCSGRESET 0x01 1268 field CCSGRESET 0x01
1269 dont_generate_debug_code
1189} 1270}
1190 1271
1191register CCSCBCNT { 1272register CCSCBCNT {
1192 address 0xEF 1273 address 0xEF
1193 count 1 1274 count 1
1275 dont_generate_debug_code
1194} 1276}
1195 1277
1196register CCSCBCTL { 1278register CCSCBCTL {
@@ -1201,14 +1283,17 @@ register CCSCBCTL {
1201 field CCSCBEN 0x08 1283 field CCSCBEN 0x08
1202 field CCSCBDIR 0x04 1284 field CCSCBDIR 0x04
1203 field CCSCBRESET 0x01 1285 field CCSCBRESET 0x01
1286 dont_generate_debug_code
1204} 1287}
1205 1288
1206register CCSCBADDR { 1289register CCSCBADDR {
1207 address 0x0ED 1290 address 0x0ED
1291 dont_generate_debug_code
1208} 1292}
1209 1293
1210register CCSCBRAM { 1294register CCSCBRAM {
1211 address 0xEC 1295 address 0xEC
1296 dont_generate_debug_code
1212} 1297}
1213 1298
1214/* 1299/*
@@ -1218,23 +1303,28 @@ register SCBBADDR {
1218 address 0x0F0 1303 address 0x0F0
1219 access_mode RW 1304 access_mode RW
1220 count 3 1305 count 3
1306 dont_generate_debug_code
1221} 1307}
1222 1308
1223register CCSCBPTR { 1309register CCSCBPTR {
1224 address 0x0F1 1310 address 0x0F1
1311 dont_generate_debug_code
1225} 1312}
1226 1313
1227register HNSCB_QOFF { 1314register HNSCB_QOFF {
1228 address 0x0F4 1315 address 0x0F4
1229 count 4 1316 count 4
1317 dont_generate_debug_code
1230} 1318}
1231 1319
1232register SNSCB_QOFF { 1320register SNSCB_QOFF {
1233 address 0x0F6 1321 address 0x0F6
1322 dont_generate_debug_code
1234} 1323}
1235 1324
1236register SDSCB_QOFF { 1325register SDSCB_QOFF {
1237 address 0x0F8 1326 address 0x0F8
1327 dont_generate_debug_code
1238} 1328}
1239 1329
1240register QOFF_CTLSTA { 1330register QOFF_CTLSTA {
@@ -1244,6 +1334,7 @@ register QOFF_CTLSTA {
1244 field SDSCB_ROLLOVER 0x10 1334 field SDSCB_ROLLOVER 0x10
1245 mask SCB_QSIZE 0x07 1335 mask SCB_QSIZE 0x07
1246 mask SCB_QSIZE_256 0x06 1336 mask SCB_QSIZE_256 0x06
1337 dont_generate_debug_code
1247} 1338}
1248 1339
1249register DFF_THRSH { 1340register DFF_THRSH {
@@ -1267,6 +1358,7 @@ register DFF_THRSH {
1267 mask WR_DFTHRSH_90 0x60 1358 mask WR_DFTHRSH_90 0x60
1268 mask WR_DFTHRSH_MAX 0x70 1359 mask WR_DFTHRSH_MAX 0x70
1269 count 4 1360 count 4
1361 dont_generate_debug_code
1270} 1362}
1271 1363
1272register SG_CACHE_PRE { 1364register SG_CACHE_PRE {
@@ -1275,6 +1367,7 @@ register SG_CACHE_PRE {
1275 mask SG_ADDR_MASK 0xf8 1367 mask SG_ADDR_MASK 0xf8
1276 field LAST_SEG 0x02 1368 field LAST_SEG 0x02
1277 field LAST_SEG_DONE 0x01 1369 field LAST_SEG_DONE 0x01
1370 dont_generate_debug_code
1278} 1371}
1279 1372
1280register SG_CACHE_SHADOW { 1373register SG_CACHE_SHADOW {
@@ -1283,6 +1376,7 @@ register SG_CACHE_SHADOW {
1283 mask SG_ADDR_MASK 0xf8 1376 mask SG_ADDR_MASK 0xf8
1284 field LAST_SEG 0x02 1377 field LAST_SEG 0x02
1285 field LAST_SEG_DONE 0x01 1378 field LAST_SEG_DONE 0x01
1379 dont_generate_debug_code
1286} 1380}
1287/* ---------------------- Scratch RAM Offsets ------------------------- */ 1381/* ---------------------- Scratch RAM Offsets ------------------------- */
1288/* These offsets are either to values that are initialized by the board's 1382/* These offsets are either to values that are initialized by the board's
@@ -1309,6 +1403,7 @@ scratch_ram {
1309 BUSY_TARGETS { 1403 BUSY_TARGETS {
1310 alias TARG_SCSIRATE 1404 alias TARG_SCSIRATE
1311 size 16 1405 size 16
1406 dont_generate_debug_code
1312 } 1407 }
1313 /* 1408 /*
1314 * Bit vector of targets that have ULTRA enabled as set by 1409 * Bit vector of targets that have ULTRA enabled as set by
@@ -1321,6 +1416,7 @@ scratch_ram {
1321 alias CMDSIZE_TABLE 1416 alias CMDSIZE_TABLE
1322 size 2 1417 size 2
1323 count 2 1418 count 2
1419 dont_generate_debug_code
1324 } 1420 }
1325 /* 1421 /*
1326 * Bit vector of targets that have disconnection disabled as set by 1422 * Bit vector of targets that have disconnection disabled as set by
@@ -1331,6 +1427,7 @@ scratch_ram {
1331 DISC_DSB { 1427 DISC_DSB {
1332 size 2 1428 size 2
1333 count 6 1429 count 6
1430 dont_generate_debug_code
1334 } 1431 }
1335 CMDSIZE_TABLE_TAIL { 1432 CMDSIZE_TABLE_TAIL {
1336 size 4 1433 size 4
@@ -1341,12 +1438,14 @@ scratch_ram {
1341 */ 1438 */
1342 MWI_RESIDUAL { 1439 MWI_RESIDUAL {
1343 size 1 1440 size 1
1441 dont_generate_debug_code
1344 } 1442 }
1345 /* 1443 /*
1346 * SCBID of the next SCB to be started by the controller. 1444 * SCBID of the next SCB to be started by the controller.
1347 */ 1445 */
1348 NEXT_QUEUED_SCB { 1446 NEXT_QUEUED_SCB {
1349 size 1 1447 size 1
1448 dont_generate_debug_code
1350 } 1449 }
1351 /* 1450 /*
1352 * Single byte buffer used to designate the type or message 1451 * Single byte buffer used to designate the type or message
@@ -1354,6 +1453,7 @@ scratch_ram {
1354 */ 1453 */
1355 MSG_OUT { 1454 MSG_OUT {
1356 size 1 1455 size 1
1456 dont_generate_debug_code
1357 } 1457 }
1358 /* Parameters for DMA Logic */ 1458 /* Parameters for DMA Logic */
1359 DMAPARAMS { 1459 DMAPARAMS {
@@ -1369,6 +1469,7 @@ scratch_ram {
1369 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ 1469 field DIRECTION 0x04 /* Set indicates PCI->SCSI */
1370 field FIFOFLUSH 0x02 1470 field FIFOFLUSH 0x02
1371 field FIFORESET 0x01 1471 field FIFORESET 0x01
1472 dont_generate_debug_code
1372 } 1473 }
1373 SEQ_FLAGS { 1474 SEQ_FLAGS {
1374 size 1 1475 size 1
@@ -1390,9 +1491,11 @@ scratch_ram {
1390 */ 1491 */
1391 SAVED_SCSIID { 1492 SAVED_SCSIID {
1392 size 1 1493 size 1
1494 dont_generate_debug_code
1393 } 1495 }
1394 SAVED_LUN { 1496 SAVED_LUN {
1395 size 1 1497 size 1
1498 dont_generate_debug_code
1396 } 1499 }
1397 /* 1500 /*
1398 * The last bus phase as seen by the sequencer. 1501 * The last bus phase as seen by the sequencer.
@@ -1417,6 +1520,7 @@ scratch_ram {
1417 */ 1520 */
1418 WAITING_SCBH { 1521 WAITING_SCBH {
1419 size 1 1522 size 1
1523 dont_generate_debug_code
1420 } 1524 }
1421 /* 1525 /*
1422 * head of list of SCBs that are 1526 * head of list of SCBs that are
@@ -1425,6 +1529,7 @@ scratch_ram {
1425 */ 1529 */
1426 DISCONNECTED_SCBH { 1530 DISCONNECTED_SCBH {
1427 size 1 1531 size 1
1532 dont_generate_debug_code
1428 } 1533 }
1429 /* 1534 /*
1430 * head of list of SCBs that are 1535 * head of list of SCBs that are
@@ -1432,6 +1537,7 @@ scratch_ram {
1432 */ 1537 */
1433 FREE_SCBH { 1538 FREE_SCBH {
1434 size 1 1539 size 1
1540 dont_generate_debug_code
1435 } 1541 }
1436 /* 1542 /*
1437 * head of list of SCBs that have 1543 * head of list of SCBs that have
@@ -1446,6 +1552,7 @@ scratch_ram {
1446 */ 1552 */
1447 HSCB_ADDR { 1553 HSCB_ADDR {
1448 size 4 1554 size 4
1555 dont_generate_debug_code
1449 } 1556 }
1450 /* 1557 /*
1451 * Base address of our shared data with the kernel driver in host 1558 * Base address of our shared data with the kernel driver in host
@@ -1454,15 +1561,19 @@ scratch_ram {
1454 */ 1561 */
1455 SHARED_DATA_ADDR { 1562 SHARED_DATA_ADDR {
1456 size 4 1563 size 4
1564 dont_generate_debug_code
1457 } 1565 }
1458 KERNEL_QINPOS { 1566 KERNEL_QINPOS {
1459 size 1 1567 size 1
1568 dont_generate_debug_code
1460 } 1569 }
1461 QINPOS { 1570 QINPOS {
1462 size 1 1571 size 1
1572 dont_generate_debug_code
1463 } 1573 }
1464 QOUTPOS { 1574 QOUTPOS {
1465 size 1 1575 size 1
1576 dont_generate_debug_code
1466 } 1577 }
1467 /* 1578 /*
1468 * Kernel and sequencer offsets into the queue of 1579 * Kernel and sequencer offsets into the queue of
@@ -1471,9 +1582,11 @@ scratch_ram {
1471 */ 1582 */
1472 KERNEL_TQINPOS { 1583 KERNEL_TQINPOS {
1473 size 1 1584 size 1
1585 dont_generate_debug_code
1474 } 1586 }
1475 TQINPOS { 1587 TQINPOS {
1476 size 1 1588 size 1
1589 dont_generate_debug_code
1477 } 1590 }
1478 ARG_1 { 1591 ARG_1 {
1479 size 1 1592 size 1
@@ -1486,10 +1599,12 @@ scratch_ram {
1486 mask CONT_MSG_LOOP 0x04 1599 mask CONT_MSG_LOOP 0x04
1487 mask CONT_TARG_SESSION 0x02 1600 mask CONT_TARG_SESSION 0x02
1488 alias RETURN_1 1601 alias RETURN_1
1602 dont_generate_debug_code
1489 } 1603 }
1490 ARG_2 { 1604 ARG_2 {
1491 size 1 1605 size 1
1492 alias RETURN_2 1606 alias RETURN_2
1607 dont_generate_debug_code
1493 } 1608 }
1494 1609
1495 /* 1610 /*
@@ -1498,6 +1613,7 @@ scratch_ram {
1498 LAST_MSG { 1613 LAST_MSG {
1499 size 1 1614 size 1
1500 alias TARG_IMMEDIATE_SCB 1615 alias TARG_IMMEDIATE_SCB
1616 dont_generate_debug_code
1501 } 1617 }
1502 1618
1503 /* 1619 /*
@@ -1513,6 +1629,7 @@ scratch_ram {
1513 field ENAUTOATNO 0x08 1629 field ENAUTOATNO 0x08
1514 field ENAUTOATNI 0x04 1630 field ENAUTOATNI 0x04
1515 field ENAUTOATNP 0x02 1631 field ENAUTOATNP 0x02
1632 dont_generate_debug_code
1516 } 1633 }
1517} 1634}
1518 1635
@@ -1533,12 +1650,14 @@ scratch_ram {
1533 field HA_274_EXTENDED_TRANS 0x01 1650 field HA_274_EXTENDED_TRANS 0x01
1534 alias INITIATOR_TAG 1651 alias INITIATOR_TAG
1535 count 1 1652 count 1
1653 dont_generate_debug_code
1536 } 1654 }
1537 1655
1538 SEQ_FLAGS2 { 1656 SEQ_FLAGS2 {
1539 size 1 1657 size 1
1540 field SCB_DMA 0x01 1658 field SCB_DMA 0x01
1541 field TARGET_MSG_PENDING 0x02 1659 field TARGET_MSG_PENDING 0x02
1660 dont_generate_debug_code
1542 } 1661 }
1543} 1662}
1544 1663
@@ -1562,6 +1681,7 @@ scratch_ram {
1562 field ENSPCHK 0x20 1681 field ENSPCHK 0x20
1563 mask HSCSIID 0x07 /* our SCSI ID */ 1682 mask HSCSIID 0x07 /* our SCSI ID */
1564 mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */ 1683 mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */
1684 dont_generate_debug_code
1565 } 1685 }
1566 INTDEF { 1686 INTDEF {
1567 address 0x05c 1687 address 0x05c
@@ -1569,11 +1689,13 @@ scratch_ram {
1569 count 1 1689 count 1
1570 field EDGE_TRIG 0x80 1690 field EDGE_TRIG 0x80
1571 mask VECTOR 0x0f 1691 mask VECTOR 0x0f
1692 dont_generate_debug_code
1572 } 1693 }
1573 HOSTCONF { 1694 HOSTCONF {
1574 address 0x05d 1695 address 0x05d
1575 size 1 1696 size 1
1576 count 1 1697 count 1
1698 dont_generate_debug_code
1577 } 1699 }
1578 HA_274_BIOSCTRL { 1700 HA_274_BIOSCTRL {
1579 address 0x05f 1701 address 0x05f
@@ -1582,6 +1704,7 @@ scratch_ram {
1582 mask BIOSMODE 0x30 1704 mask BIOSMODE 0x30
1583 mask BIOSDISABLED 0x30 1705 mask BIOSDISABLED 0x30
1584 field CHANNEL_B_PRIMARY 0x08 1706 field CHANNEL_B_PRIMARY 0x08
1707 dont_generate_debug_code
1585 } 1708 }
1586} 1709}
1587 1710
@@ -1595,6 +1718,7 @@ scratch_ram {
1595 TARG_OFFSET { 1718 TARG_OFFSET {
1596 size 16 1719 size 16
1597 count 1 1720 count 1
1721 dont_generate_debug_code
1598 } 1722 }
1599} 1723}
1600 1724
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 0ae2b4605d09..e6f2bb7365e6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -814,6 +814,7 @@ ahc_intr(struct ahc_softc *ahc)
814static void 814static void
815ahc_restart(struct ahc_softc *ahc) 815ahc_restart(struct ahc_softc *ahc)
816{ 816{
817 uint8_t sblkctl;
817 818
818 ahc_pause(ahc); 819 ahc_pause(ahc);
819 820
@@ -868,6 +869,12 @@ ahc_restart(struct ahc_softc *ahc)
868 ahc_outb(ahc, SEQADDR0, 0); 869 ahc_outb(ahc, SEQADDR0, 0);
869 ahc_outb(ahc, SEQADDR1, 0); 870 ahc_outb(ahc, SEQADDR1, 0);
870 871
872 /*
873 * Take the LED out of diagnostic mode on PM resume, too
874 */
875 sblkctl = ahc_inb(ahc, SBLKCTL);
876 ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
877
871 ahc_unpause(ahc); 878 ahc_unpause(ahc);
872} 879}
873 880
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index 2ce1febca207..e821082a4f47 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -27,20 +27,6 @@ ahc_reg_print_t ahc_sxfrctl0_print;
27#endif 27#endif
28 28
29#if AIC_DEBUG_REGISTERS 29#if AIC_DEBUG_REGISTERS
30ahc_reg_print_t ahc_sxfrctl1_print;
31#else
32#define ahc_sxfrctl1_print(regvalue, cur_col, wrap) \
33 ahc_print_register(NULL, 0, "SXFRCTL1", 0x02, regvalue, cur_col, wrap)
34#endif
35
36#if AIC_DEBUG_REGISTERS
37ahc_reg_print_t ahc_scsisigo_print;
38#else
39#define ahc_scsisigo_print(regvalue, cur_col, wrap) \
40 ahc_print_register(NULL, 0, "SCSISIGO", 0x03, regvalue, cur_col, wrap)
41#endif
42
43#if AIC_DEBUG_REGISTERS
44ahc_reg_print_t ahc_scsisigi_print; 30ahc_reg_print_t ahc_scsisigi_print;
45#else 31#else
46#define ahc_scsisigi_print(regvalue, cur_col, wrap) \ 32#define ahc_scsisigi_print(regvalue, cur_col, wrap) \
@@ -55,55 +41,6 @@ ahc_reg_print_t ahc_scsirate_print;
55#endif 41#endif
56 42
57#if AIC_DEBUG_REGISTERS 43#if AIC_DEBUG_REGISTERS
58ahc_reg_print_t ahc_scsiid_print;
59#else
60#define ahc_scsiid_print(regvalue, cur_col, wrap) \
61 ahc_print_register(NULL, 0, "SCSIID", 0x05, regvalue, cur_col, wrap)
62#endif
63
64#if AIC_DEBUG_REGISTERS
65ahc_reg_print_t ahc_scsidatl_print;
66#else
67#define ahc_scsidatl_print(regvalue, cur_col, wrap) \
68 ahc_print_register(NULL, 0, "SCSIDATL", 0x06, regvalue, cur_col, wrap)
69#endif
70
71#if AIC_DEBUG_REGISTERS
72ahc_reg_print_t ahc_scsidath_print;
73#else
74#define ahc_scsidath_print(regvalue, cur_col, wrap) \
75 ahc_print_register(NULL, 0, "SCSIDATH", 0x07, regvalue, cur_col, wrap)
76#endif
77
78#if AIC_DEBUG_REGISTERS
79ahc_reg_print_t ahc_stcnt_print;
80#else
81#define ahc_stcnt_print(regvalue, cur_col, wrap) \
82 ahc_print_register(NULL, 0, "STCNT", 0x08, regvalue, cur_col, wrap)
83#endif
84
85#if AIC_DEBUG_REGISTERS
86ahc_reg_print_t ahc_optionmode_print;
87#else
88#define ahc_optionmode_print(regvalue, cur_col, wrap) \
89 ahc_print_register(NULL, 0, "OPTIONMODE", 0x08, regvalue, cur_col, wrap)
90#endif
91
92#if AIC_DEBUG_REGISTERS
93ahc_reg_print_t ahc_targcrccnt_print;
94#else
95#define ahc_targcrccnt_print(regvalue, cur_col, wrap) \
96 ahc_print_register(NULL, 0, "TARGCRCCNT", 0x0a, regvalue, cur_col, wrap)
97#endif
98
99#if AIC_DEBUG_REGISTERS
100ahc_reg_print_t ahc_clrsint0_print;
101#else
102#define ahc_clrsint0_print(regvalue, cur_col, wrap) \
103 ahc_print_register(NULL, 0, "CLRSINT0", 0x0b, regvalue, cur_col, wrap)
104#endif
105
106#if AIC_DEBUG_REGISTERS
107ahc_reg_print_t ahc_sstat0_print; 44ahc_reg_print_t ahc_sstat0_print;
108#else 45#else
109#define ahc_sstat0_print(regvalue, cur_col, wrap) \ 46#define ahc_sstat0_print(regvalue, cur_col, wrap) \
@@ -111,13 +48,6 @@ ahc_reg_print_t ahc_sstat0_print;
111#endif 48#endif
112 49
113#if AIC_DEBUG_REGISTERS 50#if AIC_DEBUG_REGISTERS
114ahc_reg_print_t ahc_clrsint1_print;
115#else
116#define ahc_clrsint1_print(regvalue, cur_col, wrap) \
117 ahc_print_register(NULL, 0, "CLRSINT1", 0x0c, regvalue, cur_col, wrap)
118#endif
119
120#if AIC_DEBUG_REGISTERS
121ahc_reg_print_t ahc_sstat1_print; 51ahc_reg_print_t ahc_sstat1_print;
122#else 52#else
123#define ahc_sstat1_print(regvalue, cur_col, wrap) \ 53#define ahc_sstat1_print(regvalue, cur_col, wrap) \
@@ -139,13 +69,6 @@ ahc_reg_print_t ahc_sstat3_print;
139#endif 69#endif
140 70
141#if AIC_DEBUG_REGISTERS 71#if AIC_DEBUG_REGISTERS
142ahc_reg_print_t ahc_scsiid_ultra2_print;
143#else
144#define ahc_scsiid_ultra2_print(regvalue, cur_col, wrap) \
145 ahc_print_register(NULL, 0, "SCSIID_ULTRA2", 0x0f, regvalue, cur_col, wrap)
146#endif
147
148#if AIC_DEBUG_REGISTERS
149ahc_reg_print_t ahc_simode0_print; 72ahc_reg_print_t ahc_simode0_print;
150#else 73#else
151#define ahc_simode0_print(regvalue, cur_col, wrap) \ 74#define ahc_simode0_print(regvalue, cur_col, wrap) \
@@ -167,76 +90,6 @@ ahc_reg_print_t ahc_scsibusl_print;
167#endif 90#endif
168 91
169#if AIC_DEBUG_REGISTERS 92#if AIC_DEBUG_REGISTERS
170ahc_reg_print_t ahc_scsibush_print;
171#else
172#define ahc_scsibush_print(regvalue, cur_col, wrap) \
173 ahc_print_register(NULL, 0, "SCSIBUSH", 0x13, regvalue, cur_col, wrap)
174#endif
175
176#if AIC_DEBUG_REGISTERS
177ahc_reg_print_t ahc_sxfrctl2_print;
178#else
179#define ahc_sxfrctl2_print(regvalue, cur_col, wrap) \
180 ahc_print_register(NULL, 0, "SXFRCTL2", 0x13, regvalue, cur_col, wrap)
181#endif
182
183#if AIC_DEBUG_REGISTERS
184ahc_reg_print_t ahc_shaddr_print;
185#else
186#define ahc_shaddr_print(regvalue, cur_col, wrap) \
187 ahc_print_register(NULL, 0, "SHADDR", 0x14, regvalue, cur_col, wrap)
188#endif
189
190#if AIC_DEBUG_REGISTERS
191ahc_reg_print_t ahc_seltimer_print;
192#else
193#define ahc_seltimer_print(regvalue, cur_col, wrap) \
194 ahc_print_register(NULL, 0, "SELTIMER", 0x18, regvalue, cur_col, wrap)
195#endif
196
197#if AIC_DEBUG_REGISTERS
198ahc_reg_print_t ahc_selid_print;
199#else
200#define ahc_selid_print(regvalue, cur_col, wrap) \
201 ahc_print_register(NULL, 0, "SELID", 0x19, regvalue, cur_col, wrap)
202#endif
203
204#if AIC_DEBUG_REGISTERS
205ahc_reg_print_t ahc_scamctl_print;
206#else
207#define ahc_scamctl_print(regvalue, cur_col, wrap) \
208 ahc_print_register(NULL, 0, "SCAMCTL", 0x1a, regvalue, cur_col, wrap)
209#endif
210
211#if AIC_DEBUG_REGISTERS
212ahc_reg_print_t ahc_targid_print;
213#else
214#define ahc_targid_print(regvalue, cur_col, wrap) \
215 ahc_print_register(NULL, 0, "TARGID", 0x1b, regvalue, cur_col, wrap)
216#endif
217
218#if AIC_DEBUG_REGISTERS
219ahc_reg_print_t ahc_spiocap_print;
220#else
221#define ahc_spiocap_print(regvalue, cur_col, wrap) \
222 ahc_print_register(NULL, 0, "SPIOCAP", 0x1b, regvalue, cur_col, wrap)
223#endif
224
225#if AIC_DEBUG_REGISTERS
226ahc_reg_print_t ahc_brdctl_print;
227#else
228#define ahc_brdctl_print(regvalue, cur_col, wrap) \
229 ahc_print_register(NULL, 0, "BRDCTL", 0x1d, regvalue, cur_col, wrap)
230#endif
231
232#if AIC_DEBUG_REGISTERS
233ahc_reg_print_t ahc_seectl_print;
234#else
235#define ahc_seectl_print(regvalue, cur_col, wrap) \
236 ahc_print_register(NULL, 0, "SEECTL", 0x1e, regvalue, cur_col, wrap)
237#endif
238
239#if AIC_DEBUG_REGISTERS
240ahc_reg_print_t ahc_sblkctl_print; 93ahc_reg_print_t ahc_sblkctl_print;
241#else 94#else
242#define ahc_sblkctl_print(regvalue, cur_col, wrap) \ 95#define ahc_sblkctl_print(regvalue, cur_col, wrap) \
@@ -244,62 +97,6 @@ ahc_reg_print_t ahc_sblkctl_print;
244#endif 97#endif
245 98
246#if AIC_DEBUG_REGISTERS 99#if AIC_DEBUG_REGISTERS
247ahc_reg_print_t ahc_busy_targets_print;
248#else
249#define ahc_busy_targets_print(regvalue, cur_col, wrap) \
250 ahc_print_register(NULL, 0, "BUSY_TARGETS", 0x20, regvalue, cur_col, wrap)
251#endif
252
253#if AIC_DEBUG_REGISTERS
254ahc_reg_print_t ahc_ultra_enb_print;
255#else
256#define ahc_ultra_enb_print(regvalue, cur_col, wrap) \
257 ahc_print_register(NULL, 0, "ULTRA_ENB", 0x30, regvalue, cur_col, wrap)
258#endif
259
260#if AIC_DEBUG_REGISTERS
261ahc_reg_print_t ahc_disc_dsb_print;
262#else
263#define ahc_disc_dsb_print(regvalue, cur_col, wrap) \
264 ahc_print_register(NULL, 0, "DISC_DSB", 0x32, regvalue, cur_col, wrap)
265#endif
266
267#if AIC_DEBUG_REGISTERS
268ahc_reg_print_t ahc_cmdsize_table_tail_print;
269#else
270#define ahc_cmdsize_table_tail_print(regvalue, cur_col, wrap) \
271 ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL", 0x34, regvalue, cur_col, wrap)
272#endif
273
274#if AIC_DEBUG_REGISTERS
275ahc_reg_print_t ahc_mwi_residual_print;
276#else
277#define ahc_mwi_residual_print(regvalue, cur_col, wrap) \
278 ahc_print_register(NULL, 0, "MWI_RESIDUAL", 0x38, regvalue, cur_col, wrap)
279#endif
280
281#if AIC_DEBUG_REGISTERS
282ahc_reg_print_t ahc_next_queued_scb_print;
283#else
284#define ahc_next_queued_scb_print(regvalue, cur_col, wrap) \
285 ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB", 0x39, regvalue, cur_col, wrap)
286#endif
287
288#if AIC_DEBUG_REGISTERS
289ahc_reg_print_t ahc_msg_out_print;
290#else
291#define ahc_msg_out_print(regvalue, cur_col, wrap) \
292 ahc_print_register(NULL, 0, "MSG_OUT", 0x3a, regvalue, cur_col, wrap)
293#endif
294
295#if AIC_DEBUG_REGISTERS
296ahc_reg_print_t ahc_dmaparams_print;
297#else
298#define ahc_dmaparams_print(regvalue, cur_col, wrap) \
299 ahc_print_register(NULL, 0, "DMAPARAMS", 0x3b, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahc_reg_print_t ahc_seq_flags_print; 100ahc_reg_print_t ahc_seq_flags_print;
304#else 101#else
305#define ahc_seq_flags_print(regvalue, cur_col, wrap) \ 102#define ahc_seq_flags_print(regvalue, cur_col, wrap) \
@@ -307,20 +104,6 @@ ahc_reg_print_t ahc_seq_flags_print;
307#endif 104#endif
308 105
309#if AIC_DEBUG_REGISTERS 106#if AIC_DEBUG_REGISTERS
310ahc_reg_print_t ahc_saved_scsiid_print;
311#else
312#define ahc_saved_scsiid_print(regvalue, cur_col, wrap) \
313 ahc_print_register(NULL, 0, "SAVED_SCSIID", 0x3d, regvalue, cur_col, wrap)
314#endif
315
316#if AIC_DEBUG_REGISTERS
317ahc_reg_print_t ahc_saved_lun_print;
318#else
319#define ahc_saved_lun_print(regvalue, cur_col, wrap) \
320 ahc_print_register(NULL, 0, "SAVED_LUN", 0x3e, regvalue, cur_col, wrap)
321#endif
322
323#if AIC_DEBUG_REGISTERS
324ahc_reg_print_t ahc_lastphase_print; 107ahc_reg_print_t ahc_lastphase_print;
325#else 108#else
326#define ahc_lastphase_print(regvalue, cur_col, wrap) \ 109#define ahc_lastphase_print(regvalue, cur_col, wrap) \
@@ -328,153 +111,6 @@ ahc_reg_print_t ahc_lastphase_print;
328#endif 111#endif
329 112
330#if AIC_DEBUG_REGISTERS 113#if AIC_DEBUG_REGISTERS
331ahc_reg_print_t ahc_waiting_scbh_print;
332#else
333#define ahc_waiting_scbh_print(regvalue, cur_col, wrap) \
334 ahc_print_register(NULL, 0, "WAITING_SCBH", 0x40, regvalue, cur_col, wrap)
335#endif
336
337#if AIC_DEBUG_REGISTERS
338ahc_reg_print_t ahc_disconnected_scbh_print;
339#else
340#define ahc_disconnected_scbh_print(regvalue, cur_col, wrap) \
341 ahc_print_register(NULL, 0, "DISCONNECTED_SCBH", 0x41, regvalue, cur_col, wrap)
342#endif
343
344#if AIC_DEBUG_REGISTERS
345ahc_reg_print_t ahc_free_scbh_print;
346#else
347#define ahc_free_scbh_print(regvalue, cur_col, wrap) \
348 ahc_print_register(NULL, 0, "FREE_SCBH", 0x42, regvalue, cur_col, wrap)
349#endif
350
351#if AIC_DEBUG_REGISTERS
352ahc_reg_print_t ahc_complete_scbh_print;
353#else
354#define ahc_complete_scbh_print(regvalue, cur_col, wrap) \
355 ahc_print_register(NULL, 0, "COMPLETE_SCBH", 0x43, regvalue, cur_col, wrap)
356#endif
357
358#if AIC_DEBUG_REGISTERS
359ahc_reg_print_t ahc_hscb_addr_print;
360#else
361#define ahc_hscb_addr_print(regvalue, cur_col, wrap) \
362 ahc_print_register(NULL, 0, "HSCB_ADDR", 0x44, regvalue, cur_col, wrap)
363#endif
364
365#if AIC_DEBUG_REGISTERS
366ahc_reg_print_t ahc_shared_data_addr_print;
367#else
368#define ahc_shared_data_addr_print(regvalue, cur_col, wrap) \
369 ahc_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x48, regvalue, cur_col, wrap)
370#endif
371
372#if AIC_DEBUG_REGISTERS
373ahc_reg_print_t ahc_kernel_qinpos_print;
374#else
375#define ahc_kernel_qinpos_print(regvalue, cur_col, wrap) \
376 ahc_print_register(NULL, 0, "KERNEL_QINPOS", 0x4c, regvalue, cur_col, wrap)
377#endif
378
379#if AIC_DEBUG_REGISTERS
380ahc_reg_print_t ahc_qinpos_print;
381#else
382#define ahc_qinpos_print(regvalue, cur_col, wrap) \
383 ahc_print_register(NULL, 0, "QINPOS", 0x4d, regvalue, cur_col, wrap)
384#endif
385
386#if AIC_DEBUG_REGISTERS
387ahc_reg_print_t ahc_qoutpos_print;
388#else
389#define ahc_qoutpos_print(regvalue, cur_col, wrap) \
390 ahc_print_register(NULL, 0, "QOUTPOS", 0x4e, regvalue, cur_col, wrap)
391#endif
392
393#if AIC_DEBUG_REGISTERS
394ahc_reg_print_t ahc_kernel_tqinpos_print;
395#else
396#define ahc_kernel_tqinpos_print(regvalue, cur_col, wrap) \
397 ahc_print_register(NULL, 0, "KERNEL_TQINPOS", 0x4f, regvalue, cur_col, wrap)
398#endif
399
400#if AIC_DEBUG_REGISTERS
401ahc_reg_print_t ahc_tqinpos_print;
402#else
403#define ahc_tqinpos_print(regvalue, cur_col, wrap) \
404 ahc_print_register(NULL, 0, "TQINPOS", 0x50, regvalue, cur_col, wrap)
405#endif
406
407#if AIC_DEBUG_REGISTERS
408ahc_reg_print_t ahc_arg_1_print;
409#else
410#define ahc_arg_1_print(regvalue, cur_col, wrap) \
411 ahc_print_register(NULL, 0, "ARG_1", 0x51, regvalue, cur_col, wrap)
412#endif
413
414#if AIC_DEBUG_REGISTERS
415ahc_reg_print_t ahc_arg_2_print;
416#else
417#define ahc_arg_2_print(regvalue, cur_col, wrap) \
418 ahc_print_register(NULL, 0, "ARG_2", 0x52, regvalue, cur_col, wrap)
419#endif
420
421#if AIC_DEBUG_REGISTERS
422ahc_reg_print_t ahc_last_msg_print;
423#else
424#define ahc_last_msg_print(regvalue, cur_col, wrap) \
425 ahc_print_register(NULL, 0, "LAST_MSG", 0x53, regvalue, cur_col, wrap)
426#endif
427
428#if AIC_DEBUG_REGISTERS
429ahc_reg_print_t ahc_scsiseq_template_print;
430#else
431#define ahc_scsiseq_template_print(regvalue, cur_col, wrap) \
432 ahc_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x54, regvalue, cur_col, wrap)
433#endif
434
435#if AIC_DEBUG_REGISTERS
436ahc_reg_print_t ahc_ha_274_biosglobal_print;
437#else
438#define ahc_ha_274_biosglobal_print(regvalue, cur_col, wrap) \
439 ahc_print_register(NULL, 0, "HA_274_BIOSGLOBAL", 0x56, regvalue, cur_col, wrap)
440#endif
441
442#if AIC_DEBUG_REGISTERS
443ahc_reg_print_t ahc_seq_flags2_print;
444#else
445#define ahc_seq_flags2_print(regvalue, cur_col, wrap) \
446 ahc_print_register(NULL, 0, "SEQ_FLAGS2", 0x57, regvalue, cur_col, wrap)
447#endif
448
449#if AIC_DEBUG_REGISTERS
450ahc_reg_print_t ahc_scsiconf_print;
451#else
452#define ahc_scsiconf_print(regvalue, cur_col, wrap) \
453 ahc_print_register(NULL, 0, "SCSICONF", 0x5a, regvalue, cur_col, wrap)
454#endif
455
456#if AIC_DEBUG_REGISTERS
457ahc_reg_print_t ahc_intdef_print;
458#else
459#define ahc_intdef_print(regvalue, cur_col, wrap) \
460 ahc_print_register(NULL, 0, "INTDEF", 0x5c, regvalue, cur_col, wrap)
461#endif
462
463#if AIC_DEBUG_REGISTERS
464ahc_reg_print_t ahc_hostconf_print;
465#else
466#define ahc_hostconf_print(regvalue, cur_col, wrap) \
467 ahc_print_register(NULL, 0, "HOSTCONF", 0x5d, regvalue, cur_col, wrap)
468#endif
469
470#if AIC_DEBUG_REGISTERS
471ahc_reg_print_t ahc_ha_274_biosctrl_print;
472#else
473#define ahc_ha_274_biosctrl_print(regvalue, cur_col, wrap) \
474 ahc_print_register(NULL, 0, "HA_274_BIOSCTRL", 0x5f, regvalue, cur_col, wrap)
475#endif
476
477#if AIC_DEBUG_REGISTERS
478ahc_reg_print_t ahc_seqctl_print; 114ahc_reg_print_t ahc_seqctl_print;
479#else 115#else
480#define ahc_seqctl_print(regvalue, cur_col, wrap) \ 116#define ahc_seqctl_print(regvalue, cur_col, wrap) \
@@ -482,111 +118,6 @@ ahc_reg_print_t ahc_seqctl_print;
482#endif 118#endif
483 119
484#if AIC_DEBUG_REGISTERS 120#if AIC_DEBUG_REGISTERS
485ahc_reg_print_t ahc_seqram_print;
486#else
487#define ahc_seqram_print(regvalue, cur_col, wrap) \
488 ahc_print_register(NULL, 0, "SEQRAM", 0x61, regvalue, cur_col, wrap)
489#endif
490
491#if AIC_DEBUG_REGISTERS
492ahc_reg_print_t ahc_seqaddr0_print;
493#else
494#define ahc_seqaddr0_print(regvalue, cur_col, wrap) \
495 ahc_print_register(NULL, 0, "SEQADDR0", 0x62, regvalue, cur_col, wrap)
496#endif
497
498#if AIC_DEBUG_REGISTERS
499ahc_reg_print_t ahc_seqaddr1_print;
500#else
501#define ahc_seqaddr1_print(regvalue, cur_col, wrap) \
502 ahc_print_register(NULL, 0, "SEQADDR1", 0x63, regvalue, cur_col, wrap)
503#endif
504
505#if AIC_DEBUG_REGISTERS
506ahc_reg_print_t ahc_accum_print;
507#else
508#define ahc_accum_print(regvalue, cur_col, wrap) \
509 ahc_print_register(NULL, 0, "ACCUM", 0x64, regvalue, cur_col, wrap)
510#endif
511
512#if AIC_DEBUG_REGISTERS
513ahc_reg_print_t ahc_sindex_print;
514#else
515#define ahc_sindex_print(regvalue, cur_col, wrap) \
516 ahc_print_register(NULL, 0, "SINDEX", 0x65, regvalue, cur_col, wrap)
517#endif
518
519#if AIC_DEBUG_REGISTERS
520ahc_reg_print_t ahc_dindex_print;
521#else
522#define ahc_dindex_print(regvalue, cur_col, wrap) \
523 ahc_print_register(NULL, 0, "DINDEX", 0x66, regvalue, cur_col, wrap)
524#endif
525
526#if AIC_DEBUG_REGISTERS
527ahc_reg_print_t ahc_allones_print;
528#else
529#define ahc_allones_print(regvalue, cur_col, wrap) \
530 ahc_print_register(NULL, 0, "ALLONES", 0x69, regvalue, cur_col, wrap)
531#endif
532
533#if AIC_DEBUG_REGISTERS
534ahc_reg_print_t ahc_allzeros_print;
535#else
536#define ahc_allzeros_print(regvalue, cur_col, wrap) \
537 ahc_print_register(NULL, 0, "ALLZEROS", 0x6a, regvalue, cur_col, wrap)
538#endif
539
540#if AIC_DEBUG_REGISTERS
541ahc_reg_print_t ahc_none_print;
542#else
543#define ahc_none_print(regvalue, cur_col, wrap) \
544 ahc_print_register(NULL, 0, "NONE", 0x6a, regvalue, cur_col, wrap)
545#endif
546
547#if AIC_DEBUG_REGISTERS
548ahc_reg_print_t ahc_flags_print;
549#else
550#define ahc_flags_print(regvalue, cur_col, wrap) \
551 ahc_print_register(NULL, 0, "FLAGS", 0x6b, regvalue, cur_col, wrap)
552#endif
553
554#if AIC_DEBUG_REGISTERS
555ahc_reg_print_t ahc_sindir_print;
556#else
557#define ahc_sindir_print(regvalue, cur_col, wrap) \
558 ahc_print_register(NULL, 0, "SINDIR", 0x6c, regvalue, cur_col, wrap)
559#endif
560
561#if AIC_DEBUG_REGISTERS
562ahc_reg_print_t ahc_dindir_print;
563#else
564#define ahc_dindir_print(regvalue, cur_col, wrap) \
565 ahc_print_register(NULL, 0, "DINDIR", 0x6d, regvalue, cur_col, wrap)
566#endif
567
568#if AIC_DEBUG_REGISTERS
569ahc_reg_print_t ahc_function1_print;
570#else
571#define ahc_function1_print(regvalue, cur_col, wrap) \
572 ahc_print_register(NULL, 0, "FUNCTION1", 0x6e, regvalue, cur_col, wrap)
573#endif
574
575#if AIC_DEBUG_REGISTERS
576ahc_reg_print_t ahc_stack_print;
577#else
578#define ahc_stack_print(regvalue, cur_col, wrap) \
579 ahc_print_register(NULL, 0, "STACK", 0x6f, regvalue, cur_col, wrap)
580#endif
581
582#if AIC_DEBUG_REGISTERS
583ahc_reg_print_t ahc_targ_offset_print;
584#else
585#define ahc_targ_offset_print(regvalue, cur_col, wrap) \
586 ahc_print_register(NULL, 0, "TARG_OFFSET", 0x70, regvalue, cur_col, wrap)
587#endif
588
589#if AIC_DEBUG_REGISTERS
590ahc_reg_print_t ahc_sram_base_print; 121ahc_reg_print_t ahc_sram_base_print;
591#else 122#else
592#define ahc_sram_base_print(regvalue, cur_col, wrap) \ 123#define ahc_sram_base_print(regvalue, cur_col, wrap) \
@@ -594,97 +125,6 @@ ahc_reg_print_t ahc_sram_base_print;
594#endif 125#endif
595 126
596#if AIC_DEBUG_REGISTERS 127#if AIC_DEBUG_REGISTERS
597ahc_reg_print_t ahc_bctl_print;
598#else
599#define ahc_bctl_print(regvalue, cur_col, wrap) \
600 ahc_print_register(NULL, 0, "BCTL", 0x84, regvalue, cur_col, wrap)
601#endif
602
603#if AIC_DEBUG_REGISTERS
604ahc_reg_print_t ahc_dscommand0_print;
605#else
606#define ahc_dscommand0_print(regvalue, cur_col, wrap) \
607 ahc_print_register(NULL, 0, "DSCOMMAND0", 0x84, regvalue, cur_col, wrap)
608#endif
609
610#if AIC_DEBUG_REGISTERS
611ahc_reg_print_t ahc_bustime_print;
612#else
613#define ahc_bustime_print(regvalue, cur_col, wrap) \
614 ahc_print_register(NULL, 0, "BUSTIME", 0x85, regvalue, cur_col, wrap)
615#endif
616
617#if AIC_DEBUG_REGISTERS
618ahc_reg_print_t ahc_dscommand1_print;
619#else
620#define ahc_dscommand1_print(regvalue, cur_col, wrap) \
621 ahc_print_register(NULL, 0, "DSCOMMAND1", 0x85, regvalue, cur_col, wrap)
622#endif
623
624#if AIC_DEBUG_REGISTERS
625ahc_reg_print_t ahc_busspd_print;
626#else
627#define ahc_busspd_print(regvalue, cur_col, wrap) \
628 ahc_print_register(NULL, 0, "BUSSPD", 0x86, regvalue, cur_col, wrap)
629#endif
630
631#if AIC_DEBUG_REGISTERS
632ahc_reg_print_t ahc_hs_mailbox_print;
633#else
634#define ahc_hs_mailbox_print(regvalue, cur_col, wrap) \
635 ahc_print_register(NULL, 0, "HS_MAILBOX", 0x86, regvalue, cur_col, wrap)
636#endif
637
638#if AIC_DEBUG_REGISTERS
639ahc_reg_print_t ahc_dspcistatus_print;
640#else
641#define ahc_dspcistatus_print(regvalue, cur_col, wrap) \
642 ahc_print_register(NULL, 0, "DSPCISTATUS", 0x86, regvalue, cur_col, wrap)
643#endif
644
645#if AIC_DEBUG_REGISTERS
646ahc_reg_print_t ahc_hcntrl_print;
647#else
648#define ahc_hcntrl_print(regvalue, cur_col, wrap) \
649 ahc_print_register(NULL, 0, "HCNTRL", 0x87, regvalue, cur_col, wrap)
650#endif
651
652#if AIC_DEBUG_REGISTERS
653ahc_reg_print_t ahc_haddr_print;
654#else
655#define ahc_haddr_print(regvalue, cur_col, wrap) \
656 ahc_print_register(NULL, 0, "HADDR", 0x88, regvalue, cur_col, wrap)
657#endif
658
659#if AIC_DEBUG_REGISTERS
660ahc_reg_print_t ahc_hcnt_print;
661#else
662#define ahc_hcnt_print(regvalue, cur_col, wrap) \
663 ahc_print_register(NULL, 0, "HCNT", 0x8c, regvalue, cur_col, wrap)
664#endif
665
666#if AIC_DEBUG_REGISTERS
667ahc_reg_print_t ahc_scbptr_print;
668#else
669#define ahc_scbptr_print(regvalue, cur_col, wrap) \
670 ahc_print_register(NULL, 0, "SCBPTR", 0x90, regvalue, cur_col, wrap)
671#endif
672
673#if AIC_DEBUG_REGISTERS
674ahc_reg_print_t ahc_intstat_print;
675#else
676#define ahc_intstat_print(regvalue, cur_col, wrap) \
677 ahc_print_register(NULL, 0, "INTSTAT", 0x91, regvalue, cur_col, wrap)
678#endif
679
680#if AIC_DEBUG_REGISTERS
681ahc_reg_print_t ahc_clrint_print;
682#else
683#define ahc_clrint_print(regvalue, cur_col, wrap) \
684 ahc_print_register(NULL, 0, "CLRINT", 0x92, regvalue, cur_col, wrap)
685#endif
686
687#if AIC_DEBUG_REGISTERS
688ahc_reg_print_t ahc_error_print; 128ahc_reg_print_t ahc_error_print;
689#else 129#else
690#define ahc_error_print(regvalue, cur_col, wrap) \ 130#define ahc_error_print(regvalue, cur_col, wrap) \
@@ -706,69 +146,6 @@ ahc_reg_print_t ahc_dfstatus_print;
706#endif 146#endif
707 147
708#if AIC_DEBUG_REGISTERS 148#if AIC_DEBUG_REGISTERS
709ahc_reg_print_t ahc_dfwaddr_print;
710#else
711#define ahc_dfwaddr_print(regvalue, cur_col, wrap) \
712 ahc_print_register(NULL, 0, "DFWADDR", 0x95, regvalue, cur_col, wrap)
713#endif
714
715#if AIC_DEBUG_REGISTERS
716ahc_reg_print_t ahc_dfraddr_print;
717#else
718#define ahc_dfraddr_print(regvalue, cur_col, wrap) \
719 ahc_print_register(NULL, 0, "DFRADDR", 0x97, regvalue, cur_col, wrap)
720#endif
721
722#if AIC_DEBUG_REGISTERS
723ahc_reg_print_t ahc_dfdat_print;
724#else
725#define ahc_dfdat_print(regvalue, cur_col, wrap) \
726 ahc_print_register(NULL, 0, "DFDAT", 0x99, regvalue, cur_col, wrap)
727#endif
728
729#if AIC_DEBUG_REGISTERS
730ahc_reg_print_t ahc_scbcnt_print;
731#else
732#define ahc_scbcnt_print(regvalue, cur_col, wrap) \
733 ahc_print_register(NULL, 0, "SCBCNT", 0x9a, regvalue, cur_col, wrap)
734#endif
735
736#if AIC_DEBUG_REGISTERS
737ahc_reg_print_t ahc_qinfifo_print;
738#else
739#define ahc_qinfifo_print(regvalue, cur_col, wrap) \
740 ahc_print_register(NULL, 0, "QINFIFO", 0x9b, regvalue, cur_col, wrap)
741#endif
742
743#if AIC_DEBUG_REGISTERS
744ahc_reg_print_t ahc_qincnt_print;
745#else
746#define ahc_qincnt_print(regvalue, cur_col, wrap) \
747 ahc_print_register(NULL, 0, "QINCNT", 0x9c, regvalue, cur_col, wrap)
748#endif
749
750#if AIC_DEBUG_REGISTERS
751ahc_reg_print_t ahc_qoutfifo_print;
752#else
753#define ahc_qoutfifo_print(regvalue, cur_col, wrap) \
754 ahc_print_register(NULL, 0, "QOUTFIFO", 0x9d, regvalue, cur_col, wrap)
755#endif
756
757#if AIC_DEBUG_REGISTERS
758ahc_reg_print_t ahc_crccontrol1_print;
759#else
760#define ahc_crccontrol1_print(regvalue, cur_col, wrap) \
761 ahc_print_register(NULL, 0, "CRCCONTROL1", 0x9d, regvalue, cur_col, wrap)
762#endif
763
764#if AIC_DEBUG_REGISTERS
765ahc_reg_print_t ahc_qoutcnt_print;
766#else
767#define ahc_qoutcnt_print(regvalue, cur_col, wrap) \
768 ahc_print_register(NULL, 0, "QOUTCNT", 0x9e, regvalue, cur_col, wrap)
769#endif
770
771#if AIC_DEBUG_REGISTERS
772ahc_reg_print_t ahc_scsiphase_print; 149ahc_reg_print_t ahc_scsiphase_print;
773#else 150#else
774#define ahc_scsiphase_print(regvalue, cur_col, wrap) \ 151#define ahc_scsiphase_print(regvalue, cur_col, wrap) \
@@ -776,13 +153,6 @@ ahc_reg_print_t ahc_scsiphase_print;
776#endif 153#endif
777 154
778#if AIC_DEBUG_REGISTERS 155#if AIC_DEBUG_REGISTERS
779ahc_reg_print_t ahc_sfunct_print;
780#else
781#define ahc_sfunct_print(regvalue, cur_col, wrap) \
782 ahc_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
783#endif
784
785#if AIC_DEBUG_REGISTERS
786ahc_reg_print_t ahc_scb_base_print; 156ahc_reg_print_t ahc_scb_base_print;
787#else 157#else
788#define ahc_scb_base_print(regvalue, cur_col, wrap) \ 158#define ahc_scb_base_print(regvalue, cur_col, wrap) \
@@ -790,69 +160,6 @@ ahc_reg_print_t ahc_scb_base_print;
790#endif 160#endif
791 161
792#if AIC_DEBUG_REGISTERS 162#if AIC_DEBUG_REGISTERS
793ahc_reg_print_t ahc_scb_cdb_ptr_print;
794#else
795#define ahc_scb_cdb_ptr_print(regvalue, cur_col, wrap) \
796 ahc_print_register(NULL, 0, "SCB_CDB_PTR", 0xa0, regvalue, cur_col, wrap)
797#endif
798
799#if AIC_DEBUG_REGISTERS
800ahc_reg_print_t ahc_scb_residual_sgptr_print;
801#else
802#define ahc_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
803 ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0xa4, regvalue, cur_col, wrap)
804#endif
805
806#if AIC_DEBUG_REGISTERS
807ahc_reg_print_t ahc_scb_scsi_status_print;
808#else
809#define ahc_scb_scsi_status_print(regvalue, cur_col, wrap) \
810 ahc_print_register(NULL, 0, "SCB_SCSI_STATUS", 0xa8, regvalue, cur_col, wrap)
811#endif
812
813#if AIC_DEBUG_REGISTERS
814ahc_reg_print_t ahc_scb_target_phases_print;
815#else
816#define ahc_scb_target_phases_print(regvalue, cur_col, wrap) \
817 ahc_print_register(NULL, 0, "SCB_TARGET_PHASES", 0xa9, regvalue, cur_col, wrap)
818#endif
819
820#if AIC_DEBUG_REGISTERS
821ahc_reg_print_t ahc_scb_target_data_dir_print;
822#else
823#define ahc_scb_target_data_dir_print(regvalue, cur_col, wrap) \
824 ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0xaa, regvalue, cur_col, wrap)
825#endif
826
827#if AIC_DEBUG_REGISTERS
828ahc_reg_print_t ahc_scb_target_itag_print;
829#else
830#define ahc_scb_target_itag_print(regvalue, cur_col, wrap) \
831 ahc_print_register(NULL, 0, "SCB_TARGET_ITAG", 0xab, regvalue, cur_col, wrap)
832#endif
833
834#if AIC_DEBUG_REGISTERS
835ahc_reg_print_t ahc_scb_dataptr_print;
836#else
837#define ahc_scb_dataptr_print(regvalue, cur_col, wrap) \
838 ahc_print_register(NULL, 0, "SCB_DATAPTR", 0xac, regvalue, cur_col, wrap)
839#endif
840
841#if AIC_DEBUG_REGISTERS
842ahc_reg_print_t ahc_scb_datacnt_print;
843#else
844#define ahc_scb_datacnt_print(regvalue, cur_col, wrap) \
845 ahc_print_register(NULL, 0, "SCB_DATACNT", 0xb0, regvalue, cur_col, wrap)
846#endif
847
848#if AIC_DEBUG_REGISTERS
849ahc_reg_print_t ahc_scb_sgptr_print;
850#else
851#define ahc_scb_sgptr_print(regvalue, cur_col, wrap) \
852 ahc_print_register(NULL, 0, "SCB_SGPTR", 0xb4, regvalue, cur_col, wrap)
853#endif
854
855#if AIC_DEBUG_REGISTERS
856ahc_reg_print_t ahc_scb_control_print; 163ahc_reg_print_t ahc_scb_control_print;
857#else 164#else
858#define ahc_scb_control_print(regvalue, cur_col, wrap) \ 165#define ahc_scb_control_print(regvalue, cur_col, wrap) \
@@ -880,188 +187,6 @@ ahc_reg_print_t ahc_scb_tag_print;
880 ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap) 187 ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap)
881#endif 188#endif
882 189
883#if AIC_DEBUG_REGISTERS
884ahc_reg_print_t ahc_scb_cdb_len_print;
885#else
886#define ahc_scb_cdb_len_print(regvalue, cur_col, wrap) \
887 ahc_print_register(NULL, 0, "SCB_CDB_LEN", 0xbc, regvalue, cur_col, wrap)
888#endif
889
890#if AIC_DEBUG_REGISTERS
891ahc_reg_print_t ahc_scb_scsirate_print;
892#else
893#define ahc_scb_scsirate_print(regvalue, cur_col, wrap) \
894 ahc_print_register(NULL, 0, "SCB_SCSIRATE", 0xbd, regvalue, cur_col, wrap)
895#endif
896
897#if AIC_DEBUG_REGISTERS
898ahc_reg_print_t ahc_scb_scsioffset_print;
899#else
900#define ahc_scb_scsioffset_print(regvalue, cur_col, wrap) \
901 ahc_print_register(NULL, 0, "SCB_SCSIOFFSET", 0xbe, regvalue, cur_col, wrap)
902#endif
903
904#if AIC_DEBUG_REGISTERS
905ahc_reg_print_t ahc_scb_next_print;
906#else
907#define ahc_scb_next_print(regvalue, cur_col, wrap) \
908 ahc_print_register(NULL, 0, "SCB_NEXT", 0xbf, regvalue, cur_col, wrap)
909#endif
910
911#if AIC_DEBUG_REGISTERS
912ahc_reg_print_t ahc_scb_64_spare_print;
913#else
914#define ahc_scb_64_spare_print(regvalue, cur_col, wrap) \
915 ahc_print_register(NULL, 0, "SCB_64_SPARE", 0xc0, regvalue, cur_col, wrap)
916#endif
917
918#if AIC_DEBUG_REGISTERS
919ahc_reg_print_t ahc_seectl_2840_print;
920#else
921#define ahc_seectl_2840_print(regvalue, cur_col, wrap) \
922 ahc_print_register(NULL, 0, "SEECTL_2840", 0xc0, regvalue, cur_col, wrap)
923#endif
924
925#if AIC_DEBUG_REGISTERS
926ahc_reg_print_t ahc_status_2840_print;
927#else
928#define ahc_status_2840_print(regvalue, cur_col, wrap) \
929 ahc_print_register(NULL, 0, "STATUS_2840", 0xc1, regvalue, cur_col, wrap)
930#endif
931
932#if AIC_DEBUG_REGISTERS
933ahc_reg_print_t ahc_scb_64_btt_print;
934#else
935#define ahc_scb_64_btt_print(regvalue, cur_col, wrap) \
936 ahc_print_register(NULL, 0, "SCB_64_BTT", 0xd0, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahc_reg_print_t ahc_cchaddr_print;
941#else
942#define ahc_cchaddr_print(regvalue, cur_col, wrap) \
943 ahc_print_register(NULL, 0, "CCHADDR", 0xe0, regvalue, cur_col, wrap)
944#endif
945
946#if AIC_DEBUG_REGISTERS
947ahc_reg_print_t ahc_cchcnt_print;
948#else
949#define ahc_cchcnt_print(regvalue, cur_col, wrap) \
950 ahc_print_register(NULL, 0, "CCHCNT", 0xe8, regvalue, cur_col, wrap)
951#endif
952
953#if AIC_DEBUG_REGISTERS
954ahc_reg_print_t ahc_ccsgram_print;
955#else
956#define ahc_ccsgram_print(regvalue, cur_col, wrap) \
957 ahc_print_register(NULL, 0, "CCSGRAM", 0xe9, regvalue, cur_col, wrap)
958#endif
959
960#if AIC_DEBUG_REGISTERS
961ahc_reg_print_t ahc_ccsgaddr_print;
962#else
963#define ahc_ccsgaddr_print(regvalue, cur_col, wrap) \
964 ahc_print_register(NULL, 0, "CCSGADDR", 0xea, regvalue, cur_col, wrap)
965#endif
966
967#if AIC_DEBUG_REGISTERS
968ahc_reg_print_t ahc_ccsgctl_print;
969#else
970#define ahc_ccsgctl_print(regvalue, cur_col, wrap) \
971 ahc_print_register(NULL, 0, "CCSGCTL", 0xeb, regvalue, cur_col, wrap)
972#endif
973
974#if AIC_DEBUG_REGISTERS
975ahc_reg_print_t ahc_ccscbram_print;
976#else
977#define ahc_ccscbram_print(regvalue, cur_col, wrap) \
978 ahc_print_register(NULL, 0, "CCSCBRAM", 0xec, regvalue, cur_col, wrap)
979#endif
980
981#if AIC_DEBUG_REGISTERS
982ahc_reg_print_t ahc_ccscbaddr_print;
983#else
984#define ahc_ccscbaddr_print(regvalue, cur_col, wrap) \
985 ahc_print_register(NULL, 0, "CCSCBADDR", 0xed, regvalue, cur_col, wrap)
986#endif
987
988#if AIC_DEBUG_REGISTERS
989ahc_reg_print_t ahc_ccscbctl_print;
990#else
991#define ahc_ccscbctl_print(regvalue, cur_col, wrap) \
992 ahc_print_register(NULL, 0, "CCSCBCTL", 0xee, regvalue, cur_col, wrap)
993#endif
994
995#if AIC_DEBUG_REGISTERS
996ahc_reg_print_t ahc_ccscbcnt_print;
997#else
998#define ahc_ccscbcnt_print(regvalue, cur_col, wrap) \
999 ahc_print_register(NULL, 0, "CCSCBCNT", 0xef, regvalue, cur_col, wrap)
1000#endif
1001
1002#if AIC_DEBUG_REGISTERS
1003ahc_reg_print_t ahc_scbbaddr_print;
1004#else
1005#define ahc_scbbaddr_print(regvalue, cur_col, wrap) \
1006 ahc_print_register(NULL, 0, "SCBBADDR", 0xf0, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahc_reg_print_t ahc_ccscbptr_print;
1011#else
1012#define ahc_ccscbptr_print(regvalue, cur_col, wrap) \
1013 ahc_print_register(NULL, 0, "CCSCBPTR", 0xf1, regvalue, cur_col, wrap)
1014#endif
1015
1016#if AIC_DEBUG_REGISTERS
1017ahc_reg_print_t ahc_hnscb_qoff_print;
1018#else
1019#define ahc_hnscb_qoff_print(regvalue, cur_col, wrap) \
1020 ahc_print_register(NULL, 0, "HNSCB_QOFF", 0xf4, regvalue, cur_col, wrap)
1021#endif
1022
1023#if AIC_DEBUG_REGISTERS
1024ahc_reg_print_t ahc_snscb_qoff_print;
1025#else
1026#define ahc_snscb_qoff_print(regvalue, cur_col, wrap) \
1027 ahc_print_register(NULL, 0, "SNSCB_QOFF", 0xf6, regvalue, cur_col, wrap)
1028#endif
1029
1030#if AIC_DEBUG_REGISTERS
1031ahc_reg_print_t ahc_sdscb_qoff_print;
1032#else
1033#define ahc_sdscb_qoff_print(regvalue, cur_col, wrap) \
1034 ahc_print_register(NULL, 0, "SDSCB_QOFF", 0xf8, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahc_reg_print_t ahc_qoff_ctlsta_print;
1039#else
1040#define ahc_qoff_ctlsta_print(regvalue, cur_col, wrap) \
1041 ahc_print_register(NULL, 0, "QOFF_CTLSTA", 0xfa, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahc_reg_print_t ahc_dff_thrsh_print;
1046#else
1047#define ahc_dff_thrsh_print(regvalue, cur_col, wrap) \
1048 ahc_print_register(NULL, 0, "DFF_THRSH", 0xfb, regvalue, cur_col, wrap)
1049#endif
1050
1051#if AIC_DEBUG_REGISTERS
1052ahc_reg_print_t ahc_sg_cache_shadow_print;
1053#else
1054#define ahc_sg_cache_shadow_print(regvalue, cur_col, wrap) \
1055 ahc_print_register(NULL, 0, "SG_CACHE_SHADOW", 0xfc, regvalue, cur_col, wrap)
1056#endif
1057
1058#if AIC_DEBUG_REGISTERS
1059ahc_reg_print_t ahc_sg_cache_pre_print;
1060#else
1061#define ahc_sg_cache_pre_print(regvalue, cur_col, wrap) \
1062 ahc_print_register(NULL, 0, "SG_CACHE_PRE", 0xfc, regvalue, cur_col, wrap)
1063#endif
1064
1065 190
1066#define SCSISEQ 0x00 191#define SCSISEQ 0x00
1067#define TEMODE 0x80 192#define TEMODE 0x80
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 309a562b009e..9f9b88047d0c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -43,48 +43,6 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
43 0x01, regvalue, cur_col, wrap)); 43 0x01, regvalue, cur_col, wrap));
44} 44}
45 45
46static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
47 { "STPWEN", 0x01, 0x01 },
48 { "ACTNEGEN", 0x02, 0x02 },
49 { "ENSTIMER", 0x04, 0x04 },
50 { "ENSPCHK", 0x20, 0x20 },
51 { "SWRAPEN", 0x40, 0x40 },
52 { "BITBUCKET", 0x80, 0x80 },
53 { "STIMESEL", 0x18, 0x18 }
54};
55
56int
57ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
58{
59 return (ahc_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
60 0x02, regvalue, cur_col, wrap));
61}
62
63static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
64 { "ACKO", 0x01, 0x01 },
65 { "REQO", 0x02, 0x02 },
66 { "BSYO", 0x04, 0x04 },
67 { "SELO", 0x08, 0x08 },
68 { "ATNO", 0x10, 0x10 },
69 { "MSGO", 0x20, 0x20 },
70 { "IOO", 0x40, 0x40 },
71 { "CDO", 0x80, 0x80 },
72 { "P_DATAOUT", 0x00, 0x00 },
73 { "P_DATAIN", 0x40, 0x40 },
74 { "P_COMMAND", 0x80, 0x80 },
75 { "P_MESGOUT", 0xa0, 0xa0 },
76 { "P_STATUS", 0xc0, 0xc0 },
77 { "PHASE_MASK", 0xe0, 0xe0 },
78 { "P_MESGIN", 0xe0, 0xe0 }
79};
80
81int
82ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
83{
84 return (ahc_print_register(SCSISIGO_parse_table, 15, "SCSISIGO",
85 0x03, regvalue, cur_col, wrap));
86}
87
88static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = { 46static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
89 { "ACKI", 0x01, 0x01 }, 47 { "ACKI", 0x01, 0x01 },
90 { "REQI", 0x02, 0x02 }, 48 { "REQI", 0x02, 0x02 },
@@ -128,77 +86,6 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
128 0x04, regvalue, cur_col, wrap)); 86 0x04, regvalue, cur_col, wrap));
129} 87}
130 88
131static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
132 { "TWIN_CHNLB", 0x80, 0x80 },
133 { "OID", 0x0f, 0x0f },
134 { "TWIN_TID", 0x70, 0x70 },
135 { "SOFS_ULTRA2", 0x7f, 0x7f },
136 { "TID", 0xf0, 0xf0 }
137};
138
139int
140ahc_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
141{
142 return (ahc_print_register(SCSIID_parse_table, 5, "SCSIID",
143 0x05, regvalue, cur_col, wrap));
144}
145
146int
147ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
148{
149 return (ahc_print_register(NULL, 0, "SCSIDATL",
150 0x06, regvalue, cur_col, wrap));
151}
152
153int
154ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
155{
156 return (ahc_print_register(NULL, 0, "STCNT",
157 0x08, regvalue, cur_col, wrap));
158}
159
160static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
161 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
162 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
163 { "SCSIDATL_IMGEN", 0x04, 0x04 },
164 { "EXPPHASEDIS", 0x08, 0x08 },
165 { "BUSFREEREV", 0x10, 0x10 },
166 { "ATNMGMNTEN", 0x20, 0x20 },
167 { "AUTOACKEN", 0x40, 0x40 },
168 { "AUTORATEEN", 0x80, 0x80 },
169 { "OPTIONMODE_DEFAULTS",0x03, 0x03 }
170};
171
172int
173ahc_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
174{
175 return (ahc_print_register(OPTIONMODE_parse_table, 9, "OPTIONMODE",
176 0x08, regvalue, cur_col, wrap));
177}
178
179int
180ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
181{
182 return (ahc_print_register(NULL, 0, "TARGCRCCNT",
183 0x0a, regvalue, cur_col, wrap));
184}
185
186static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
187 { "CLRSPIORDY", 0x02, 0x02 },
188 { "CLRSWRAP", 0x08, 0x08 },
189 { "CLRIOERR", 0x08, 0x08 },
190 { "CLRSELINGO", 0x10, 0x10 },
191 { "CLRSELDI", 0x20, 0x20 },
192 { "CLRSELDO", 0x40, 0x40 }
193};
194
195int
196ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
197{
198 return (ahc_print_register(CLRSINT0_parse_table, 6, "CLRSINT0",
199 0x0b, regvalue, cur_col, wrap));
200}
201
202static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = { 89static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
203 { "DMADONE", 0x01, 0x01 }, 90 { "DMADONE", 0x01, 0x01 },
204 { "SPIORDY", 0x02, 0x02 }, 91 { "SPIORDY", 0x02, 0x02 },
@@ -218,23 +105,6 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
218 0x0b, regvalue, cur_col, wrap)); 105 0x0b, regvalue, cur_col, wrap));
219} 106}
220 107
221static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
222 { "CLRREQINIT", 0x01, 0x01 },
223 { "CLRPHASECHG", 0x02, 0x02 },
224 { "CLRSCSIPERR", 0x04, 0x04 },
225 { "CLRBUSFREE", 0x08, 0x08 },
226 { "CLRSCSIRSTI", 0x20, 0x20 },
227 { "CLRATNO", 0x40, 0x40 },
228 { "CLRSELTIMEO", 0x80, 0x80 }
229};
230
231int
232ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
233{
234 return (ahc_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
235 0x0c, regvalue, cur_col, wrap));
236}
237
238static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = { 108static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
239 { "REQINIT", 0x01, 0x01 }, 109 { "REQINIT", 0x01, 0x01 },
240 { "PHASECHG", 0x02, 0x02 }, 110 { "PHASECHG", 0x02, 0x02 },
@@ -284,18 +154,6 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
284 0x0e, regvalue, cur_col, wrap)); 154 0x0e, regvalue, cur_col, wrap));
285} 155}
286 156
287static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
288 { "OID", 0x0f, 0x0f },
289 { "TID", 0xf0, 0xf0 }
290};
291
292int
293ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
294{
295 return (ahc_print_register(SCSIID_ULTRA2_parse_table, 2, "SCSIID_ULTRA2",
296 0x0f, regvalue, cur_col, wrap));
297}
298
299static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = { 157static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
300 { "ENDMADONE", 0x01, 0x01 }, 158 { "ENDMADONE", 0x01, 0x01 },
301 { "ENSPIORDY", 0x02, 0x02 }, 159 { "ENSPIORDY", 0x02, 0x02 },
@@ -339,107 +197,6 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
339 0x12, regvalue, cur_col, wrap)); 197 0x12, regvalue, cur_col, wrap));
340} 198}
341 199
342int
343ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
344{
345 return (ahc_print_register(NULL, 0, "SHADDR",
346 0x14, regvalue, cur_col, wrap));
347}
348
349static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
350 { "STAGE1", 0x01, 0x01 },
351 { "STAGE2", 0x02, 0x02 },
352 { "STAGE3", 0x04, 0x04 },
353 { "STAGE4", 0x08, 0x08 },
354 { "STAGE5", 0x10, 0x10 },
355 { "STAGE6", 0x20, 0x20 }
356};
357
358int
359ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
360{
361 return (ahc_print_register(SELTIMER_parse_table, 6, "SELTIMER",
362 0x18, regvalue, cur_col, wrap));
363}
364
365static const ahc_reg_parse_entry_t SELID_parse_table[] = {
366 { "ONEBIT", 0x08, 0x08 },
367 { "SELID_MASK", 0xf0, 0xf0 }
368};
369
370int
371ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
372{
373 return (ahc_print_register(SELID_parse_table, 2, "SELID",
374 0x19, regvalue, cur_col, wrap));
375}
376
377int
378ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
379{
380 return (ahc_print_register(NULL, 0, "TARGID",
381 0x1b, regvalue, cur_col, wrap));
382}
383
384static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
385 { "SSPIOCPS", 0x01, 0x01 },
386 { "ROM", 0x02, 0x02 },
387 { "EEPROM", 0x04, 0x04 },
388 { "SEEPROM", 0x08, 0x08 },
389 { "EXT_BRDCTL", 0x10, 0x10 },
390 { "SOFTCMDEN", 0x20, 0x20 },
391 { "SOFT0", 0x40, 0x40 },
392 { "SOFT1", 0x80, 0x80 }
393};
394
395int
396ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
397{
398 return (ahc_print_register(SPIOCAP_parse_table, 8, "SPIOCAP",
399 0x1b, regvalue, cur_col, wrap));
400}
401
402static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
403 { "BRDCTL0", 0x01, 0x01 },
404 { "BRDSTB_ULTRA2", 0x01, 0x01 },
405 { "BRDCTL1", 0x02, 0x02 },
406 { "BRDRW_ULTRA2", 0x02, 0x02 },
407 { "BRDRW", 0x04, 0x04 },
408 { "BRDDAT2", 0x04, 0x04 },
409 { "BRDCS", 0x08, 0x08 },
410 { "BRDDAT3", 0x08, 0x08 },
411 { "BRDSTB", 0x10, 0x10 },
412 { "BRDDAT4", 0x10, 0x10 },
413 { "BRDDAT5", 0x20, 0x20 },
414 { "BRDDAT6", 0x40, 0x40 },
415 { "BRDDAT7", 0x80, 0x80 }
416};
417
418int
419ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
420{
421 return (ahc_print_register(BRDCTL_parse_table, 13, "BRDCTL",
422 0x1d, regvalue, cur_col, wrap));
423}
424
425static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
426 { "SEEDI", 0x01, 0x01 },
427 { "SEEDO", 0x02, 0x02 },
428 { "SEECK", 0x04, 0x04 },
429 { "SEECS", 0x08, 0x08 },
430 { "SEERDY", 0x10, 0x10 },
431 { "SEEMS", 0x20, 0x20 },
432 { "EXTARBREQ", 0x40, 0x40 },
433 { "EXTARBACK", 0x80, 0x80 }
434};
435
436int
437ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
438{
439 return (ahc_print_register(SEECTL_parse_table, 8, "SEECTL",
440 0x1e, regvalue, cur_col, wrap));
441}
442
443static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = { 200static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
444 { "XCVR", 0x01, 0x01 }, 201 { "XCVR", 0x01, 0x01 },
445 { "SELWIDE", 0x02, 0x02 }, 202 { "SELWIDE", 0x02, 0x02 },
@@ -458,68 +215,6 @@ ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
458 0x1f, regvalue, cur_col, wrap)); 215 0x1f, regvalue, cur_col, wrap));
459} 216}
460 217
461int
462ahc_busy_targets_print(u_int regvalue, u_int *cur_col, u_int wrap)
463{
464 return (ahc_print_register(NULL, 0, "BUSY_TARGETS",
465 0x20, regvalue, cur_col, wrap));
466}
467
468int
469ahc_ultra_enb_print(u_int regvalue, u_int *cur_col, u_int wrap)
470{
471 return (ahc_print_register(NULL, 0, "ULTRA_ENB",
472 0x30, regvalue, cur_col, wrap));
473}
474
475int
476ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
477{
478 return (ahc_print_register(NULL, 0, "DISC_DSB",
479 0x32, regvalue, cur_col, wrap));
480}
481
482int
483ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
484{
485 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
486 0x38, regvalue, cur_col, wrap));
487}
488
489int
490ahc_next_queued_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
491{
492 return (ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB",
493 0x39, regvalue, cur_col, wrap));
494}
495
496int
497ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
498{
499 return (ahc_print_register(NULL, 0, "MSG_OUT",
500 0x3a, regvalue, cur_col, wrap));
501}
502
503static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
504 { "FIFORESET", 0x01, 0x01 },
505 { "FIFOFLUSH", 0x02, 0x02 },
506 { "DIRECTION", 0x04, 0x04 },
507 { "HDMAEN", 0x08, 0x08 },
508 { "HDMAENACK", 0x08, 0x08 },
509 { "SDMAEN", 0x10, 0x10 },
510 { "SDMAENACK", 0x10, 0x10 },
511 { "SCSIEN", 0x20, 0x20 },
512 { "WIDEODD", 0x40, 0x40 },
513 { "PRELOADEN", 0x80, 0x80 }
514};
515
516int
517ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
518{
519 return (ahc_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
520 0x3b, regvalue, cur_col, wrap));
521}
522
523static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 218static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
524 { "NO_DISCONNECT", 0x01, 0x01 }, 219 { "NO_DISCONNECT", 0x01, 0x01 },
525 { "SPHASE_PENDING", 0x02, 0x02 }, 220 { "SPHASE_PENDING", 0x02, 0x02 },
@@ -539,20 +234,6 @@ ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
539 0x3c, regvalue, cur_col, wrap)); 234 0x3c, regvalue, cur_col, wrap));
540} 235}
541 236
542int
543ahc_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
544{
545 return (ahc_print_register(NULL, 0, "SAVED_SCSIID",
546 0x3d, regvalue, cur_col, wrap));
547}
548
549int
550ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
551{
552 return (ahc_print_register(NULL, 0, "SAVED_LUN",
553 0x3e, regvalue, cur_col, wrap));
554}
555
556static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = { 237static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
557 { "MSGI", 0x20, 0x20 }, 238 { "MSGI", 0x20, 0x20 },
558 { "IOI", 0x40, 0x40 }, 239 { "IOI", 0x40, 0x40 },
@@ -574,193 +255,6 @@ ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
574 0x3f, regvalue, cur_col, wrap)); 255 0x3f, regvalue, cur_col, wrap));
575} 256}
576 257
577int
578ahc_waiting_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
579{
580 return (ahc_print_register(NULL, 0, "WAITING_SCBH",
581 0x40, regvalue, cur_col, wrap));
582}
583
584int
585ahc_disconnected_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
586{
587 return (ahc_print_register(NULL, 0, "DISCONNECTED_SCBH",
588 0x41, regvalue, cur_col, wrap));
589}
590
591int
592ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
593{
594 return (ahc_print_register(NULL, 0, "FREE_SCBH",
595 0x42, regvalue, cur_col, wrap));
596}
597
598int
599ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
600{
601 return (ahc_print_register(NULL, 0, "HSCB_ADDR",
602 0x44, regvalue, cur_col, wrap));
603}
604
605int
606ahc_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
607{
608 return (ahc_print_register(NULL, 0, "SHARED_DATA_ADDR",
609 0x48, regvalue, cur_col, wrap));
610}
611
612int
613ahc_kernel_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
614{
615 return (ahc_print_register(NULL, 0, "KERNEL_QINPOS",
616 0x4c, regvalue, cur_col, wrap));
617}
618
619int
620ahc_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
621{
622 return (ahc_print_register(NULL, 0, "QINPOS",
623 0x4d, regvalue, cur_col, wrap));
624}
625
626int
627ahc_qoutpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
628{
629 return (ahc_print_register(NULL, 0, "QOUTPOS",
630 0x4e, regvalue, cur_col, wrap));
631}
632
633int
634ahc_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
635{
636 return (ahc_print_register(NULL, 0, "KERNEL_TQINPOS",
637 0x4f, regvalue, cur_col, wrap));
638}
639
640int
641ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
642{
643 return (ahc_print_register(NULL, 0, "TQINPOS",
644 0x50, regvalue, cur_col, wrap));
645}
646
647static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
648 { "CONT_TARG_SESSION", 0x02, 0x02 },
649 { "CONT_MSG_LOOP", 0x04, 0x04 },
650 { "EXIT_MSG_LOOP", 0x08, 0x08 },
651 { "MSGOUT_PHASEMIS", 0x10, 0x10 },
652 { "SEND_REJ", 0x20, 0x20 },
653 { "SEND_SENSE", 0x40, 0x40 },
654 { "SEND_MSG", 0x80, 0x80 }
655};
656
657int
658ahc_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
659{
660 return (ahc_print_register(ARG_1_parse_table, 7, "ARG_1",
661 0x51, regvalue, cur_col, wrap));
662}
663
664int
665ahc_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
666{
667 return (ahc_print_register(NULL, 0, "ARG_2",
668 0x52, regvalue, cur_col, wrap));
669}
670
671int
672ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
673{
674 return (ahc_print_register(NULL, 0, "LAST_MSG",
675 0x53, regvalue, cur_col, wrap));
676}
677
678static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
679 { "ENAUTOATNP", 0x02, 0x02 },
680 { "ENAUTOATNI", 0x04, 0x04 },
681 { "ENAUTOATNO", 0x08, 0x08 },
682 { "ENRSELI", 0x10, 0x10 },
683 { "ENSELI", 0x20, 0x20 },
684 { "ENSELO", 0x40, 0x40 }
685};
686
687int
688ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
689{
690 return (ahc_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
691 0x54, regvalue, cur_col, wrap));
692}
693
694static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
695 { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
696};
697
698int
699ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
700{
701 return (ahc_print_register(HA_274_BIOSGLOBAL_parse_table, 1, "HA_274_BIOSGLOBAL",
702 0x56, regvalue, cur_col, wrap));
703}
704
705static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
706 { "SCB_DMA", 0x01, 0x01 },
707 { "TARGET_MSG_PENDING", 0x02, 0x02 }
708};
709
710int
711ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
712{
713 return (ahc_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
714 0x57, regvalue, cur_col, wrap));
715}
716
717static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
718 { "ENSPCHK", 0x20, 0x20 },
719 { "RESET_SCSI", 0x40, 0x40 },
720 { "TERM_ENB", 0x80, 0x80 },
721 { "HSCSIID", 0x07, 0x07 },
722 { "HWSCSIID", 0x0f, 0x0f }
723};
724
725int
726ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
727{
728 return (ahc_print_register(SCSICONF_parse_table, 5, "SCSICONF",
729 0x5a, regvalue, cur_col, wrap));
730}
731
732static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
733 { "EDGE_TRIG", 0x80, 0x80 },
734 { "VECTOR", 0x0f, 0x0f }
735};
736
737int
738ahc_intdef_print(u_int regvalue, u_int *cur_col, u_int wrap)
739{
740 return (ahc_print_register(INTDEF_parse_table, 2, "INTDEF",
741 0x5c, regvalue, cur_col, wrap));
742}
743
744int
745ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
746{
747 return (ahc_print_register(NULL, 0, "HOSTCONF",
748 0x5d, regvalue, cur_col, wrap));
749}
750
751static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
752 { "CHANNEL_B_PRIMARY", 0x08, 0x08 },
753 { "BIOSMODE", 0x30, 0x30 },
754 { "BIOSDISABLED", 0x30, 0x30 }
755};
756
757int
758ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
759{
760 return (ahc_print_register(HA_274_BIOSCTRL_parse_table, 3, "HA_274_BIOSCTRL",
761 0x5f, regvalue, cur_col, wrap));
762}
763
764static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = { 258static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
765 { "LOADRAM", 0x01, 0x01 }, 259 { "LOADRAM", 0x01, 0x01 },
766 { "SEQRESET", 0x02, 0x02 }, 260 { "SEQRESET", 0x02, 0x02 },
@@ -780,285 +274,12 @@ ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
780} 274}
781 275
782int 276int
783ahc_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
784{
785 return (ahc_print_register(NULL, 0, "SEQRAM",
786 0x61, regvalue, cur_col, wrap));
787}
788
789int
790ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
791{
792 return (ahc_print_register(NULL, 0, "SEQADDR0",
793 0x62, regvalue, cur_col, wrap));
794}
795
796static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
797 { "SEQADDR1_MASK", 0x01, 0x01 }
798};
799
800int
801ahc_seqaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
802{
803 return (ahc_print_register(SEQADDR1_parse_table, 1, "SEQADDR1",
804 0x63, regvalue, cur_col, wrap));
805}
806
807int
808ahc_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
809{
810 return (ahc_print_register(NULL, 0, "ACCUM",
811 0x64, regvalue, cur_col, wrap));
812}
813
814int
815ahc_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
816{
817 return (ahc_print_register(NULL, 0, "SINDEX",
818 0x65, regvalue, cur_col, wrap));
819}
820
821int
822ahc_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
823{
824 return (ahc_print_register(NULL, 0, "DINDEX",
825 0x66, regvalue, cur_col, wrap));
826}
827
828int
829ahc_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
830{
831 return (ahc_print_register(NULL, 0, "ALLONES",
832 0x69, regvalue, cur_col, wrap));
833}
834
835int
836ahc_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
837{
838 return (ahc_print_register(NULL, 0, "ALLZEROS",
839 0x6a, regvalue, cur_col, wrap));
840}
841
842int
843ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
844{
845 return (ahc_print_register(NULL, 0, "NONE",
846 0x6a, regvalue, cur_col, wrap));
847}
848
849static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
850 { "CARRY", 0x01, 0x01 },
851 { "ZERO", 0x02, 0x02 }
852};
853
854int
855ahc_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
856{
857 return (ahc_print_register(FLAGS_parse_table, 2, "FLAGS",
858 0x6b, regvalue, cur_col, wrap));
859}
860
861int
862ahc_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
863{
864 return (ahc_print_register(NULL, 0, "SINDIR",
865 0x6c, regvalue, cur_col, wrap));
866}
867
868int
869ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
870{
871 return (ahc_print_register(NULL, 0, "DINDIR",
872 0x6d, regvalue, cur_col, wrap));
873}
874
875int
876ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
877{
878 return (ahc_print_register(NULL, 0, "STACK",
879 0x6f, regvalue, cur_col, wrap));
880}
881
882int
883ahc_targ_offset_print(u_int regvalue, u_int *cur_col, u_int wrap)
884{
885 return (ahc_print_register(NULL, 0, "TARG_OFFSET",
886 0x70, regvalue, cur_col, wrap));
887}
888
889int
890ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 277ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
891{ 278{
892 return (ahc_print_register(NULL, 0, "SRAM_BASE", 279 return (ahc_print_register(NULL, 0, "SRAM_BASE",
893 0x70, regvalue, cur_col, wrap)); 280 0x70, regvalue, cur_col, wrap));
894} 281}
895 282
896static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
897 { "CIOPARCKEN", 0x01, 0x01 },
898 { "USCBSIZE32", 0x02, 0x02 },
899 { "RAMPS", 0x04, 0x04 },
900 { "INTSCBRAMSEL", 0x08, 0x08 },
901 { "EXTREQLCK", 0x10, 0x10 },
902 { "MPARCKEN", 0x20, 0x20 },
903 { "DPARCKEN", 0x40, 0x40 },
904 { "CACHETHEN", 0x80, 0x80 }
905};
906
907int
908ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
909{
910 return (ahc_print_register(DSCOMMAND0_parse_table, 8, "DSCOMMAND0",
911 0x84, regvalue, cur_col, wrap));
912}
913
914static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
915 { "BON", 0x0f, 0x0f },
916 { "BOFF", 0xf0, 0xf0 }
917};
918
919int
920ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
921{
922 return (ahc_print_register(BUSTIME_parse_table, 2, "BUSTIME",
923 0x85, regvalue, cur_col, wrap));
924}
925
926static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
927 { "HADDLDSEL0", 0x01, 0x01 },
928 { "HADDLDSEL1", 0x02, 0x02 },
929 { "DSLATT", 0xfc, 0xfc }
930};
931
932int
933ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
934{
935 return (ahc_print_register(DSCOMMAND1_parse_table, 3, "DSCOMMAND1",
936 0x85, regvalue, cur_col, wrap));
937}
938
939static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
940 { "STBON", 0x07, 0x07 },
941 { "STBOFF", 0x38, 0x38 },
942 { "DFTHRSH_75", 0x80, 0x80 },
943 { "DFTHRSH", 0xc0, 0xc0 },
944 { "DFTHRSH_100", 0xc0, 0xc0 }
945};
946
947int
948ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
949{
950 return (ahc_print_register(BUSSPD_parse_table, 5, "BUSSPD",
951 0x86, regvalue, cur_col, wrap));
952}
953
954static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
955 { "SEQ_MAILBOX", 0x0f, 0x0f },
956 { "HOST_TQINPOS", 0x80, 0x80 },
957 { "HOST_MAILBOX", 0xf0, 0xf0 }
958};
959
960int
961ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
962{
963 return (ahc_print_register(HS_MAILBOX_parse_table, 3, "HS_MAILBOX",
964 0x86, regvalue, cur_col, wrap));
965}
966
967static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
968 { "DFTHRSH_100", 0xc0, 0xc0 }
969};
970
971int
972ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
973{
974 return (ahc_print_register(DSPCISTATUS_parse_table, 1, "DSPCISTATUS",
975 0x86, regvalue, cur_col, wrap));
976}
977
978static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
979 { "CHIPRST", 0x01, 0x01 },
980 { "CHIPRSTACK", 0x01, 0x01 },
981 { "INTEN", 0x02, 0x02 },
982 { "PAUSE", 0x04, 0x04 },
983 { "IRQMS", 0x08, 0x08 },
984 { "SWINT", 0x10, 0x10 },
985 { "POWRDN", 0x40, 0x40 }
986};
987
988int
989ahc_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
990{
991 return (ahc_print_register(HCNTRL_parse_table, 7, "HCNTRL",
992 0x87, regvalue, cur_col, wrap));
993}
994
995int
996ahc_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
997{
998 return (ahc_print_register(NULL, 0, "HADDR",
999 0x88, regvalue, cur_col, wrap));
1000}
1001
1002int
1003ahc_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1004{
1005 return (ahc_print_register(NULL, 0, "HCNT",
1006 0x8c, regvalue, cur_col, wrap));
1007}
1008
1009int
1010ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1011{
1012 return (ahc_print_register(NULL, 0, "SCBPTR",
1013 0x90, regvalue, cur_col, wrap));
1014}
1015
1016static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
1017 { "SEQINT", 0x01, 0x01 },
1018 { "CMDCMPLT", 0x02, 0x02 },
1019 { "SCSIINT", 0x04, 0x04 },
1020 { "BRKADRINT", 0x08, 0x08 },
1021 { "BAD_PHASE", 0x01, 0x01 },
1022 { "INT_PEND", 0x0f, 0x0f },
1023 { "SEND_REJECT", 0x11, 0x11 },
1024 { "PROTO_VIOLATION", 0x21, 0x21 },
1025 { "NO_MATCH", 0x31, 0x31 },
1026 { "IGN_WIDE_RES", 0x41, 0x41 },
1027 { "PDATA_REINIT", 0x51, 0x51 },
1028 { "HOST_MSG_LOOP", 0x61, 0x61 },
1029 { "BAD_STATUS", 0x71, 0x71 },
1030 { "PERR_DETECTED", 0x81, 0x81 },
1031 { "DATA_OVERRUN", 0x91, 0x91 },
1032 { "MKMSG_FAILED", 0xa1, 0xa1 },
1033 { "MISSED_BUSFREE", 0xb1, 0xb1 },
1034 { "SCB_MISMATCH", 0xc1, 0xc1 },
1035 { "NO_FREE_SCB", 0xd1, 0xd1 },
1036 { "OUT_OF_RANGE", 0xe1, 0xe1 },
1037 { "SEQINT_MASK", 0xf1, 0xf1 }
1038};
1039
1040int
1041ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1042{
1043 return (ahc_print_register(INTSTAT_parse_table, 21, "INTSTAT",
1044 0x91, regvalue, cur_col, wrap));
1045}
1046
1047static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
1048 { "CLRSEQINT", 0x01, 0x01 },
1049 { "CLRCMDINT", 0x02, 0x02 },
1050 { "CLRSCSIINT", 0x04, 0x04 },
1051 { "CLRBRKADRINT", 0x08, 0x08 },
1052 { "CLRPARERR", 0x10, 0x10 }
1053};
1054
1055int
1056ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
1057{
1058 return (ahc_print_register(CLRINT_parse_table, 5, "CLRINT",
1059 0x92, regvalue, cur_col, wrap));
1060}
1061
1062static const ahc_reg_parse_entry_t ERROR_parse_table[] = { 283static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
1063 { "ILLHADDR", 0x01, 0x01 }, 284 { "ILLHADDR", 0x01, 0x01 },
1064 { "ILLSADDR", 0x02, 0x02 }, 285 { "ILLSADDR", 0x02, 0x02 },
@@ -1115,62 +336,6 @@ ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
1115 0x94, regvalue, cur_col, wrap)); 336 0x94, regvalue, cur_col, wrap));
1116} 337}
1117 338
1118int
1119ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1120{
1121 return (ahc_print_register(NULL, 0, "DFWADDR",
1122 0x95, regvalue, cur_col, wrap));
1123}
1124
1125int
1126ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1127{
1128 return (ahc_print_register(NULL, 0, "DFDAT",
1129 0x99, regvalue, cur_col, wrap));
1130}
1131
1132static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
1133 { "SCBAUTO", 0x80, 0x80 },
1134 { "SCBCNT_MASK", 0x1f, 0x1f }
1135};
1136
1137int
1138ahc_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1139{
1140 return (ahc_print_register(SCBCNT_parse_table, 2, "SCBCNT",
1141 0x9a, regvalue, cur_col, wrap));
1142}
1143
1144int
1145ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1146{
1147 return (ahc_print_register(NULL, 0, "QINFIFO",
1148 0x9b, regvalue, cur_col, wrap));
1149}
1150
1151int
1152ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1153{
1154 return (ahc_print_register(NULL, 0, "QOUTFIFO",
1155 0x9d, regvalue, cur_col, wrap));
1156}
1157
1158static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
1159 { "TARGCRCCNTEN", 0x04, 0x04 },
1160 { "TARGCRCENDEN", 0x08, 0x08 },
1161 { "CRCREQCHKEN", 0x10, 0x10 },
1162 { "CRCENDCHKEN", 0x20, 0x20 },
1163 { "CRCVALCHKEN", 0x40, 0x40 },
1164 { "CRCONSEEN", 0x80, 0x80 }
1165};
1166
1167int
1168ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1169{
1170 return (ahc_print_register(CRCCONTROL1_parse_table, 6, "CRCCONTROL1",
1171 0x9d, regvalue, cur_col, wrap));
1172}
1173
1174static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = { 339static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1175 { "DATA_OUT_PHASE", 0x01, 0x01 }, 340 { "DATA_OUT_PHASE", 0x01, 0x01 },
1176 { "DATA_IN_PHASE", 0x02, 0x02 }, 341 { "DATA_IN_PHASE", 0x02, 0x02 },
@@ -1188,17 +353,6 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
1188 0x9e, regvalue, cur_col, wrap)); 353 0x9e, regvalue, cur_col, wrap));
1189} 354}
1190 355
1191static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
1192 { "ALT_MODE", 0x80, 0x80 }
1193};
1194
1195int
1196ahc_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
1197{
1198 return (ahc_print_register(SFUNCT_parse_table, 1, "SFUNCT",
1199 0x9f, regvalue, cur_col, wrap));
1200}
1201
1202int 356int
1203ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 357ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1204{ 358{
@@ -1206,80 +360,6 @@ ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1206 0xa0, regvalue, cur_col, wrap)); 360 0xa0, regvalue, cur_col, wrap));
1207} 361}
1208 362
1209int
1210ahc_scb_cdb_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1211{
1212 return (ahc_print_register(NULL, 0, "SCB_CDB_PTR",
1213 0xa0, regvalue, cur_col, wrap));
1214}
1215
1216int
1217ahc_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1218{
1219 return (ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR",
1220 0xa4, regvalue, cur_col, wrap));
1221}
1222
1223int
1224ahc_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
1225{
1226 return (ahc_print_register(NULL, 0, "SCB_SCSI_STATUS",
1227 0xa8, regvalue, cur_col, wrap));
1228}
1229
1230int
1231ahc_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
1232{
1233 return (ahc_print_register(NULL, 0, "SCB_TARGET_PHASES",
1234 0xa9, regvalue, cur_col, wrap));
1235}
1236
1237int
1238ahc_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1239{
1240 return (ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
1241 0xaa, regvalue, cur_col, wrap));
1242}
1243
1244int
1245ahc_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
1246{
1247 return (ahc_print_register(NULL, 0, "SCB_TARGET_ITAG",
1248 0xab, regvalue, cur_col, wrap));
1249}
1250
1251int
1252ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1253{
1254 return (ahc_print_register(NULL, 0, "SCB_DATAPTR",
1255 0xac, regvalue, cur_col, wrap));
1256}
1257
1258static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
1259 { "SG_LAST_SEG", 0x80, 0x80 },
1260 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
1261};
1262
1263int
1264ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1265{
1266 return (ahc_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
1267 0xb0, regvalue, cur_col, wrap));
1268}
1269
1270static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
1271 { "SG_LIST_NULL", 0x01, 0x01 },
1272 { "SG_FULL_RESID", 0x02, 0x02 },
1273 { "SG_RESID_VALID", 0x04, 0x04 }
1274};
1275
1276int
1277ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1278{
1279 return (ahc_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
1280 0xb4, regvalue, cur_col, wrap));
1281}
1282
1283static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 363static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
1284 { "DISCONNECTED", 0x04, 0x04 }, 364 { "DISCONNECTED", 0x04, 0x04 },
1285 { "ULTRAENB", 0x08, 0x08 }, 365 { "ULTRAENB", 0x08, 0x08 },
@@ -1331,248 +411,3 @@ ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
1331 0xbb, regvalue, cur_col, wrap)); 411 0xbb, regvalue, cur_col, wrap));
1332} 412}
1333 413
1334int
1335ahc_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
1336{
1337 return (ahc_print_register(NULL, 0, "SCB_CDB_LEN",
1338 0xbc, regvalue, cur_col, wrap));
1339}
1340
1341int
1342ahc_scb_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
1343{
1344 return (ahc_print_register(NULL, 0, "SCB_SCSIRATE",
1345 0xbd, regvalue, cur_col, wrap));
1346}
1347
1348int
1349ahc_scb_scsioffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
1350{
1351 return (ahc_print_register(NULL, 0, "SCB_SCSIOFFSET",
1352 0xbe, regvalue, cur_col, wrap));
1353}
1354
1355int
1356ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
1357{
1358 return (ahc_print_register(NULL, 0, "SCB_NEXT",
1359 0xbf, regvalue, cur_col, wrap));
1360}
1361
1362static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1363 { "DO_2840", 0x01, 0x01 },
1364 { "CK_2840", 0x02, 0x02 },
1365 { "CS_2840", 0x04, 0x04 }
1366};
1367
1368int
1369ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1370{
1371 return (ahc_print_register(SEECTL_2840_parse_table, 3, "SEECTL_2840",
1372 0xc0, regvalue, cur_col, wrap));
1373}
1374
1375static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
1376 { "DI_2840", 0x01, 0x01 },
1377 { "EEPROM_TF", 0x80, 0x80 },
1378 { "ADSEL", 0x1e, 0x1e },
1379 { "BIOS_SEL", 0x60, 0x60 }
1380};
1381
1382int
1383ahc_status_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1384{
1385 return (ahc_print_register(STATUS_2840_parse_table, 4, "STATUS_2840",
1386 0xc1, regvalue, cur_col, wrap));
1387}
1388
1389int
1390ahc_scb_64_btt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1391{
1392 return (ahc_print_register(NULL, 0, "SCB_64_BTT",
1393 0xd0, regvalue, cur_col, wrap));
1394}
1395
1396int
1397ahc_cchaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1398{
1399 return (ahc_print_register(NULL, 0, "CCHADDR",
1400 0xe0, regvalue, cur_col, wrap));
1401}
1402
1403int
1404ahc_cchcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1405{
1406 return (ahc_print_register(NULL, 0, "CCHCNT",
1407 0xe8, regvalue, cur_col, wrap));
1408}
1409
1410int
1411ahc_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1412{
1413 return (ahc_print_register(NULL, 0, "CCSGRAM",
1414 0xe9, regvalue, cur_col, wrap));
1415}
1416
1417int
1418ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1419{
1420 return (ahc_print_register(NULL, 0, "CCSGADDR",
1421 0xea, regvalue, cur_col, wrap));
1422}
1423
1424static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
1425 { "CCSGRESET", 0x01, 0x01 },
1426 { "SG_FETCH_NEEDED", 0x02, 0x02 },
1427 { "CCSGEN", 0x08, 0x08 },
1428 { "CCSGDONE", 0x80, 0x80 }
1429};
1430
1431int
1432ahc_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1433{
1434 return (ahc_print_register(CCSGCTL_parse_table, 4, "CCSGCTL",
1435 0xeb, regvalue, cur_col, wrap));
1436}
1437
1438int
1439ahc_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1440{
1441 return (ahc_print_register(NULL, 0, "CCSCBRAM",
1442 0xec, regvalue, cur_col, wrap));
1443}
1444
1445int
1446ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1447{
1448 return (ahc_print_register(NULL, 0, "CCSCBADDR",
1449 0xed, regvalue, cur_col, wrap));
1450}
1451
1452static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1453 { "CCSCBRESET", 0x01, 0x01 },
1454 { "CCSCBDIR", 0x04, 0x04 },
1455 { "CCSCBEN", 0x08, 0x08 },
1456 { "CCARREN", 0x10, 0x10 },
1457 { "ARRDONE", 0x40, 0x40 },
1458 { "CCSCBDONE", 0x80, 0x80 }
1459};
1460
1461int
1462ahc_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1463{
1464 return (ahc_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
1465 0xee, regvalue, cur_col, wrap));
1466}
1467
1468int
1469ahc_ccscbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1470{
1471 return (ahc_print_register(NULL, 0, "CCSCBCNT",
1472 0xef, regvalue, cur_col, wrap));
1473}
1474
1475int
1476ahc_scbbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1477{
1478 return (ahc_print_register(NULL, 0, "SCBBADDR",
1479 0xf0, regvalue, cur_col, wrap));
1480}
1481
1482int
1483ahc_ccscbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1484{
1485 return (ahc_print_register(NULL, 0, "CCSCBPTR",
1486 0xf1, regvalue, cur_col, wrap));
1487}
1488
1489int
1490ahc_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1491{
1492 return (ahc_print_register(NULL, 0, "HNSCB_QOFF",
1493 0xf4, regvalue, cur_col, wrap));
1494}
1495
1496int
1497ahc_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1498{
1499 return (ahc_print_register(NULL, 0, "SNSCB_QOFF",
1500 0xf6, regvalue, cur_col, wrap));
1501}
1502
1503int
1504ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1505{
1506 return (ahc_print_register(NULL, 0, "SDSCB_QOFF",
1507 0xf8, regvalue, cur_col, wrap));
1508}
1509
1510static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
1511 { "SDSCB_ROLLOVER", 0x10, 0x10 },
1512 { "SNSCB_ROLLOVER", 0x20, 0x20 },
1513 { "SCB_AVAIL", 0x40, 0x40 },
1514 { "SCB_QSIZE_256", 0x06, 0x06 },
1515 { "SCB_QSIZE", 0x07, 0x07 }
1516};
1517
1518int
1519ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
1520{
1521 return (ahc_print_register(QOFF_CTLSTA_parse_table, 5, "QOFF_CTLSTA",
1522 0xfa, regvalue, cur_col, wrap));
1523}
1524
1525static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1526 { "RD_DFTHRSH_MIN", 0x00, 0x00 },
1527 { "WR_DFTHRSH_MIN", 0x00, 0x00 },
1528 { "RD_DFTHRSH_25", 0x01, 0x01 },
1529 { "RD_DFTHRSH_50", 0x02, 0x02 },
1530 { "RD_DFTHRSH_63", 0x03, 0x03 },
1531 { "RD_DFTHRSH_75", 0x04, 0x04 },
1532 { "RD_DFTHRSH_85", 0x05, 0x05 },
1533 { "RD_DFTHRSH_90", 0x06, 0x06 },
1534 { "RD_DFTHRSH", 0x07, 0x07 },
1535 { "RD_DFTHRSH_MAX", 0x07, 0x07 },
1536 { "WR_DFTHRSH_25", 0x10, 0x10 },
1537 { "WR_DFTHRSH_50", 0x20, 0x20 },
1538 { "WR_DFTHRSH_63", 0x30, 0x30 },
1539 { "WR_DFTHRSH_75", 0x40, 0x40 },
1540 { "WR_DFTHRSH_85", 0x50, 0x50 },
1541 { "WR_DFTHRSH_90", 0x60, 0x60 },
1542 { "WR_DFTHRSH", 0x70, 0x70 },
1543 { "WR_DFTHRSH_MAX", 0x70, 0x70 }
1544};
1545
1546int
1547ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1548{
1549 return (ahc_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
1550 0xfb, regvalue, cur_col, wrap));
1551}
1552
1553static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
1554 { "LAST_SEG_DONE", 0x01, 0x01 },
1555 { "LAST_SEG", 0x02, 0x02 },
1556 { "SG_ADDR_MASK", 0xf8, 0xf8 }
1557};
1558
1559int
1560ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
1561{
1562 return (ahc_print_register(SG_CACHE_SHADOW_parse_table, 3, "SG_CACHE_SHADOW",
1563 0xfc, regvalue, cur_col, wrap));
1564}
1565
1566static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
1567 { "LAST_SEG_DONE", 0x01, 0x01 },
1568 { "LAST_SEG", 0x02, 0x02 },
1569 { "SG_ADDR_MASK", 0xf8, 0xf8 }
1570};
1571
1572int
1573ahc_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
1574{
1575 return (ahc_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
1576 0xfc, regvalue, cur_col, wrap));
1577}
1578
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 81be6a261cc8..e4064433842e 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -147,6 +147,8 @@ void yyerror(const char *string);
147 147
148%token T_ACCESS_MODE 148%token T_ACCESS_MODE
149 149
150%token T_DONT_GENERATE_DEBUG_CODE
151
150%token T_MODES 152%token T_MODES
151 153
152%token T_DEFINE 154%token T_DEFINE
@@ -357,6 +359,7 @@ reg_attribute:
357| size 359| size
358| count 360| count
359| access_mode 361| access_mode
362| dont_generate_debug_code
360| modes 363| modes
361| field_defn 364| field_defn
362| enum_defn 365| enum_defn
@@ -410,6 +413,13 @@ access_mode:
410 } 413 }
411; 414;
412 415
416dont_generate_debug_code:
417 T_DONT_GENERATE_DEBUG_CODE
418 {
419 cur_symbol->dont_generate_debug_code = 1;
420 }
421;
422
413modes: 423modes:
414 T_MODES mode_list 424 T_MODES mode_list
415 { 425 {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 2c7f02daf88d..93c8667cd704 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -164,6 +164,7 @@ download { return T_DOWNLOAD; }
164address { return T_ADDRESS; } 164address { return T_ADDRESS; }
165count { return T_COUNT; } 165count { return T_COUNT; }
166access_mode { return T_ACCESS_MODE; } 166access_mode { return T_ACCESS_MODE; }
167dont_generate_debug_code { return T_DONT_GENERATE_DEBUG_CODE; }
167modes { return T_MODES; } 168modes { return T_MODES; }
168RW|RO|WO { 169RW|RO|WO {
169 if (strcmp(yytext, "RW") == 0) 170 if (strcmp(yytext, "RW") == 0)
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index fcd357872b43..078ed600f47a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -539,6 +539,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
539 aic_print_include(dfile, stock_include_file); 539 aic_print_include(dfile, stock_include_file);
540 SLIST_FOREACH(curnode, &registers, links) { 540 SLIST_FOREACH(curnode, &registers, links) {
541 541
542 if (curnode->symbol->dont_generate_debug_code)
543 continue;
544
542 switch(curnode->symbol->type) { 545 switch(curnode->symbol->type) {
543 case REGISTER: 546 case REGISTER:
544 case SCBLOC: 547 case SCBLOC:
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 05190c1a2fb7..2ba73ae7c777 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -137,7 +137,8 @@ typedef struct symbol {
137 struct label_info *linfo; 137 struct label_info *linfo;
138 struct cond_info *condinfo; 138 struct cond_info *condinfo;
139 struct macro_info *macroinfo; 139 struct macro_info *macroinfo;
140 }info; 140 } info;
141 int dont_generate_debug_code;
141} symbol_t; 142} symbol_t;
142 143
143typedef struct symbol_ref { 144typedef struct symbol_ref {
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 69f8346aa288..5877f29a6005 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -189,7 +189,6 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
189 .attr = { 189 .attr = {
190 .name = "mu_read", 190 .name = "mu_read",
191 .mode = S_IRUSR , 191 .mode = S_IRUSR ,
192 .owner = THIS_MODULE,
193 }, 192 },
194 .size = 1032, 193 .size = 1032,
195 .read = arcmsr_sysfs_iop_message_read, 194 .read = arcmsr_sysfs_iop_message_read,
@@ -199,7 +198,6 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
199 .attr = { 198 .attr = {
200 .name = "mu_write", 199 .name = "mu_write",
201 .mode = S_IWUSR, 200 .mode = S_IWUSR,
202 .owner = THIS_MODULE,
203 }, 201 },
204 .size = 1032, 202 .size = 1032,
205 .write = arcmsr_sysfs_iop_message_write, 203 .write = arcmsr_sysfs_iop_message_write,
@@ -209,7 +207,6 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
209 .attr = { 207 .attr = {
210 .name = "mu_clear", 208 .name = "mu_clear",
211 .mode = S_IWUSR, 209 .mode = S_IWUSR,
212 .owner = THIS_MODULE,
213 }, 210 },
214 .size = 1, 211 .size = 1,
215 .write = arcmsr_sysfs_iop_message_clear, 212 .write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/atari_dma_emul.c b/drivers/scsi/atari_dma_emul.c
deleted file mode 100644
index cdc710ea00fa..000000000000
--- a/drivers/scsi/atari_dma_emul.c
+++ /dev/null
@@ -1,468 +0,0 @@
1/*
2 * atari_dma_emul.c -- TT SCSI DMA emulator for the Hades.
3 *
4 * Copyright 1997 Wout Klaren <W.Klaren@inter.nl.net>
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 *
10 * This code was written using the Hades TOS source code as a
11 * reference. This source code can be found on the home page
12 * of Medusa Computer Systems.
13 *
14 * Version 0.1, 1997-09-24.
15 *
16 * This code should be considered experimental. It has only been
17 * tested on a Hades with a 68060. It might not work on a Hades
18 * with a 68040. Make backups of your hard drives before using
19 * this code.
20 */
21
22#include <linux/compiler.h>
23#include <asm/thread_info.h>
24#include <asm/uaccess.h>
25
26#define hades_dma_ctrl (*(unsigned char *) 0xffff8717)
27#define hades_psdm_reg (*(unsigned char *) 0xffff8741)
28
29#define TRANSFER_SIZE 16
30
31struct m68040_frame {
32 unsigned long effaddr; /* effective address */
33 unsigned short ssw; /* special status word */
34 unsigned short wb3s; /* write back 3 status */
35 unsigned short wb2s; /* write back 2 status */
36 unsigned short wb1s; /* write back 1 status */
37 unsigned long faddr; /* fault address */
38 unsigned long wb3a; /* write back 3 address */
39 unsigned long wb3d; /* write back 3 data */
40 unsigned long wb2a; /* write back 2 address */
41 unsigned long wb2d; /* write back 2 data */
42 unsigned long wb1a; /* write back 1 address */
43 unsigned long wb1dpd0; /* write back 1 data/push data 0*/
44 unsigned long pd1; /* push data 1*/
45 unsigned long pd2; /* push data 2*/
46 unsigned long pd3; /* push data 3*/
47};
48
49static void writeback (unsigned short wbs, unsigned long wba,
50 unsigned long wbd, void *old_buserr)
51{
52 mm_segment_t fs = get_fs();
53 static void *save_buserr;
54
55 __asm__ __volatile__ ("movec.l %%vbr,%%a0\n\t"
56 "move.l %0,8(%%a0)\n\t"
57 :
58 : "r" (&&bus_error)
59 : "a0" );
60
61 save_buserr = old_buserr;
62
63 set_fs (MAKE_MM_SEG(wbs & WBTM_040));
64
65 switch (wbs & WBSIZ_040) {
66 case BA_SIZE_BYTE:
67 put_user (wbd & 0xff, (char *)wba);
68 break;
69 case BA_SIZE_WORD:
70 put_user (wbd & 0xffff, (short *)wba);
71 break;
72 case BA_SIZE_LONG:
73 put_user (wbd, (int *)wba);
74 break;
75 }
76
77 set_fs (fs);
78 return;
79
80bus_error:
81 __asm__ __volatile__ ("cmp.l %0,2(%%sp)\n\t"
82 "bcs.s .jump_old\n\t"
83 "cmp.l %1,2(%%sp)\n\t"
84 "bls.s .restore_old\n"
85 ".jump_old:\n\t"
86 "move.l %2,-(%%sp)\n\t"
87 "rts\n"
88 ".restore_old:\n\t"
89 "move.l %%a0,-(%%sp)\n\t"
90 "movec.l %%vbr,%%a0\n\t"
91 "move.l %2,8(%%a0)\n\t"
92 "move.l (%%sp)+,%%a0\n\t"
93 "rte\n\t"
94 :
95 : "i" (writeback), "i" (&&bus_error),
96 "m" (save_buserr) );
97}
98
99/*
100 * static inline void set_restdata_reg(unsigned char *cur_addr)
101 *
102 * Set the rest data register if necessary.
103 */
104
105static inline void set_restdata_reg(unsigned char *cur_addr)
106{
107 if (((long) cur_addr & ~3) != 0)
108 tt_scsi_dma.dma_restdata =
109 *((unsigned long *) ((long) cur_addr & ~3));
110}
111
112/*
113 * void hades_dma_emulator(int irq, void *dummy)
114 *
115 * This code emulates TT SCSI DMA on the Hades.
116 *
117 * Note the following:
118 *
119 * 1. When there is no byte available to read from the SCSI bus, or
120 * when a byte cannot yet bet written to the SCSI bus, a bus
121 * error occurs when reading or writing the pseudo DMA data
122 * register (hades_psdm_reg). We have to catch this bus error
123 * and try again to read or write the byte. If after several tries
124 * we still get a bus error, the interrupt handler is left. When
125 * the byte can be read or written, the interrupt handler is
126 * called again.
127 *
128 * 2. The SCSI interrupt must be disabled in this interrupt handler.
129 *
130 * 3. If we set the EOP signal, the SCSI controller still expects one
131 * byte to be read or written. Therefore the last byte is transferred
132 * separately, after setting the EOP signal.
133 *
134 * 4. When this function is left, the address pointer (start_addr) is
135 * converted to a physical address. Because it points one byte
136 * further than the last transferred byte, it can point outside the
137 * current page. If virt_to_phys() is called with this address we
138 * might get an access error. Therefore virt_to_phys() is called with
139 * start_addr - 1 if the count has reached zero. The result is
140 * increased with one.
141 */
142
143static irqreturn_t hades_dma_emulator(int irq, void *dummy)
144{
145 unsigned long dma_base;
146 register unsigned long dma_cnt asm ("d3");
147 static long save_buserr;
148 register unsigned long save_sp asm ("d4");
149 register int tries asm ("d5");
150 register unsigned char *start_addr asm ("a3"), *end_addr asm ("a4");
151 register unsigned char *eff_addr;
152 register unsigned char *psdm_reg;
153 unsigned long rem;
154
155 atari_disable_irq(IRQ_TT_MFP_SCSI);
156
157 /*
158 * Read the dma address and count registers.
159 */
160
161 dma_base = SCSI_DMA_READ_P(dma_addr);
162 dma_cnt = SCSI_DMA_READ_P(dma_cnt);
163
164 /*
165 * Check if DMA is still enabled.
166 */
167
168 if ((tt_scsi_dma.dma_ctrl & 2) == 0)
169 {
170 atari_enable_irq(IRQ_TT_MFP_SCSI);
171 return IRQ_HANDLED;
172 }
173
174 if (dma_cnt == 0)
175 {
176 printk(KERN_NOTICE "DMA emulation: count is zero.\n");
177 tt_scsi_dma.dma_ctrl &= 0xfd; /* DMA ready. */
178 atari_enable_irq(IRQ_TT_MFP_SCSI);
179 return IRQ_HANDLED;
180 }
181
182 /*
183 * Install new bus error routine.
184 */
185
186 __asm__ __volatile__ ("movec.l %%vbr,%%a0\n\t"
187 "move.l 8(%%a0),%0\n\t"
188 "move.l %1,8(%%a0)\n\t"
189 : "=&r" (save_buserr)
190 : "r" (&&scsi_bus_error)
191 : "a0" );
192
193 hades_dma_ctrl &= 0xfc; /* Bus error and EOP off. */
194
195 /*
196 * Save the stack pointer.
197 */
198
199 __asm__ __volatile__ ("move.l %%sp,%0\n\t"
200 : "=&r" (save_sp) );
201
202 tries = 100; /* Maximum number of bus errors. */
203 start_addr = phys_to_virt(dma_base);
204 end_addr = start_addr + dma_cnt;
205
206scsi_loop:
207 dma_cnt--;
208 rem = dma_cnt & (TRANSFER_SIZE - 1);
209 dma_cnt &= ~(TRANSFER_SIZE - 1);
210 psdm_reg = &hades_psdm_reg;
211
212 if (tt_scsi_dma.dma_ctrl & 1) /* Read or write? */
213 {
214 /*
215 * SCSI write. Abort when count is zero.
216 */
217
218 switch (rem)
219 {
220 case 0:
221 while (dma_cnt > 0)
222 {
223 dma_cnt -= TRANSFER_SIZE;
224
225 *psdm_reg = *start_addr++;
226 case 15:
227 *psdm_reg = *start_addr++;
228 case 14:
229 *psdm_reg = *start_addr++;
230 case 13:
231 *psdm_reg = *start_addr++;
232 case 12:
233 *psdm_reg = *start_addr++;
234 case 11:
235 *psdm_reg = *start_addr++;
236 case 10:
237 *psdm_reg = *start_addr++;
238 case 9:
239 *psdm_reg = *start_addr++;
240 case 8:
241 *psdm_reg = *start_addr++;
242 case 7:
243 *psdm_reg = *start_addr++;
244 case 6:
245 *psdm_reg = *start_addr++;
246 case 5:
247 *psdm_reg = *start_addr++;
248 case 4:
249 *psdm_reg = *start_addr++;
250 case 3:
251 *psdm_reg = *start_addr++;
252 case 2:
253 *psdm_reg = *start_addr++;
254 case 1:
255 *psdm_reg = *start_addr++;
256 }
257 }
258
259 hades_dma_ctrl |= 1; /* Set EOP. */
260 udelay(10);
261 *psdm_reg = *start_addr++; /* Dummy byte. */
262 tt_scsi_dma.dma_ctrl &= 0xfd; /* DMA ready. */
263 }
264 else
265 {
266 /*
267 * SCSI read. Abort when count is zero.
268 */
269
270 switch (rem)
271 {
272 case 0:
273 while (dma_cnt > 0)
274 {
275 dma_cnt -= TRANSFER_SIZE;
276
277 *start_addr++ = *psdm_reg;
278 case 15:
279 *start_addr++ = *psdm_reg;
280 case 14:
281 *start_addr++ = *psdm_reg;
282 case 13:
283 *start_addr++ = *psdm_reg;
284 case 12:
285 *start_addr++ = *psdm_reg;
286 case 11:
287 *start_addr++ = *psdm_reg;
288 case 10:
289 *start_addr++ = *psdm_reg;
290 case 9:
291 *start_addr++ = *psdm_reg;
292 case 8:
293 *start_addr++ = *psdm_reg;
294 case 7:
295 *start_addr++ = *psdm_reg;
296 case 6:
297 *start_addr++ = *psdm_reg;
298 case 5:
299 *start_addr++ = *psdm_reg;
300 case 4:
301 *start_addr++ = *psdm_reg;
302 case 3:
303 *start_addr++ = *psdm_reg;
304 case 2:
305 *start_addr++ = *psdm_reg;
306 case 1:
307 *start_addr++ = *psdm_reg;
308 }
309 }
310
311 hades_dma_ctrl |= 1; /* Set EOP. */
312 udelay(10);
313 *start_addr++ = *psdm_reg;
314 tt_scsi_dma.dma_ctrl &= 0xfd; /* DMA ready. */
315
316 set_restdata_reg(start_addr);
317 }
318
319 if (start_addr != end_addr)
320 printk(KERN_CRIT "DMA emulation: FATAL: Count is not zero at end of transfer.\n");
321
322 dma_cnt = end_addr - start_addr;
323
324scsi_end:
325 dma_base = (dma_cnt == 0) ? virt_to_phys(start_addr - 1) + 1 :
326 virt_to_phys(start_addr);
327
328 SCSI_DMA_WRITE_P(dma_addr, dma_base);
329 SCSI_DMA_WRITE_P(dma_cnt, dma_cnt);
330
331 /*
332 * Restore old bus error routine.
333 */
334
335 __asm__ __volatile__ ("movec.l %%vbr,%%a0\n\t"
336 "move.l %0,8(%%a0)\n\t"
337 :
338 : "r" (save_buserr)
339 : "a0" );
340
341 atari_enable_irq(IRQ_TT_MFP_SCSI);
342
343 return IRQ_HANDLED;
344
345scsi_bus_error:
346 /*
347 * First check if the bus error is caused by our code.
348 * If not, call the original handler.
349 */
350
351 __asm__ __volatile__ ("cmp.l %0,2(%%sp)\n\t"
352 "bcs.s .old_vector\n\t"
353 "cmp.l %1,2(%%sp)\n\t"
354 "bls.s .scsi_buserr\n"
355 ".old_vector:\n\t"
356 "move.l %2,-(%%sp)\n\t"
357 "rts\n"
358 ".scsi_buserr:\n\t"
359 :
360 : "i" (&&scsi_loop), "i" (&&scsi_end),
361 "m" (save_buserr) );
362
363 if (CPU_IS_060)
364 {
365 /*
366 * Get effective address and restore the stack.
367 */
368
369 __asm__ __volatile__ ("move.l 8(%%sp),%0\n\t"
370 "move.l %1,%%sp\n\t"
371 : "=a&" (eff_addr)
372 : "r" (save_sp) );
373 }
374 else
375 {
376 register struct m68040_frame *frame;
377
378 __asm__ __volatile__ ("lea 8(%%sp),%0\n\t"
379 : "=a&" (frame) );
380
381 if (tt_scsi_dma.dma_ctrl & 1)
382 {
383 /*
384 * Bus error while writing.
385 */
386
387 if (frame->wb3s & WBV_040)
388 {
389 if (frame->wb3a == (long) &hades_psdm_reg)
390 start_addr--;
391 else
392 writeback(frame->wb3s, frame->wb3a,
393 frame->wb3d, &&scsi_bus_error);
394 }
395
396 if (frame->wb2s & WBV_040)
397 {
398 if (frame->wb2a == (long) &hades_psdm_reg)
399 start_addr--;
400 else
401 writeback(frame->wb2s, frame->wb2a,
402 frame->wb2d, &&scsi_bus_error);
403 }
404
405 if (frame->wb1s & WBV_040)
406 {
407 if (frame->wb1a == (long) &hades_psdm_reg)
408 start_addr--;
409 }
410 }
411 else
412 {
413 /*
414 * Bus error while reading.
415 */
416
417 if (frame->wb3s & WBV_040)
418 writeback(frame->wb3s, frame->wb3a,
419 frame->wb3d, &&scsi_bus_error);
420 }
421
422 eff_addr = (unsigned char *) frame->faddr;
423
424 __asm__ __volatile__ ("move.l %0,%%sp\n\t"
425 :
426 : "r" (save_sp) );
427 }
428
429 dma_cnt = end_addr - start_addr;
430
431 if (eff_addr == &hades_psdm_reg)
432 {
433 /*
434 * Bus error occurred while reading the pseudo
435 * DMA register. Time out.
436 */
437
438 tries--;
439
440 if (tries <= 0)
441 {
442 if ((tt_scsi_dma.dma_ctrl & 1) == 0) /* Read or write? */
443 set_restdata_reg(start_addr);
444
445 if (dma_cnt <= 1)
446 printk(KERN_CRIT "DMA emulation: Fatal "
447 "error while %s the last byte.\n",
448 (tt_scsi_dma.dma_ctrl & 1)
449 ? "writing" : "reading");
450
451 goto scsi_end;
452 }
453 else
454 goto scsi_loop;
455 }
456 else
457 {
458 /*
459 * Bus error during pseudo DMA transfer.
460 * Terminate the DMA transfer.
461 */
462
463 hades_dma_ctrl |= 3; /* Set EOP and bus error. */
464 if ((tt_scsi_dma.dma_ctrl & 1) == 0) /* Read or write? */
465 set_restdata_reg(start_addr);
466 goto scsi_end;
467 }
468}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index f5732d8f67fe..21fe07f9df87 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -249,10 +249,6 @@ static int setup_hostid = -1;
249module_param(setup_hostid, int, 0); 249module_param(setup_hostid, int, 0);
250 250
251 251
252#if defined(CONFIG_TT_DMA_EMUL)
253#include "atari_dma_emul.c"
254#endif
255
256#if defined(REAL_DMA) 252#if defined(REAL_DMA)
257 253
258static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) 254static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
@@ -695,21 +691,8 @@ int atari_scsi_detect(struct scsi_host_template *host)
695#ifdef REAL_DMA 691#ifdef REAL_DMA
696 tt_scsi_dma.dma_ctrl = 0; 692 tt_scsi_dma.dma_ctrl = 0;
697 atari_dma_residual = 0; 693 atari_dma_residual = 0;
698#ifdef CONFIG_TT_DMA_EMUL 694
699 if (MACH_IS_HADES) { 695 if (MACH_IS_MEDUSA) {
700 if (request_irq(IRQ_AUTO_2, hades_dma_emulator,
701 IRQ_TYPE_PRIO, "Hades DMA emulator",
702 hades_dma_emulator)) {
703 printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2);
704 free_irq(IRQ_TT_MFP_SCSI, instance);
705 scsi_unregister(atari_scsi_host);
706 atari_stram_free(atari_dma_buffer);
707 atari_dma_buffer = 0;
708 return 0;
709 }
710 }
711#endif
712 if (MACH_IS_MEDUSA || MACH_IS_HADES) {
713 /* While the read overruns (described by Drew Eckhardt in 696 /* While the read overruns (described by Drew Eckhardt in
714 * NCR5380.c) never happened on TTs, they do in fact on the Medusa 697 * NCR5380.c) never happened on TTs, they do in fact on the Medusa
715 * (This was the cause why SCSI didn't work right for so long 698 * (This was the cause why SCSI didn't work right for so long
@@ -1007,11 +990,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
1007 Scsi_Cmnd *cmd, int write_flag) 990 Scsi_Cmnd *cmd, int write_flag)
1008{ 991{
1009 unsigned long possible_len, limit; 992 unsigned long possible_len, limit;
1010#ifndef CONFIG_TT_DMA_EMUL 993
1011 if (MACH_IS_HADES)
1012 /* Hades has no SCSI DMA at all :-( Always force use of PIO */
1013 return 0;
1014#endif
1015 if (IS_A_TT()) 994 if (IS_A_TT())
1016 /* TT SCSI DMA can transfer arbitrary #bytes */ 995 /* TT SCSI DMA can transfer arbitrary #bytes */
1017 return wanted_len; 996 return wanted_len;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 3c257fe0893e..88ecf94ad979 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -914,9 +914,9 @@ static int ch_probe(struct device *dev)
914 ch->minor = minor; 914 ch->minor = minor;
915 sprintf(ch->name,"ch%d",ch->minor); 915 sprintf(ch->name,"ch%d",ch->minor);
916 916
917 class_dev = device_create_drvdata(ch_sysfs_class, dev, 917 class_dev = device_create(ch_sysfs_class, dev,
918 MKDEV(SCSI_CHANGER_MAJOR, ch->minor), 918 MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
919 ch, "s%s", ch->name); 919 "s%s", ch->name);
920 if (IS_ERR(class_dev)) { 920 if (IS_ERR(class_dev)) {
921 printk(KERN_WARNING "ch%d: device_create failed\n", 921 printk(KERN_WARNING "ch%d: device_create failed\n",
922 ch->minor); 922 ch->minor);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9785d7384199..4003deefb7d8 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
1364static const char * const hostbyte_table[]={ 1364static const char * const hostbyte_table[]={
1365"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", 1365"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
1366"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", 1366"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
1367"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"}; 1367"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
1368"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
1368#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) 1369#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1369 1370
1370static const char * const driverbyte_table[]={ 1371static const char * const driverbyte_table[]={
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 708e475896b9..e356b43753ff 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
109 } 109 }
110 110
111 rq->cmd_type = REQ_TYPE_BLOCK_PC; 111 rq->cmd_type = REQ_TYPE_BLOCK_PC;
112 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 112 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
113 REQ_FAILFAST_DRIVER;
113 rq->retries = ALUA_FAILOVER_RETRIES; 114 rq->retries = ALUA_FAILOVER_RETRIES;
114 rq->timeout = ALUA_FAILOVER_TIMEOUT; 115 rq->timeout = ALUA_FAILOVER_TIMEOUT;
115 116
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index ef693e8412e9..0e572d2c5b0a 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
84 /* 84 /*
85 * I/O buffer for both MODE_SELECT and INQUIRY commands. 85 * I/O buffer for both MODE_SELECT and INQUIRY commands.
86 */ 86 */
87 char buffer[CLARIION_BUFFER_SIZE]; 87 unsigned char buffer[CLARIION_BUFFER_SIZE];
88 /* 88 /*
89 * SCSI sense buffer for commands -- assumes serial issuance 89 * SCSI sense buffer for commands -- assumes serial issuance
90 * and completion sequence of all commands for same multipath. 90 * and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
176 err = SCSI_DH_DEV_TEMP_BUSY; 176 err = SCSI_DH_DEV_TEMP_BUSY;
177 goto out; 177 goto out;
178 } 178 }
179 if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) { 179 if (csdev->buffer[4] > 2) {
180 /* Invalid buffer format */ 180 /* Invalid buffer format */
181 sdev_printk(KERN_NOTICE, sdev, 181 sdev_printk(KERN_NOTICE, sdev,
182 "%s: invalid VPD page 0xC0 format\n", 182 "%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
278 return NULL; 278 return NULL;
279 } 279 }
280 280
281 memset(rq->cmd, 0, BLK_MAX_CDB);
282 rq->cmd_len = COMMAND_SIZE(cmd); 281 rq->cmd_len = COMMAND_SIZE(cmd);
283 rq->cmd[0] = cmd; 282 rq->cmd[0] = cmd;
284 283
@@ -304,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
304 303
305 rq->cmd[4] = len; 304 rq->cmd[4] = len;
306 rq->cmd_type = REQ_TYPE_BLOCK_PC; 305 rq->cmd_type = REQ_TYPE_BLOCK_PC;
307 rq->cmd_flags |= REQ_FAILFAST; 306 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
307 REQ_FAILFAST_DRIVER;
308 rq->timeout = CLARIION_TIMEOUT; 308 rq->timeout = CLARIION_TIMEOUT;
309 rq->retries = CLARIION_RETRIES; 309 rq->retries = CLARIION_RETRIES;
310 310
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51c..9aec4ca64e56 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -112,9 +112,9 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
112 return SCSI_DH_RES_TEMP_UNAVAIL; 112 return SCSI_DH_RES_TEMP_UNAVAIL;
113 113
114 req->cmd_type = REQ_TYPE_BLOCK_PC; 114 req->cmd_type = REQ_TYPE_BLOCK_PC;
115 req->cmd_flags |= REQ_FAILFAST; 115 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
116 REQ_FAILFAST_DRIVER;
116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); 117 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
117 memset(req->cmd, 0, MAX_COMMAND_SIZE);
118 req->cmd[0] = TEST_UNIT_READY; 118 req->cmd[0] = TEST_UNIT_READY;
119 req->timeout = HP_SW_TIMEOUT; 119 req->timeout = HP_SW_TIMEOUT;
120 req->sense = h->sense; 120 req->sense = h->sense;
@@ -205,9 +205,9 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
205 return SCSI_DH_RES_TEMP_UNAVAIL; 205 return SCSI_DH_RES_TEMP_UNAVAIL;
206 206
207 req->cmd_type = REQ_TYPE_BLOCK_PC; 207 req->cmd_type = REQ_TYPE_BLOCK_PC;
208 req->cmd_flags |= REQ_FAILFAST; 208 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
209 REQ_FAILFAST_DRIVER;
209 req->cmd_len = COMMAND_SIZE(START_STOP); 210 req->cmd_len = COMMAND_SIZE(START_STOP);
210 memset(req->cmd, 0, MAX_COMMAND_SIZE);
211 req->cmd[0] = START_STOP; 211 req->cmd[0] = START_STOP;
212 req->cmd[4] = 1; /* Start spin cycle */ 212 req->cmd[4] = 1; /* Start spin cycle */
213 req->timeout = HP_SW_TIMEOUT; 213 req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6e2f130d56de..3d50cabca7ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,10 +225,9 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
225 return NULL; 225 return NULL;
226 } 226 }
227 227
228 memset(rq->cmd, 0, BLK_MAX_CDB);
229
230 rq->cmd_type = REQ_TYPE_BLOCK_PC; 228 rq->cmd_type = REQ_TYPE_BLOCK_PC;
231 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 229 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
230 REQ_FAILFAST_DRIVER;
232 rq->retries = RDAC_RETRIES; 231 rq->retries = RDAC_RETRIES;
233 rq->timeout = RDAC_TIMEOUT; 232 rq->timeout = RDAC_TIMEOUT;
234 233
@@ -402,6 +401,9 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
402 } 401 }
403 } 402 }
404 403
404 if (h->lun_state == RDAC_LUN_UNOWNED)
405 h->state = RDAC_STATE_PASSIVE;
406
405 return err; 407 return err;
406} 408}
407 409
@@ -590,6 +592,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
590 {"STK", "OPENstorage D280"}, 592 {"STK", "OPENstorage D280"},
591 {"SUN", "CSM200_R"}, 593 {"SUN", "CSM200_R"},
592 {"SUN", "LCSM100_F"}, 594 {"SUN", "LCSM100_F"},
595 {"DELL", "MD3000"},
596 {"DELL", "MD3000i"},
593 {NULL, NULL}, 597 {NULL, NULL},
594}; 598};
595 599
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 1fe0901e8119..6194ed5d02c4 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -271,7 +271,7 @@ rebuild_sys_tab:
271 pHba->initialized = TRUE; 271 pHba->initialized = TRUE;
272 pHba->state &= ~DPTI_STATE_RESET; 272 pHba->state &= ~DPTI_STATE_RESET;
273 if (adpt_sysfs_class) { 273 if (adpt_sysfs_class) {
274 struct device *dev = device_create_drvdata(adpt_sysfs_class, 274 struct device *dev = device_create(adpt_sysfs_class,
275 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL, 275 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
276 "dpti%d", pHba->unit); 276 "dpti%d", pHba->unit);
277 if (IS_ERR(dev)) { 277 if (IS_ERR(dev)) {
@@ -2445,7 +2445,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2445 hba_status = detailed_status >> 8; 2445 hba_status = detailed_status >> 8;
2446 2446
2447 // calculate resid for sg 2447 // calculate resid for sg
2448 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5)); 2448 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2449 2449
2450 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 2450 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2451 2451
@@ -2456,7 +2456,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2456 case I2O_SCSI_DSC_SUCCESS: 2456 case I2O_SCSI_DSC_SUCCESS:
2457 cmd->result = (DID_OK << 16); 2457 cmd->result = (DID_OK << 16);
2458 // handle underflow 2458 // handle underflow
2459 if(readl(reply+5) < cmd->underflow ) { 2459 if (readl(reply+20) < cmd->underflow) {
2460 cmd->result = (DID_ERROR <<16); 2460 cmd->result = (DID_ERROR <<16);
2461 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name); 2461 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2462 } 2462 }
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index bb43a1388188..28e22acf87ea 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -521,7 +521,8 @@ struct esp {
521 521
522 struct completion *eh_reset; 522 struct completion *eh_reset;
523 523
524 struct sbus_dma *dma; 524 void *dma;
525 int dmarev;
525}; 526};
526 527
527/* A front-end driver for the ESP chip should do the following in 528/* A front-end driver for the ESP chip should do the following in
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index c33bcb284df7..56f4e6bffc21 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -290,9 +290,11 @@
290#include <scsi/scsi_ioctl.h> 290#include <scsi/scsi_ioctl.h>
291#include "fdomain.h" 291#include "fdomain.h"
292 292
293#ifndef PCMCIA
293MODULE_AUTHOR("Rickard E. Faith"); 294MODULE_AUTHOR("Rickard E. Faith");
294MODULE_DESCRIPTION("Future domain SCSI driver"); 295MODULE_DESCRIPTION("Future domain SCSI driver");
295MODULE_LICENSE("GPL"); 296MODULE_LICENSE("GPL");
297#endif
296 298
297 299
298#define VERSION "$Revision: 5.51 $" 300#define VERSION "$Revision: 5.51 $"
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
464 464
465 /* use request field to save the ptr. to completion struct. */ 465 /* use request field to save the ptr. to completion struct. */
466 scp->request = (struct request *)&wait; 466 scp->request = (struct request *)&wait;
467 scp->timeout_per_command = timeout*HZ;
468 scp->cmd_len = 12; 467 scp->cmd_len = 12;
469 scp->cmnd = cmnd; 468 scp->cmnd = cmnd;
470 cmndinfo.priority = IOCTL_PRI; 469 cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
1995 register Scsi_Cmnd *pscp; 1994 register Scsi_Cmnd *pscp;
1996 register Scsi_Cmnd *nscp; 1995 register Scsi_Cmnd *nscp;
1997 ulong flags; 1996 ulong flags;
1998 unchar b, t;
1999 1997
2000 TRACE(("gdth_putq() priority %d\n",priority)); 1998 TRACE(("gdth_putq() priority %d\n",priority));
2001 spin_lock_irqsave(&ha->smp_lock, flags); 1999 spin_lock_irqsave(&ha->smp_lock, flags);
2002 2000
2003 if (!cmndinfo->internal_command) { 2001 if (!cmndinfo->internal_command)
2004 cmndinfo->priority = priority; 2002 cmndinfo->priority = priority;
2005 b = scp->device->channel;
2006 t = scp->device->id;
2007 if (priority >= DEFAULT_PRI) {
2008 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
2009 (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
2010 TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
2011 cmndinfo->timeout = gdth_update_timeout(scp, 0);
2012 }
2013 }
2014 }
2015 2003
2016 if (ha->req_first==NULL) { 2004 if (ha->req_first==NULL) {
2017 ha->req_first = scp; /* queue was empty */ 2005 ha->req_first = scp; /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
3899 return ((const char *)ha->binfo.type_string); 3887 return ((const char *)ha->binfo.type_string);
3900} 3888}
3901 3889
3890static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3891{
3892 gdth_ha_str *ha = shost_priv(scp->device->host);
3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3894 unchar b, t;
3895 ulong flags;
3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
3897
3898 TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
3899 b = scp->device->channel;
3900 t = scp->device->id;
3901
3902 /*
3903 * We don't really honor the command timeout, but we try to
3904 * honor 6 times of the actual command timeout! So reset the
3905 * timer if this is less than 6th timeout on this command!
3906 */
3907 if (++cmndinfo->timeout_count < 6)
3908 retval = BLK_EH_RESET_TIMER;
3909
3910 /* Reset the timeout if it is locked IO */
3911 spin_lock_irqsave(&ha->smp_lock, flags);
3912 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
3913 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
3914 TRACE2(("%s(): locked IO, reset timeout\n", __func__));
3915 retval = BLK_EH_RESET_TIMER;
3916 }
3917 spin_unlock_irqrestore(&ha->smp_lock, flags);
3918
3919 return retval;
3920}
3921
3922
3902static int gdth_eh_bus_reset(Scsi_Cmnd *scp) 3923static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3903{ 3924{
3904 gdth_ha_str *ha = shost_priv(scp->device->host); 3925 gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
3992 BUG_ON(!cmndinfo); 4013 BUG_ON(!cmndinfo);
3993 4014
3994 scp->scsi_done = done; 4015 scp->scsi_done = done;
3995 gdth_update_timeout(scp, scp->timeout_per_command * 6); 4016 cmndinfo->timeout_count = 0;
3996 cmndinfo->priority = DEFAULT_PRI; 4017 cmndinfo->priority = DEFAULT_PRI;
3997 4018
3998 return __gdth_queuecommand(ha, scp, cmndinfo); 4019 return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
4096 ha->hdr[j].lock = 1; 4117 ha->hdr[j].lock = 1;
4097 spin_unlock_irqrestore(&ha->smp_lock, flags); 4118 spin_unlock_irqrestore(&ha->smp_lock, flags);
4098 gdth_wait_completion(ha, ha->bus_cnt, j); 4119 gdth_wait_completion(ha, ha->bus_cnt, j);
4099 gdth_stop_timeout(ha, ha->bus_cnt, j);
4100 } else { 4120 } else {
4101 spin_lock_irqsave(&ha->smp_lock, flags); 4121 spin_lock_irqsave(&ha->smp_lock, flags);
4102 ha->hdr[j].lock = 0; 4122 ha->hdr[j].lock = 0;
4103 spin_unlock_irqrestore(&ha->smp_lock, flags); 4123 spin_unlock_irqrestore(&ha->smp_lock, flags);
4104 gdth_start_timeout(ha, ha->bus_cnt, j);
4105 gdth_next(ha); 4124 gdth_next(ha);
4106 } 4125 }
4107 } 4126 }
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4539 spin_lock_irqsave(&ha->smp_lock, flags); 4558 spin_lock_irqsave(&ha->smp_lock, flags);
4540 ha->raw[i].lock = 1; 4559 ha->raw[i].lock = 1;
4541 spin_unlock_irqrestore(&ha->smp_lock, flags); 4560 spin_unlock_irqrestore(&ha->smp_lock, flags);
4542 for (j = 0; j < ha->tid_cnt; ++j) { 4561 for (j = 0; j < ha->tid_cnt; ++j)
4543 gdth_wait_completion(ha, i, j); 4562 gdth_wait_completion(ha, i, j);
4544 gdth_stop_timeout(ha, i, j);
4545 }
4546 } else { 4563 } else {
4547 spin_lock_irqsave(&ha->smp_lock, flags); 4564 spin_lock_irqsave(&ha->smp_lock, flags);
4548 ha->raw[i].lock = 0; 4565 ha->raw[i].lock = 0;
4549 spin_unlock_irqrestore(&ha->smp_lock, flags); 4566 spin_unlock_irqrestore(&ha->smp_lock, flags);
4550 for (j = 0; j < ha->tid_cnt; ++j) { 4567 for (j = 0; j < ha->tid_cnt; ++j)
4551 gdth_start_timeout(ha, i, j);
4552 gdth_next(ha); 4568 gdth_next(ha);
4553 }
4554 } 4569 }
4555 } 4570 }
4556 break; 4571 break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
4644 .slave_configure = gdth_slave_configure, 4659 .slave_configure = gdth_slave_configure,
4645 .bios_param = gdth_bios_param, 4660 .bios_param = gdth_bios_param,
4646 .proc_info = gdth_proc_info, 4661 .proc_info = gdth_proc_info,
4662 .eh_timed_out = gdth_timed_out,
4647 .proc_name = "gdth", 4663 .proc_name = "gdth",
4648 .can_queue = GDTH_MAXCMDS, 4664 .can_queue = GDTH_MAXCMDS,
4649 .this_id = -1, 4665 .this_id = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ 916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
917 dma_addr_t sense_paddr; /* sense dma-addr */ 917 dma_addr_t sense_paddr; /* sense dma-addr */
918 unchar priority; 918 unchar priority;
919 int timeout; 919 int timeout_count; /* # of timeout calls */
920 volatile int wait_for_completion; 920 volatile int wait_for_completion;
921 ushort status; 921 ushort status;
922 ulong32 info; 922 ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
748 } 748 }
749 spin_unlock_irqrestore(&ha->smp_lock, flags); 749 spin_unlock_irqrestore(&ha->smp_lock, flags);
750} 750}
751
752static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
753{
754 ulong flags;
755 Scsi_Cmnd *scp;
756 unchar b, t;
757
758 spin_lock_irqsave(&ha->smp_lock, flags);
759
760 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
761 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
762 if (!cmndinfo->internal_command) {
763 b = scp->device->channel;
764 t = scp->device->id;
765 if (t == (unchar)id && b == (unchar)busnum) {
766 TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
767 cmndinfo->timeout = gdth_update_timeout(scp, 0);
768 }
769 }
770 }
771 spin_unlock_irqrestore(&ha->smp_lock, flags);
772}
773
774static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
775{
776 ulong flags;
777 Scsi_Cmnd *scp;
778 unchar b, t;
779
780 spin_lock_irqsave(&ha->smp_lock, flags);
781
782 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
783 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
784 if (!cmndinfo->internal_command) {
785 b = scp->device->channel;
786 t = scp->device->id;
787 if (t == (unchar)id && b == (unchar)busnum) {
788 TRACE2(("gdth_start_timeout(): update_timeout()\n"));
789 gdth_update_timeout(scp, cmndinfo->timeout);
790 }
791 }
792 }
793 spin_unlock_irqrestore(&ha->smp_lock, flags);
794}
795
796static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
797{
798 int oldto;
799
800 oldto = scp->timeout_per_command;
801 scp->timeout_per_command = timeout;
802
803 if (timeout == 0) {
804 del_timer(&scp->eh_timeout);
805 scp->eh_timeout.data = (unsigned long) NULL;
806 scp->eh_timeout.expires = 0;
807 } else {
808 if (scp->eh_timeout.data != (unsigned long) NULL)
809 del_timer(&scp->eh_timeout);
810 scp->eh_timeout.data = (unsigned long) scp;
811 scp->eh_timeout.expires = jiffies + timeout;
812 add_timer(&scp->eh_timeout);
813 }
814
815 return oldto;
816}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
20 ulong64 *paddr); 20 ulong64 *paddr);
21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); 21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); 22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
23static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
24static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
25static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
26 23
27#endif 24#endif
28 25
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1d..3fdbb13e80a8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) 464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
465{ 465{
466 struct device *cdev; 466 struct device *cdev;
467 struct Scsi_Host *shost = ERR_PTR(-ENXIO); 467 struct Scsi_Host *shost = NULL;
468 468
469 cdev = class_find_device(&shost_class, NULL, &hostnum, 469 cdev = class_find_device(&shost_class, NULL, &hostnum,
470 __scsi_host_match); 470 __scsi_host_match);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e0b7c8eb32e..7650707a40de 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2031,8 +2031,6 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2031 spin_unlock_irqrestore(shost->host_lock, flags); 2031 spin_unlock_irqrestore(shost->host_lock, flags);
2032 } else 2032 } else
2033 ibmvfc_issue_fc_host_lip(shost); 2033 ibmvfc_issue_fc_host_lip(shost);
2034
2035 scsi_target_unblock(&rport->dev);
2036 LEAVE; 2034 LEAVE;
2037} 2035}
2038 2036
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
756 init_event_struct(evt_struct, 756 init_event_struct(evt_struct,
757 handle_cmd_rsp, 757 handle_cmd_rsp,
758 VIOSRP_SRP_FORMAT, 758 VIOSRP_SRP_FORMAT,
759 cmnd->timeout_per_command/HZ); 759 cmnd->request->timeout/HZ);
760 760
761 evt_struct->cmnd = cmnd; 761 evt_struct->cmnd = cmnd;
762 evt_struct->cmnd_done = done; 762 evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..2370fd82ebfe 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -40,7 +40,6 @@
40#include <linux/ioport.h> 40#include <linux/ioport.h>
41#include <linux/blkdev.h> 41#include <linux/blkdev.h>
42#include <linux/errno.h> 42#include <linux/errno.h>
43#include <linux/hdreg.h>
44#include <linux/slab.h> 43#include <linux/slab.h>
45#include <linux/ide.h> 44#include <linux/ide.h>
46#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
@@ -83,7 +82,6 @@ typedef struct ide_scsi_obj {
83 struct gendisk *disk; 82 struct gendisk *disk;
84 struct Scsi_Host *host; 83 struct Scsi_Host *host;
85 84
86 struct ide_atapi_pc *pc; /* Current packet command */
87 unsigned long transform; /* SCSI cmd translation layer */ 85 unsigned long transform; /* SCSI cmd translation layer */
88 unsigned long log; /* log flags */ 86 unsigned long log; /* log flags */
89} idescsi_scsi_t; 87} idescsi_scsi_t;
@@ -131,50 +129,6 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
131 return scsihost_to_idescsi(ide_drive->driver_data); 129 return scsihost_to_idescsi(ide_drive->driver_data);
132} 130}
133 131
134/*
135 * PIO data transfer routine using the scatter gather table.
136 */
137static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
138 unsigned int bcount, int write)
139{
140 ide_hwif_t *hwif = drive->hwif;
141 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
142 xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
143 char *buf;
144 int count;
145
146 while (bcount) {
147 count = min(pc->sg->length - pc->b_count, bcount);
148 if (PageHighMem(sg_page(pc->sg))) {
149 unsigned long flags;
150
151 local_irq_save(flags);
152 buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
153 pc->sg->offset;
154 xf(drive, NULL, buf + pc->b_count, count);
155 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
156 local_irq_restore(flags);
157 } else {
158 buf = sg_virt(pc->sg);
159 xf(drive, NULL, buf + pc->b_count, count);
160 }
161 bcount -= count; pc->b_count += count;
162 if (pc->b_count == pc->sg->length) {
163 if (!--pc->sg_cnt)
164 break;
165 pc->sg = sg_next(pc->sg);
166 pc->b_count = 0;
167 }
168 }
169
170 if (bcount) {
171 printk(KERN_ERR "%s: scatter gather table too small, %s\n",
172 drive->name, write ? "padding with zeros"
173 : "discarding data");
174 ide_pad_transfer(drive, write, bcount);
175 }
176}
177
178static void ide_scsi_hex_dump(u8 *data, int len) 132static void ide_scsi_hex_dump(u8 *data, int len)
179{ 133{
180 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0); 134 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0);
@@ -182,10 +136,10 @@ static void ide_scsi_hex_dump(u8 *data, int len)
182 136
183static int idescsi_end_request(ide_drive_t *, int, int); 137static int idescsi_end_request(ide_drive_t *, int, int);
184 138
185static void ide_scsi_callback(ide_drive_t *drive) 139static void ide_scsi_callback(ide_drive_t *drive, int dsc)
186{ 140{
187 idescsi_scsi_t *scsi = drive_to_idescsi(drive); 141 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
188 struct ide_atapi_pc *pc = scsi->pc; 142 struct ide_atapi_pc *pc = drive->pc;
189 143
190 if (pc->flags & PC_FLAG_TIMEDOUT) 144 if (pc->flags & PC_FLAG_TIMEDOUT)
191 debug_log("%s: got timed out packet %lu at %lu\n", __func__, 145 debug_log("%s: got timed out packet %lu at %lu\n", __func__,
@@ -244,9 +198,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
244{ 198{
245 ide_hwif_t *hwif = drive->hwif; 199 ide_hwif_t *hwif = drive->hwif;
246 200
247 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT)) 201 if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
248 /* force an abort */ 202 /* force an abort */
249 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE); 203 hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
250 204
251 rq->errors++; 205 rq->errors++;
252 206
@@ -312,49 +266,10 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
312 spin_unlock_irqrestore(host->host_lock, flags); 266 spin_unlock_irqrestore(host->host_lock, flags);
313 kfree(pc); 267 kfree(pc);
314 blk_put_request(rq); 268 blk_put_request(rq);
315 scsi->pc = NULL; 269 drive->pc = NULL;
316 return 0; 270 return 0;
317} 271}
318 272
319static inline unsigned long get_timeout(struct ide_atapi_pc *pc)
320{
321 return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies);
322}
323
324static int idescsi_expiry(ide_drive_t *drive)
325{
326 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
327 struct ide_atapi_pc *pc = scsi->pc;
328
329 debug_log("%s called for %lu at %lu\n", __func__,
330 pc->scsi_cmd->serial_number, jiffies);
331
332 pc->flags |= PC_FLAG_TIMEDOUT;
333
334 return 0; /* we do not want the ide subsystem to retry */
335}
336
337/*
338 * Our interrupt handler.
339 */
340static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
341{
342 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
343 struct ide_atapi_pc *pc = scsi->pc;
344
345 return ide_pc_intr(drive, pc, idescsi_pc_intr, get_timeout(pc),
346 idescsi_expiry, NULL, NULL, NULL,
347 ide_scsi_io_buffers);
348}
349
350static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
351{
352 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
353
354 return ide_transfer_pc(drive, scsi->pc, idescsi_pc_intr,
355 get_timeout(scsi->pc), idescsi_expiry);
356}
357
358static inline int idescsi_set_direction(struct ide_atapi_pc *pc) 273static inline int idescsi_set_direction(struct ide_atapi_pc *pc)
359{ 274{
360 switch (pc->c[0]) { 275 switch (pc->c[0]) {
@@ -397,13 +312,10 @@ static int idescsi_map_sg(ide_drive_t *drive, struct ide_atapi_pc *pc)
397static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive, 312static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
398 struct ide_atapi_pc *pc) 313 struct ide_atapi_pc *pc)
399{ 314{
400 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
401
402 /* Set the current packet command */ 315 /* Set the current packet command */
403 scsi->pc = pc; 316 drive->pc = pc;
404 317
405 return ide_issue_pc(drive, pc, idescsi_transfer_pc, 318 return ide_issue_pc(drive, ide_scsi_get_timeout(pc), ide_scsi_expiry);
406 get_timeout(pc), idescsi_expiry);
407} 319}
408 320
409/* 321/*
@@ -419,7 +331,8 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
419 if (blk_sense_request(rq) || blk_special_request(rq)) { 331 if (blk_sense_request(rq) || blk_special_request(rq)) {
420 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; 332 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
421 333
422 if (drive->using_dma && !idescsi_map_sg(drive, pc)) 334 if ((drive->dev_flags & IDE_DFLAG_USING_DMA) &&
335 idescsi_map_sg(drive, pc) == 0)
423 pc->flags |= PC_FLAG_DMA_OK; 336 pc->flags |= PC_FLAG_DMA_OK;
424 337
425 return idescsi_issue_pc(drive, pc); 338 return idescsi_issue_pc(drive, pc);
@@ -430,21 +343,56 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
430} 343}
431 344
432#ifdef CONFIG_IDE_PROC_FS 345#ifdef CONFIG_IDE_PROC_FS
433static void idescsi_add_settings(ide_drive_t *drive) 346static ide_proc_entry_t idescsi_proc[] = {
347 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
348 { NULL, 0, NULL, NULL }
349};
350
351#define ide_scsi_devset_get(name, field) \
352static int get_##name(ide_drive_t *drive) \
353{ \
354 idescsi_scsi_t *scsi = drive_to_idescsi(drive); \
355 return scsi->field; \
356}
357
358#define ide_scsi_devset_set(name, field) \
359static int set_##name(ide_drive_t *drive, int arg) \
360{ \
361 idescsi_scsi_t *scsi = drive_to_idescsi(drive); \
362 scsi->field = arg; \
363 return 0; \
364}
365
366#define ide_scsi_devset_rw_field(_name, _field) \
367ide_scsi_devset_get(_name, _field); \
368ide_scsi_devset_set(_name, _field); \
369IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name);
370
371ide_devset_rw_field(bios_cyl, bios_cyl);
372ide_devset_rw_field(bios_head, bios_head);
373ide_devset_rw_field(bios_sect, bios_sect);
374
375ide_scsi_devset_rw_field(transform, transform);
376ide_scsi_devset_rw_field(log, log);
377
378static const struct ide_proc_devset idescsi_settings[] = {
379 IDE_PROC_DEVSET(bios_cyl, 0, 1023),
380 IDE_PROC_DEVSET(bios_head, 0, 255),
381 IDE_PROC_DEVSET(bios_sect, 0, 63),
382 IDE_PROC_DEVSET(log, 0, 1),
383 IDE_PROC_DEVSET(transform, 0, 3),
384 { 0 },
385};
386
387static ide_proc_entry_t *ide_scsi_proc_entries(ide_drive_t *drive)
434{ 388{
435 idescsi_scsi_t *scsi = drive_to_idescsi(drive); 389 return idescsi_proc;
390}
436 391
437/* 392static const struct ide_proc_devset *ide_scsi_proc_devsets(ide_drive_t *drive)
438 * drive setting name read/write data type min max mul_factor div_factor data pointer set function 393{
439 */ 394 return idescsi_settings;
440 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
441 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
442 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
443 ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL);
444 ide_add_setting(drive, "log", SETTING_RW, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL);
445} 395}
446#else
447static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
448#endif 396#endif
449 397
450/* 398/*
@@ -452,16 +400,16 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
452 */ 400 */
453static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) 401static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
454{ 402{
455 if (drive->id && (drive->id->config & 0x0060) == 0x20)
456 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
457 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); 403 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
458#if IDESCSI_DEBUG_LOG 404#if IDESCSI_DEBUG_LOG
459 set_bit(IDESCSI_LOG_CMD, &scsi->log); 405 set_bit(IDESCSI_LOG_CMD, &scsi->log);
460#endif /* IDESCSI_DEBUG_LOG */ 406#endif /* IDESCSI_DEBUG_LOG */
461 407
462 drive->pc_callback = ide_scsi_callback; 408 drive->pc_callback = ide_scsi_callback;
409 drive->pc_update_buffers = NULL;
410 drive->pc_io_buffers = ide_io_buffers;
463 411
464 idescsi_add_settings(drive); 412 ide_proc_register_driver(drive, scsi->driver);
465} 413}
466 414
467static void ide_scsi_remove(ide_drive_t *drive) 415static void ide_scsi_remove(ide_drive_t *drive)
@@ -481,18 +429,11 @@ static void ide_scsi_remove(ide_drive_t *drive)
481 429
482 ide_scsi_put(scsi); 430 ide_scsi_put(scsi);
483 431
484 drive->scsi = 0; 432 drive->dev_flags &= ~IDE_DFLAG_SCSI;
485} 433}
486 434
487static int ide_scsi_probe(ide_drive_t *); 435static int ide_scsi_probe(ide_drive_t *);
488 436
489#ifdef CONFIG_IDE_PROC_FS
490static ide_proc_entry_t idescsi_proc[] = {
491 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
492 { NULL, 0, NULL, NULL }
493};
494#endif
495
496static ide_driver_t idescsi_driver = { 437static ide_driver_t idescsi_driver = {
497 .gen_driver = { 438 .gen_driver = {
498 .owner = THIS_MODULE, 439 .owner = THIS_MODULE,
@@ -502,50 +443,43 @@ static ide_driver_t idescsi_driver = {
502 .probe = ide_scsi_probe, 443 .probe = ide_scsi_probe,
503 .remove = ide_scsi_remove, 444 .remove = ide_scsi_remove,
504 .version = IDESCSI_VERSION, 445 .version = IDESCSI_VERSION,
505 .media = ide_scsi,
506 .supports_dsc_overlap = 0,
507 .do_request = idescsi_do_request, 446 .do_request = idescsi_do_request,
508 .end_request = idescsi_end_request, 447 .end_request = idescsi_end_request,
509 .error = idescsi_atapi_error, 448 .error = idescsi_atapi_error,
510#ifdef CONFIG_IDE_PROC_FS 449#ifdef CONFIG_IDE_PROC_FS
511 .proc = idescsi_proc, 450 .proc_entries = ide_scsi_proc_entries,
451 .proc_devsets = ide_scsi_proc_devsets,
512#endif 452#endif
513}; 453};
514 454
515static int idescsi_ide_open(struct inode *inode, struct file *filp) 455static int idescsi_ide_open(struct block_device *bdev, fmode_t mode)
516{ 456{
517 struct gendisk *disk = inode->i_bdev->bd_disk; 457 struct ide_scsi_obj *scsi = ide_scsi_get(bdev->bd_disk);
518 struct ide_scsi_obj *scsi;
519 458
520 if (!(scsi = ide_scsi_get(disk))) 459 if (!scsi)
521 return -ENXIO; 460 return -ENXIO;
522 461
523 return 0; 462 return 0;
524} 463}
525 464
526static int idescsi_ide_release(struct inode *inode, struct file *filp) 465static int idescsi_ide_release(struct gendisk *disk, fmode_t mode)
527{ 466{
528 struct gendisk *disk = inode->i_bdev->bd_disk; 467 ide_scsi_put(ide_scsi_g(disk));
529 struct ide_scsi_obj *scsi = ide_scsi_g(disk);
530
531 ide_scsi_put(scsi);
532
533 return 0; 468 return 0;
534} 469}
535 470
536static int idescsi_ide_ioctl(struct inode *inode, struct file *file, 471static int idescsi_ide_ioctl(struct block_device *bdev, fmode_t mode,
537 unsigned int cmd, unsigned long arg) 472 unsigned int cmd, unsigned long arg)
538{ 473{
539 struct block_device *bdev = inode->i_bdev;
540 struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk); 474 struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk);
541 return generic_ide_ioctl(scsi->drive, file, bdev, cmd, arg); 475 return generic_ide_ioctl(scsi->drive, bdev, cmd, arg);
542} 476}
543 477
544static struct block_device_operations idescsi_ops = { 478static struct block_device_operations idescsi_ops = {
545 .owner = THIS_MODULE, 479 .owner = THIS_MODULE,
546 .open = idescsi_ide_open, 480 .open = idescsi_ide_open,
547 .release = idescsi_ide_release, 481 .release = idescsi_ide_release,
548 .ioctl = idescsi_ide_ioctl, 482 .locked_ioctl = idescsi_ide_ioctl,
549}; 483};
550 484
551static int idescsi_slave_configure(struct scsi_device * sdp) 485static int idescsi_slave_configure(struct scsi_device * sdp)
@@ -612,7 +546,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
612 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); 546 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
613 pc->scsi_cmd = cmd; 547 pc->scsi_cmd = cmd;
614 pc->done = done; 548 pc->done = done;
615 pc->timeout = jiffies + cmd->timeout_per_command; 549 pc->timeout = jiffies + cmd->request->timeout;
616 550
617 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 551 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
618 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 552 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -647,6 +581,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
647 int busy; 581 int busy;
648 int ret = FAILED; 582 int ret = FAILED;
649 583
584 struct ide_atapi_pc *pc;
585
650 /* In idescsi_eh_abort we try to gently pry our command from the ide subsystem */ 586 /* In idescsi_eh_abort we try to gently pry our command from the ide subsystem */
651 587
652 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 588 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
@@ -667,26 +603,27 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
667 spin_lock_irq(&ide_lock); 603 spin_lock_irq(&ide_lock);
668 604
669 /* If there is no pc running we're done (our interrupt took care of it) */ 605 /* If there is no pc running we're done (our interrupt took care of it) */
670 if (!scsi->pc) { 606 pc = drive->pc;
607 if (pc == NULL) {
671 ret = SUCCESS; 608 ret = SUCCESS;
672 goto ide_unlock; 609 goto ide_unlock;
673 } 610 }
674 611
675 /* It's somewhere in flight. Does ide subsystem agree? */ 612 /* It's somewhere in flight. Does ide subsystem agree? */
676 if (scsi->pc->scsi_cmd->serial_number == cmd->serial_number && !busy && 613 if (pc->scsi_cmd->serial_number == cmd->serial_number && !busy &&
677 elv_queue_empty(drive->queue) && HWGROUP(drive)->rq != scsi->pc->rq) { 614 elv_queue_empty(drive->queue) && HWGROUP(drive)->rq != pc->rq) {
678 /* 615 /*
679 * FIXME - not sure this condition can ever occur 616 * FIXME - not sure this condition can ever occur
680 */ 617 */
681 printk (KERN_ERR "ide-scsi: cmd aborted!\n"); 618 printk (KERN_ERR "ide-scsi: cmd aborted!\n");
682 619
683 if (blk_sense_request(scsi->pc->rq)) 620 if (blk_sense_request(pc->rq))
684 kfree(scsi->pc->buf); 621 kfree(pc->buf);
685 /* we need to call blk_put_request twice. */ 622 /* we need to call blk_put_request twice. */
686 blk_put_request(scsi->pc->rq); 623 blk_put_request(pc->rq);
687 blk_put_request(scsi->pc->rq); 624 blk_put_request(pc->rq);
688 kfree(scsi->pc); 625 kfree(pc);
689 scsi->pc = NULL; 626 drive->pc = NULL;
690 627
691 ret = SUCCESS; 628 ret = SUCCESS;
692 } 629 }
@@ -708,6 +645,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
708 int ready = 0; 645 int ready = 0;
709 int ret = SUCCESS; 646 int ret = SUCCESS;
710 647
648 struct ide_atapi_pc *pc;
649
711 /* In idescsi_eh_reset we forcefully remove the command from the ide subsystem and reset the device. */ 650 /* In idescsi_eh_reset we forcefully remove the command from the ide subsystem and reset the device. */
712 651
713 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 652 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
@@ -722,7 +661,9 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
722 spin_lock_irq(cmd->device->host->host_lock); 661 spin_lock_irq(cmd->device->host->host_lock);
723 spin_lock(&ide_lock); 662 spin_lock(&ide_lock);
724 663
725 if (!scsi->pc || (req = scsi->pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) { 664 pc = drive->pc;
665
666 if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) {
726 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n"); 667 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
727 spin_unlock(&ide_lock); 668 spin_unlock(&ide_lock);
728 spin_unlock_irq(cmd->device->host->host_lock); 669 spin_unlock_irq(cmd->device->host->host_lock);
@@ -733,9 +674,9 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
733 if (__blk_end_request(req, -EIO, 0)) 674 if (__blk_end_request(req, -EIO, 0))
734 BUG(); 675 BUG();
735 if (blk_sense_request(req)) 676 if (blk_sense_request(req))
736 kfree(scsi->pc->buf); 677 kfree(pc->buf);
737 kfree(scsi->pc); 678 kfree(pc);
738 scsi->pc = NULL; 679 drive->pc = NULL;
739 blk_put_request(req); 680 blk_put_request(req);
740 681
741 /* now nuke the drive queue */ 682 /* now nuke the drive queue */
@@ -811,6 +752,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
811 struct gendisk *g; 752 struct gendisk *g;
812 static int warned; 753 static int warned;
813 int err = -ENOMEM; 754 int err = -ENOMEM;
755 u16 last_lun;
814 756
815 if (!warned && drive->media == ide_cdrom) { 757 if (!warned && drive->media == ide_cdrom) {
816 printk(KERN_WARNING "ide-scsi is deprecated for cd burning! Use ide-cd and give dev=/dev/hdX as device\n"); 758 printk(KERN_WARNING "ide-scsi is deprecated for cd burning! Use ide-cd and give dev=/dev/hdX as device\n");
@@ -821,12 +763,11 @@ static int ide_scsi_probe(ide_drive_t *drive)
821 return -ENODEV; 763 return -ENODEV;
822 764
823 if (!strstr("ide-scsi", drive->driver_req) || 765 if (!strstr("ide-scsi", drive->driver_req) ||
824 !drive->present ||
825 drive->media == ide_disk || 766 drive->media == ide_disk ||
826 !(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t)))) 767 !(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t))))
827 return -ENODEV; 768 return -ENODEV;
828 769
829 drive->scsi = 1; 770 drive->dev_flags |= IDE_DFLAG_SCSI;
830 771
831 g = alloc_disk(1 << PARTN_BITS); 772 g = alloc_disk(1 << PARTN_BITS);
832 if (!g) 773 if (!g)
@@ -836,12 +777,12 @@ static int ide_scsi_probe(ide_drive_t *drive)
836 777
837 host->max_id = 1; 778 host->max_id = 1;
838 779
839 if (drive->id->last_lun) 780 last_lun = drive->id[ATA_ID_LAST_LUN];
840 debug_log("%s: id->last_lun=%u\n", drive->name, 781 if (last_lun)
841 drive->id->last_lun); 782 debug_log("%s: last_lun=%u\n", drive->name, last_lun);
842 783
843 if ((drive->id->last_lun & 0x7) != 7) 784 if ((last_lun & 7) != 7)
844 host->max_lun = (drive->id->last_lun & 0x7) + 1; 785 host->max_lun = (last_lun & 7) + 1;
845 else 786 else
846 host->max_lun = 1; 787 host->max_lun = 1;
847 788
@@ -852,7 +793,6 @@ static int ide_scsi_probe(ide_drive_t *drive)
852 idescsi->host = host; 793 idescsi->host = host;
853 idescsi->disk = g; 794 idescsi->disk = g;
854 g->private_data = &idescsi->driver; 795 g->private_data = &idescsi->driver;
855 ide_proc_register_driver(drive, &idescsi_driver);
856 err = 0; 796 err = 0;
857 idescsi_setup(drive, idescsi); 797 idescsi_setup(drive, idescsi);
858 g->fops = &idescsi_ops; 798 g->fops = &idescsi_ops;
@@ -868,7 +808,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
868 808
869 put_disk(g); 809 put_disk(g);
870out_host_put: 810out_host_put:
871 drive->scsi = 0; 811 drive->dev_flags &= ~IDE_DFLAG_SCSI;
872 scsi_host_put(host); 812 scsi_host_put(host);
873 return err; 813 return err;
874} 814}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..ded854a6dd35 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2456,20 +2456,14 @@ static ssize_t ipr_read_trace(struct kobject *kobj,
2456 struct Scsi_Host *shost = class_to_shost(dev); 2456 struct Scsi_Host *shost = class_to_shost(dev);
2457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 2457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2458 unsigned long lock_flags = 0; 2458 unsigned long lock_flags = 0;
2459 int size = IPR_TRACE_SIZE; 2459 ssize_t ret;
2460 char *src = (char *)ioa_cfg->trace;
2461
2462 if (off > size)
2463 return 0;
2464 if (off + count > size) {
2465 size -= off;
2466 count = size;
2467 }
2468 2460
2469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2461 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2470 memcpy(buf, &src[off], count); 2462 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
2463 IPR_TRACE_SIZE);
2471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2464 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2472 return count; 2465
2466 return ret;
2473} 2467}
2474 2468
2475static struct bin_attribute ipr_trace_attr = { 2469static struct bin_attribute ipr_trace_attr = {
@@ -3670,7 +3664,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3670 sdev->no_uld_attach = 1; 3664 sdev->no_uld_attach = 1;
3671 } 3665 }
3672 if (ipr_is_vset_device(res)) { 3666 if (ipr_is_vset_device(res)) {
3673 sdev->timeout = IPR_VSET_RW_TIMEOUT; 3667 blk_queue_rq_timeout(sdev->request_queue,
3668 IPR_VSET_RW_TIMEOUT);
3674 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 3669 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3675 } 3670 }
3676 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 3671 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
@@ -7858,7 +7853,6 @@ static struct pci_driver ipr_driver = {
7858 .remove = ipr_remove, 7853 .remove = ipr_remove,
7859 .shutdown = ipr_shutdown, 7854 .shutdown = ipr_shutdown,
7860 .err_handler = &ipr_err_handler, 7855 .err_handler = &ipr_err_handler,
7861 .dynids.use_driver_data = 1
7862}; 7856};
7863 7857
7864/** 7858/**
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3818 scb->cmd.dcdb.segment_4G = 0; 3818 scb->cmd.dcdb.segment_4G = 0;
3819 scb->cmd.dcdb.enhanced_sg = 0; 3819 scb->cmd.dcdb.enhanced_sg = 0;
3820 3820
3821 TimeOut = scb->scsi_cmd->timeout_per_command; 3821 TimeOut = scb->scsi_cmd->request->timeout;
3822 3822
3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ 3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
3824 if (!scb->sg_len) { 3824 if (!scb->sg_len) {
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2a2f0094570f..ed6c54cae7b1 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -523,22 +523,20 @@ iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
523} 523}
524 524
525/** 525/**
526 * iscsi_data_rsp - SCSI Data-In Response processing 526 * iscsi_data_in - SCSI Data-In Response processing
527 * @conn: iscsi connection 527 * @conn: iscsi connection
528 * @task: scsi command task 528 * @task: scsi command task
529 **/ 529 **/
530static int 530static int
531iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task) 531iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
532{ 532{
533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
534 struct iscsi_tcp_task *tcp_task = task->dd_data; 534 struct iscsi_tcp_task *tcp_task = task->dd_data;
535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; 535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
536 struct iscsi_session *session = conn->session;
537 struct scsi_cmnd *sc = task->sc;
538 int datasn = be32_to_cpu(rhdr->datasn); 536 int datasn = be32_to_cpu(rhdr->datasn);
539 unsigned total_in_length = scsi_in(sc)->length; 537 unsigned total_in_length = scsi_in(task->sc)->length;
540 538
541 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 539 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
542 if (tcp_conn->in.datalen == 0) 540 if (tcp_conn->in.datalen == 0)
543 return 0; 541 return 0;
544 542
@@ -558,23 +556,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
558 return ISCSI_ERR_DATA_OFFSET; 556 return ISCSI_ERR_DATA_OFFSET;
559 } 557 }
560 558
561 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
562 sc->result = (DID_OK << 16) | rhdr->cmd_status;
563 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
564 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
565 ISCSI_FLAG_DATA_OVERFLOW)) {
566 int res_count = be32_to_cpu(rhdr->residual_count);
567
568 if (res_count > 0 &&
569 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
570 res_count <= total_in_length))
571 scsi_in(sc)->resid = res_count;
572 else
573 sc->result = (DID_BAD_TARGET << 16) |
574 rhdr->cmd_status;
575 }
576 }
577
578 conn->datain_pdus_cnt++; 559 conn->datain_pdus_cnt++;
579 return 0; 560 return 0;
580} 561}
@@ -774,7 +755,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
774 if (!task) 755 if (!task)
775 rc = ISCSI_ERR_BAD_ITT; 756 rc = ISCSI_ERR_BAD_ITT;
776 else 757 else
777 rc = iscsi_data_rsp(conn, task); 758 rc = iscsi_data_in(conn, task);
778 if (rc) { 759 if (rc) {
779 spin_unlock(&conn->session->lock); 760 spin_unlock(&conn->session->lock);
780 break; 761 break;
@@ -998,7 +979,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
998 979
999error: 980error:
1000 debug_tcp("Error receiving PDU, errno=%d\n", rc); 981 debug_tcp("Error receiving PDU, errno=%d\n", rc);
1001 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 982 iscsi_conn_failure(conn, rc);
1002 return 0; 983 return 0;
1003} 984}
1004 985
@@ -1117,8 +1098,10 @@ iscsi_xmit(struct iscsi_conn *conn)
1117 1098
1118 while (1) { 1099 while (1) {
1119 rc = iscsi_tcp_xmit_segment(tcp_conn, segment); 1100 rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
1120 if (rc < 0) 1101 if (rc < 0) {
1102 rc = ISCSI_ERR_XMIT_FAILED;
1121 goto error; 1103 goto error;
1104 }
1122 if (rc == 0) 1105 if (rc == 0)
1123 break; 1106 break;
1124 1107
@@ -1127,7 +1110,7 @@ iscsi_xmit(struct iscsi_conn *conn)
1127 if (segment->total_copied >= segment->total_size) { 1110 if (segment->total_copied >= segment->total_size) {
1128 if (segment->done != NULL) { 1111 if (segment->done != NULL) {
1129 rc = segment->done(tcp_conn, segment); 1112 rc = segment->done(tcp_conn, segment);
1130 if (rc < 0) 1113 if (rc != 0)
1131 goto error; 1114 goto error;
1132 } 1115 }
1133 } 1116 }
@@ -1142,8 +1125,8 @@ error:
1142 /* Transmit error. We could initiate error recovery 1125 /* Transmit error. We could initiate error recovery
1143 * here. */ 1126 * here. */
1144 debug_tcp("Error sending PDU, errno=%d\n", rc); 1127 debug_tcp("Error sending PDU, errno=%d\n", rc);
1145 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1128 iscsi_conn_failure(conn, rc);
1146 return rc; 1129 return -EIO;
1147} 1130}
1148 1131
1149/** 1132/**
@@ -1904,6 +1887,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1904 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 1887 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1905 1888
1906 iscsi_r2tpool_free(cls_session->dd_data); 1889 iscsi_r2tpool_free(cls_session->dd_data);
1890 iscsi_session_teardown(cls_session);
1907 1891
1908 iscsi_host_remove(shost); 1892 iscsi_host_remove(shost);
1909 iscsi_host_free(shost); 1893 iscsi_host_free(shost);
@@ -1927,7 +1911,7 @@ static struct scsi_host_template iscsi_sht = {
1927 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 1911 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
1928 .eh_abort_handler = iscsi_eh_abort, 1912 .eh_abort_handler = iscsi_eh_abort,
1929 .eh_device_reset_handler= iscsi_eh_device_reset, 1913 .eh_device_reset_handler= iscsi_eh_device_reset,
1930 .eh_host_reset_handler = iscsi_eh_host_reset, 1914 .eh_target_reset_handler= iscsi_eh_target_reset,
1931 .use_clustering = DISABLE_CLUSTERING, 1915 .use_clustering = DISABLE_CLUSTERING,
1932 .slave_configure = iscsi_tcp_slave_configure, 1916 .slave_configure = iscsi_tcp_slave_configure,
1933 .proc_name = "iscsi_tcp", 1917 .proc_name = "iscsi_tcp",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..801c7cf54d2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -404,11 +404,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
404 conn->session->queued_cmdsn--; 404 conn->session->queued_cmdsn--;
405 else 405 else
406 conn->session->tt->cleanup_task(conn, task); 406 conn->session->tt->cleanup_task(conn, task);
407 /*
408 * Check if cleanup_task dropped the lock and the command completed,
409 */
410 if (!task->sc)
411 return;
412 407
413 sc->result = err; 408 sc->result = err;
414 if (!scsi_bidi_cmnd(sc)) 409 if (!scsi_bidi_cmnd(sc))
@@ -633,6 +628,40 @@ out:
633 __iscsi_put_task(task); 628 __iscsi_put_task(task);
634} 629}
635 630
631/**
632 * iscsi_data_in_rsp - SCSI Data-In Response processing
633 * @conn: iscsi connection
634 * @hdr: iscsi pdu
635 * @task: scsi command task
636 **/
637static void
638iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
639 struct iscsi_task *task)
640{
641 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
642 struct scsi_cmnd *sc = task->sc;
643
644 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
645 return;
646
647 sc->result = (DID_OK << 16) | rhdr->cmd_status;
648 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
649 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
650 ISCSI_FLAG_DATA_OVERFLOW)) {
651 int res_count = be32_to_cpu(rhdr->residual_count);
652
653 if (res_count > 0 &&
654 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
655 res_count <= scsi_in(sc)->length))
656 scsi_in(sc)->resid = res_count;
657 else
658 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
659 }
660
661 conn->scsirsp_pdus_cnt++;
662 __iscsi_put_task(task);
663}
664
636static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 665static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
637{ 666{
638 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; 667 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
@@ -818,12 +847,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
818 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); 847 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
819 break; 848 break;
820 case ISCSI_OP_SCSI_DATA_IN: 849 case ISCSI_OP_SCSI_DATA_IN:
821 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 850 iscsi_data_in_rsp(conn, hdr, task);
822 conn->scsirsp_pdus_cnt++;
823 iscsi_update_cmdsn(session,
824 (struct iscsi_nopin*) hdr);
825 __iscsi_put_task(task);
826 }
827 break; 851 break;
828 case ISCSI_OP_LOGOUT_RSP: 852 case ISCSI_OP_LOGOUT_RSP:
829 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 853 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -954,6 +978,38 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
954} 978}
955EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); 979EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
956 980
981void iscsi_session_failure(struct iscsi_cls_session *cls_session,
982 enum iscsi_err err)
983{
984 struct iscsi_session *session = cls_session->dd_data;
985 struct iscsi_conn *conn;
986 struct device *dev;
987 unsigned long flags;
988
989 spin_lock_irqsave(&session->lock, flags);
990 conn = session->leadconn;
991 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
992 spin_unlock_irqrestore(&session->lock, flags);
993 return;
994 }
995
996 dev = get_device(&conn->cls_conn->dev);
997 spin_unlock_irqrestore(&session->lock, flags);
998 if (!dev)
999 return;
1000 /*
1001 * if the host is being removed bypass the connection
1002 * recovery initialization because we are going to kill
1003 * the session.
1004 */
1005 if (err == ISCSI_ERR_INVALID_HOST)
1006 iscsi_conn_error_event(conn->cls_conn, err);
1007 else
1008 iscsi_conn_failure(conn, err);
1009 put_device(dev);
1010}
1011EXPORT_SYMBOL_GPL(iscsi_session_failure);
1012
957void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) 1013void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
958{ 1014{
959 struct iscsi_session *session = conn->session; 1015 struct iscsi_session *session = conn->session;
@@ -968,9 +1024,10 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
968 if (conn->stop_stage == 0) 1024 if (conn->stop_stage == 0)
969 session->state = ISCSI_STATE_FAILED; 1025 session->state = ISCSI_STATE_FAILED;
970 spin_unlock_irqrestore(&session->lock, flags); 1026 spin_unlock_irqrestore(&session->lock, flags);
1027
971 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1028 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
972 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 1029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
973 iscsi_conn_error(conn->cls_conn, err); 1030 iscsi_conn_error_event(conn->cls_conn, err);
974} 1031}
975EXPORT_SYMBOL_GPL(iscsi_conn_failure); 1032EXPORT_SYMBOL_GPL(iscsi_conn_failure);
976 1033
@@ -1194,15 +1251,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1194 switch (session->state) { 1251 switch (session->state) {
1195 case ISCSI_STATE_IN_RECOVERY: 1252 case ISCSI_STATE_IN_RECOVERY:
1196 reason = FAILURE_SESSION_IN_RECOVERY; 1253 reason = FAILURE_SESSION_IN_RECOVERY;
1197 sc->result = DID_IMM_RETRY << 16; 1254 goto reject;
1198 break;
1199 case ISCSI_STATE_LOGGING_OUT: 1255 case ISCSI_STATE_LOGGING_OUT:
1200 reason = FAILURE_SESSION_LOGGING_OUT; 1256 reason = FAILURE_SESSION_LOGGING_OUT;
1201 sc->result = DID_IMM_RETRY << 16; 1257 goto reject;
1202 break;
1203 case ISCSI_STATE_RECOVERY_FAILED: 1258 case ISCSI_STATE_RECOVERY_FAILED:
1204 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1259 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1205 sc->result = DID_NO_CONNECT << 16; 1260 sc->result = DID_TRANSPORT_FAILFAST << 16;
1206 break; 1261 break;
1207 case ISCSI_STATE_TERMINATE: 1262 case ISCSI_STATE_TERMINATE:
1208 reason = FAILURE_SESSION_TERMINATE; 1263 reason = FAILURE_SESSION_TERMINATE;
@@ -1267,7 +1322,7 @@ reject:
1267 spin_unlock(&session->lock); 1322 spin_unlock(&session->lock);
1268 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1323 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1269 spin_lock(host->host_lock); 1324 spin_lock(host->host_lock);
1270 return SCSI_MLQUEUE_HOST_BUSY; 1325 return SCSI_MLQUEUE_TARGET_BUSY;
1271 1326
1272fault: 1327fault:
1273 spin_unlock(&session->lock); 1328 spin_unlock(&session->lock);
@@ -1307,7 +1362,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1307} 1362}
1308EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); 1363EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1309 1364
1310int iscsi_eh_host_reset(struct scsi_cmnd *sc) 1365int iscsi_eh_target_reset(struct scsi_cmnd *sc)
1311{ 1366{
1312 struct iscsi_cls_session *cls_session; 1367 struct iscsi_cls_session *cls_session;
1313 struct iscsi_session *session; 1368 struct iscsi_session *session;
@@ -1321,7 +1376,7 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
1321 spin_lock_bh(&session->lock); 1376 spin_lock_bh(&session->lock);
1322 if (session->state == ISCSI_STATE_TERMINATE) { 1377 if (session->state == ISCSI_STATE_TERMINATE) {
1323failed: 1378failed:
1324 debug_scsi("failing host reset: session terminated " 1379 debug_scsi("failing target reset: session terminated "
1325 "[CID %d age %d]\n", conn->id, session->age); 1380 "[CID %d age %d]\n", conn->id, session->age);
1326 spin_unlock_bh(&session->lock); 1381 spin_unlock_bh(&session->lock);
1327 mutex_unlock(&session->eh_mutex); 1382 mutex_unlock(&session->eh_mutex);
@@ -1336,7 +1391,7 @@ failed:
1336 */ 1391 */
1337 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1392 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1338 1393
1339 debug_scsi("iscsi_eh_host_reset wait for relogin\n"); 1394 debug_scsi("iscsi_eh_target_reset wait for relogin\n");
1340 wait_event_interruptible(conn->ehwait, 1395 wait_event_interruptible(conn->ehwait,
1341 session->state == ISCSI_STATE_TERMINATE || 1396 session->state == ISCSI_STATE_TERMINATE ||
1342 session->state == ISCSI_STATE_LOGGED_IN || 1397 session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1348,14 +1403,14 @@ failed:
1348 spin_lock_bh(&session->lock); 1403 spin_lock_bh(&session->lock);
1349 if (session->state == ISCSI_STATE_LOGGED_IN) 1404 if (session->state == ISCSI_STATE_LOGGED_IN)
1350 iscsi_session_printk(KERN_INFO, session, 1405 iscsi_session_printk(KERN_INFO, session,
1351 "host reset succeeded\n"); 1406 "target reset succeeded\n");
1352 else 1407 else
1353 goto failed; 1408 goto failed;
1354 spin_unlock_bh(&session->lock); 1409 spin_unlock_bh(&session->lock);
1355 mutex_unlock(&session->eh_mutex); 1410 mutex_unlock(&session->eh_mutex);
1356 return SUCCESS; 1411 return SUCCESS;
1357} 1412}
1358EXPORT_SYMBOL_GPL(iscsi_eh_host_reset); 1413EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
1359 1414
1360static void iscsi_tmf_timedout(unsigned long data) 1415static void iscsi_tmf_timedout(unsigned long data)
1361{ 1416{
@@ -1456,7 +1511,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1456 if (lun == task->sc->device->lun || lun == -1) { 1511 if (lun == task->sc->device->lun || lun == -1) {
1457 debug_scsi("failing in progress sc %p itt 0x%x\n", 1512 debug_scsi("failing in progress sc %p itt 0x%x\n",
1458 task->sc, task->itt); 1513 task->sc, task->itt);
1459 fail_command(conn, task, DID_BUS_BUSY << 16); 1514 fail_command(conn, task, error << 16);
1460 } 1515 }
1461 } 1516 }
1462} 1517}
@@ -1476,12 +1531,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
1476 scsi_queue_work(conn->session->host, &conn->xmitwork); 1531 scsi_queue_work(conn->session->host, &conn->xmitwork);
1477} 1532}
1478 1533
1479static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1534static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1480{ 1535{
1481 struct iscsi_cls_session *cls_session; 1536 struct iscsi_cls_session *cls_session;
1482 struct iscsi_session *session; 1537 struct iscsi_session *session;
1483 struct iscsi_conn *conn; 1538 struct iscsi_conn *conn;
1484 enum scsi_eh_timer_return rc = EH_NOT_HANDLED; 1539 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1485 1540
1486 cls_session = starget_to_session(scsi_target(scmd->device)); 1541 cls_session = starget_to_session(scsi_target(scmd->device));
1487 session = cls_session->dd_data; 1542 session = cls_session->dd_data;
@@ -1494,14 +1549,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1494 * We are probably in the middle of iscsi recovery so let 1549 * We are probably in the middle of iscsi recovery so let
1495 * that complete and handle the error. 1550 * that complete and handle the error.
1496 */ 1551 */
1497 rc = EH_RESET_TIMER; 1552 rc = BLK_EH_RESET_TIMER;
1498 goto done; 1553 goto done;
1499 } 1554 }
1500 1555
1501 conn = session->leadconn; 1556 conn = session->leadconn;
1502 if (!conn) { 1557 if (!conn) {
1503 /* In the middle of shuting down */ 1558 /* In the middle of shuting down */
1504 rc = EH_RESET_TIMER; 1559 rc = BLK_EH_RESET_TIMER;
1505 goto done; 1560 goto done;
1506 } 1561 }
1507 1562
@@ -1513,20 +1568,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1513 */ 1568 */
1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1569 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1515 (conn->ping_timeout * HZ), jiffies)) 1570 (conn->ping_timeout * HZ), jiffies))
1516 rc = EH_RESET_TIMER; 1571 rc = BLK_EH_RESET_TIMER;
1517 /* 1572 /*
1518 * if we are about to check the transport then give the command 1573 * if we are about to check the transport then give the command
1519 * more time 1574 * more time
1520 */ 1575 */
1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1576 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1522 jiffies)) 1577 jiffies))
1523 rc = EH_RESET_TIMER; 1578 rc = BLK_EH_RESET_TIMER;
1524 /* if in the middle of checking the transport then give us more time */ 1579 /* if in the middle of checking the transport then give us more time */
1525 if (conn->ping_task) 1580 if (conn->ping_task)
1526 rc = EH_RESET_TIMER; 1581 rc = BLK_EH_RESET_TIMER;
1527done: 1582done:
1528 spin_unlock(&session->lock); 1583 spin_unlock(&session->lock);
1529 debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh"); 1584 debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
1585 "timer reset" : "nh");
1530 return rc; 1586 return rc;
1531} 1587}
1532 1588
@@ -1768,10 +1824,10 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1768 1824
1769 iscsi_suspend_tx(conn); 1825 iscsi_suspend_tx(conn);
1770 1826
1771 spin_lock(&session->lock); 1827 spin_lock_bh(&session->lock);
1772 fail_all_commands(conn, sc->device->lun, DID_ERROR); 1828 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1773 conn->tmf_state = TMF_INITIAL; 1829 conn->tmf_state = TMF_INITIAL;
1774 spin_unlock(&session->lock); 1830 spin_unlock_bh(&session->lock);
1775 1831
1776 iscsi_start_tx(conn); 1832 iscsi_start_tx(conn);
1777 goto done; 1833 goto done;
@@ -1877,6 +1933,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
1877 int dd_data_size, uint16_t qdepth) 1933 int dd_data_size, uint16_t qdepth)
1878{ 1934{
1879 struct Scsi_Host *shost; 1935 struct Scsi_Host *shost;
1936 struct iscsi_host *ihost;
1880 1937
1881 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); 1938 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
1882 if (!shost) 1939 if (!shost)
@@ -1891,22 +1948,43 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
1891 qdepth = ISCSI_DEF_CMD_PER_LUN; 1948 qdepth = ISCSI_DEF_CMD_PER_LUN;
1892 } 1949 }
1893 shost->cmd_per_lun = qdepth; 1950 shost->cmd_per_lun = qdepth;
1951
1952 ihost = shost_priv(shost);
1953 spin_lock_init(&ihost->lock);
1954 ihost->state = ISCSI_HOST_SETUP;
1955 ihost->num_sessions = 0;
1956 init_waitqueue_head(&ihost->session_removal_wq);
1894 return shost; 1957 return shost;
1895} 1958}
1896EXPORT_SYMBOL_GPL(iscsi_host_alloc); 1959EXPORT_SYMBOL_GPL(iscsi_host_alloc);
1897 1960
1961static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
1962{
1963 iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
1964}
1965
1898/** 1966/**
1899 * iscsi_host_remove - remove host and sessions 1967 * iscsi_host_remove - remove host and sessions
1900 * @shost: scsi host 1968 * @shost: scsi host
1901 * 1969 *
1902 * This will also remove any sessions attached to the host, but if userspace 1970 * If there are any sessions left, this will initiate the removal and wait
1903 * is managing the session at the same time this will break. TODO: add 1971 * for the completion.
1904 * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
1905 * does not remove the memory from under us.
1906 */ 1972 */
1907void iscsi_host_remove(struct Scsi_Host *shost) 1973void iscsi_host_remove(struct Scsi_Host *shost)
1908{ 1974{
1909 iscsi_host_for_each_session(shost, iscsi_session_teardown); 1975 struct iscsi_host *ihost = shost_priv(shost);
1976 unsigned long flags;
1977
1978 spin_lock_irqsave(&ihost->lock, flags);
1979 ihost->state = ISCSI_HOST_REMOVED;
1980 spin_unlock_irqrestore(&ihost->lock, flags);
1981
1982 iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
1983 wait_event_interruptible(ihost->session_removal_wq,
1984 ihost->num_sessions == 0);
1985 if (signal_pending(current))
1986 flush_signals(current);
1987
1910 scsi_remove_host(shost); 1988 scsi_remove_host(shost);
1911} 1989}
1912EXPORT_SYMBOL_GPL(iscsi_host_remove); 1990EXPORT_SYMBOL_GPL(iscsi_host_remove);
@@ -1922,6 +2000,27 @@ void iscsi_host_free(struct Scsi_Host *shost)
1922} 2000}
1923EXPORT_SYMBOL_GPL(iscsi_host_free); 2001EXPORT_SYMBOL_GPL(iscsi_host_free);
1924 2002
2003static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2004{
2005 struct iscsi_host *ihost = shost_priv(shost);
2006 unsigned long flags;
2007
2008 shost = scsi_host_get(shost);
2009 if (!shost) {
2010 printk(KERN_ERR "Invalid state. Cannot notify host removal "
2011 "of session teardown event because host already "
2012 "removed.\n");
2013 return;
2014 }
2015
2016 spin_lock_irqsave(&ihost->lock, flags);
2017 ihost->num_sessions--;
2018 if (ihost->num_sessions == 0)
2019 wake_up(&ihost->session_removal_wq);
2020 spin_unlock_irqrestore(&ihost->lock, flags);
2021 scsi_host_put(shost);
2022}
2023
1925/** 2024/**
1926 * iscsi_session_setup - create iscsi cls session and host and session 2025 * iscsi_session_setup - create iscsi cls session and host and session
1927 * @iscsit: iscsi transport template 2026 * @iscsit: iscsi transport template
@@ -1942,9 +2041,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
1942 uint16_t cmds_max, int cmd_task_size, 2041 uint16_t cmds_max, int cmd_task_size,
1943 uint32_t initial_cmdsn, unsigned int id) 2042 uint32_t initial_cmdsn, unsigned int id)
1944{ 2043{
2044 struct iscsi_host *ihost = shost_priv(shost);
1945 struct iscsi_session *session; 2045 struct iscsi_session *session;
1946 struct iscsi_cls_session *cls_session; 2046 struct iscsi_cls_session *cls_session;
1947 int cmd_i, scsi_cmds, total_cmds = cmds_max; 2047 int cmd_i, scsi_cmds, total_cmds = cmds_max;
2048 unsigned long flags;
2049
2050 spin_lock_irqsave(&ihost->lock, flags);
2051 if (ihost->state == ISCSI_HOST_REMOVED) {
2052 spin_unlock_irqrestore(&ihost->lock, flags);
2053 return NULL;
2054 }
2055 ihost->num_sessions++;
2056 spin_unlock_irqrestore(&ihost->lock, flags);
1948 2057
1949 if (!total_cmds) 2058 if (!total_cmds)
1950 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; 2059 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
@@ -1957,7 +2066,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
1957 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " 2066 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1958 "must be a power of two that is at least %d.\n", 2067 "must be a power of two that is at least %d.\n",
1959 total_cmds, ISCSI_TOTAL_CMDS_MIN); 2068 total_cmds, ISCSI_TOTAL_CMDS_MIN);
1960 return NULL; 2069 goto dec_session_count;
1961 } 2070 }
1962 2071
1963 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { 2072 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
@@ -1981,7 +2090,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
1981 cls_session = iscsi_alloc_session(shost, iscsit, 2090 cls_session = iscsi_alloc_session(shost, iscsit,
1982 sizeof(struct iscsi_session)); 2091 sizeof(struct iscsi_session));
1983 if (!cls_session) 2092 if (!cls_session)
1984 return NULL; 2093 goto dec_session_count;
1985 session = cls_session->dd_data; 2094 session = cls_session->dd_data;
1986 session->cls_session = cls_session; 2095 session->cls_session = cls_session;
1987 session->host = shost; 2096 session->host = shost;
@@ -2020,6 +2129,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2020 2129
2021 if (iscsi_add_session(cls_session, id)) 2130 if (iscsi_add_session(cls_session, id))
2022 goto cls_session_fail; 2131 goto cls_session_fail;
2132
2023 return cls_session; 2133 return cls_session;
2024 2134
2025cls_session_fail: 2135cls_session_fail:
@@ -2028,6 +2138,8 @@ module_get_fail:
2028 iscsi_pool_free(&session->cmdpool); 2138 iscsi_pool_free(&session->cmdpool);
2029cmdpool_alloc_fail: 2139cmdpool_alloc_fail:
2030 iscsi_free_session(cls_session); 2140 iscsi_free_session(cls_session);
2141dec_session_count:
2142 iscsi_host_dec_session_cnt(shost);
2031 return NULL; 2143 return NULL;
2032} 2144}
2033EXPORT_SYMBOL_GPL(iscsi_session_setup); 2145EXPORT_SYMBOL_GPL(iscsi_session_setup);
@@ -2043,6 +2155,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2043{ 2155{
2044 struct iscsi_session *session = cls_session->dd_data; 2156 struct iscsi_session *session = cls_session->dd_data;
2045 struct module *owner = cls_session->transport->owner; 2157 struct module *owner = cls_session->transport->owner;
2158 struct Scsi_Host *shost = session->host;
2046 2159
2047 iscsi_pool_free(&session->cmdpool); 2160 iscsi_pool_free(&session->cmdpool);
2048 2161
@@ -2055,6 +2168,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2055 kfree(session->ifacename); 2168 kfree(session->ifacename);
2056 2169
2057 iscsi_destroy_session(cls_session); 2170 iscsi_destroy_session(cls_session);
2171 iscsi_host_dec_session_cnt(shost);
2058 module_put(owner); 2172 module_put(owner);
2059} 2173}
2060EXPORT_SYMBOL_GPL(iscsi_session_teardown); 2174EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -2334,8 +2448,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2334 * flush queues. 2448 * flush queues.
2335 */ 2449 */
2336 spin_lock_bh(&session->lock); 2450 spin_lock_bh(&session->lock);
2337 fail_all_commands(conn, -1, 2451 if (flag == STOP_CONN_RECOVER)
2338 STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR); 2452 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
2453 else
2454 fail_all_commands(conn, -1, DID_ERROR);
2339 flush_control_queues(session, conn); 2455 flush_control_queues(session, conn);
2340 spin_unlock_bh(&session->lock); 2456 spin_unlock_bh(&session->lock);
2341 mutex_unlock(&session->eh_mutex); 2457 mutex_unlock(&session->eh_mutex);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
294 } 294 }
295} 295}
296 296
297static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in, 297static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
298 u32 val) 298 u32 val)
299{ 299{
300 struct domain_device *dev = ap->private_data; 300 struct domain_device *dev = link->ap->private_data;
301 301
302 SAS_DPRINTK("STUB %s\n", __func__); 302 SAS_DPRINTK("STUB %s\n", __func__);
303 switch (sc_reg_in) { 303 switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
319 return 0; 319 return 0;
320} 320}
321 321
322static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in, 322static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
323 u32 *val) 323 u32 *val)
324{ 324{
325 struct domain_device *dev = ap->private_data; 325 struct domain_device *dev = link->ap->private_data;
326 326
327 SAS_DPRINTK("STUB %s\n", __func__); 327 SAS_DPRINTK("STUB %s\n", __func__);
328 switch (sc_reg_in) { 328 switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
398 398
399 /* Bounce SCSI-initiated commands to the SCSI EH */ 399 /* Bounce SCSI-initiated commands to the SCSI EH */
400 if (qc->scsicmd) { 400 if (qc->scsicmd) {
401 scsi_req_abort_cmd(qc->scsicmd); 401 blk_abort_request(qc->scsicmd->request);
402 scsi_schedule_eh(qc->scsicmd->device->host); 402 scsi_schedule_eh(qc->scsicmd->device->host);
403 return; 403 return;
404 } 404 }
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
55int sas_register_ports(struct sas_ha_struct *sas_ha); 55int sas_register_ports(struct sas_ha_struct *sas_ha);
56void sas_unregister_ports(struct sas_ha_struct *sas_ha); 56void sas_unregister_ports(struct sas_ha_struct *sas_ha);
57 57
58enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 58enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
59 59
60int sas_init_queue(struct sas_ha_struct *sas_ha); 60int sas_init_queue(struct sas_ha_struct *sas_ha);
61int sas_init_events(struct sas_ha_struct *sas_ha); 61int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
673 return; 673 return;
674} 674}
675 675
676enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 676enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
677{ 677{
678 struct sas_task *task = TO_SAS_TASK(cmd); 678 struct sas_task *task = TO_SAS_TASK(cmd);
679 unsigned long flags; 679 unsigned long flags;
680 680
681 if (!task) { 681 if (!task) {
682 cmd->timeout_per_command /= 2; 682 cmd->request->timeout /= 2;
683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", 683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
684 cmd, task, (cmd->timeout_per_command ? 684 cmd, task, (cmd->request->timeout ?
685 "EH_RESET_TIMER" : "EH_NOT_HANDLED")); 685 "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
686 if (!cmd->timeout_per_command) 686 if (!cmd->request->timeout)
687 return EH_NOT_HANDLED; 687 return BLK_EH_NOT_HANDLED;
688 return EH_RESET_TIMER; 688 return BLK_EH_RESET_TIMER;
689 } 689 }
690 690
691 spin_lock_irqsave(&task->task_state_lock, flags); 691 spin_lock_irqsave(&task->task_state_lock, flags);
692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); 692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
693 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 693 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
694 spin_unlock_irqrestore(&task->task_state_lock, flags); 694 spin_unlock_irqrestore(&task->task_state_lock, flags);
695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
696 cmd, task); 696 "BLK_EH_HANDLED\n", cmd, task);
697 return EH_HANDLED; 697 return BLK_EH_HANDLED;
698 } 698 }
699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { 699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
700 spin_unlock_irqrestore(&task->task_state_lock, flags); 700 spin_unlock_irqrestore(&task->task_state_lock, flags);
701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " 701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
702 "EH_RESET_TIMER\n", 702 "BLK_EH_RESET_TIMER\n",
703 cmd, task); 703 cmd, task);
704 return EH_RESET_TIMER; 704 return BLK_EH_RESET_TIMER;
705 } 705 }
706 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 706 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
707 spin_unlock_irqrestore(&task->task_state_lock, flags); 707 spin_unlock_irqrestore(&task->task_state_lock, flags);
708 708
709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", 709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
710 cmd, task); 710 cmd, task);
711 711
712 return EH_NOT_HANDLED; 712 return BLK_EH_NOT_HANDLED;
713} 713}
714 714
715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
1039 return; 1039 return;
1040 } 1040 }
1041 1041
1042 scsi_req_abort_cmd(sc); 1042 blk_abort_request(sc->request);
1043 scsi_schedule_eh(sc->device->host); 1043 scsi_schedule_eh(sc->device->host);
1044} 1044}
1045 1045
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0e018d12653..60a9e6e9384b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -34,7 +34,14 @@ struct lpfc_sli2_slim;
34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
36#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 36#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
37 37#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
38 queue depth change in millisecs */
39#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
40#define LPFC_MIN_TGT_QDEPTH 100
41#define LPFC_MAX_TGT_QDEPTH 0xFFFF
42
43#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
44 collection. */
38/* 45/*
39 * Following time intervals are used of adjusting SCSI device 46 * Following time intervals are used of adjusting SCSI device
40 * queue depths when there are driver resource error or Firmware 47 * queue depths when there are driver resource error or Firmware
@@ -49,6 +56,9 @@ struct lpfc_sli2_slim;
49#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ 56#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
50#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */ 57#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
51 58
59/* Error Attention event polling interval */
60#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
61
52/* Define macros for 64 bit support */ 62/* Define macros for 64 bit support */
53#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) 63#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
54#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) 64#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -60,6 +70,9 @@ struct lpfc_sli2_slim;
60 70
61#define MAX_HBAEVT 32 71#define MAX_HBAEVT 32
62 72
73/* Number of MSI-X vectors the driver uses */
74#define LPFC_MSIX_VECTORS 2
75
63/* lpfc wait event data ready flag */ 76/* lpfc wait event data ready flag */
64#define LPFC_DATA_READY (1<<0) 77#define LPFC_DATA_READY (1<<0)
65 78
@@ -357,6 +370,7 @@ struct lpfc_vport {
357 uint32_t cfg_log_verbose; 370 uint32_t cfg_log_verbose;
358 uint32_t cfg_max_luns; 371 uint32_t cfg_max_luns;
359 uint32_t cfg_enable_da_id; 372 uint32_t cfg_enable_da_id;
373 uint32_t cfg_max_scsicmpl_time;
360 374
361 uint32_t dev_loss_tmo_changed; 375 uint32_t dev_loss_tmo_changed;
362 376
@@ -369,6 +383,8 @@ struct lpfc_vport {
369 struct lpfc_debugfs_trc *disc_trc; 383 struct lpfc_debugfs_trc *disc_trc;
370 atomic_t disc_trc_cnt; 384 atomic_t disc_trc_cnt;
371#endif 385#endif
386 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked;
372}; 388};
373 389
374struct hbq_s { 390struct hbq_s {
@@ -407,10 +423,11 @@ struct lpfc_hba {
407 struct lpfc_sli sli; 423 struct lpfc_sli sli;
408 uint32_t sli_rev; /* SLI2 or SLI3 */ 424 uint32_t sli_rev; /* SLI2 or SLI3 */
409 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 425 uint32_t sli3_options; /* Mask of enabled SLI3 options */
410#define LPFC_SLI3_ENABLED 0x01 426#define LPFC_SLI3_HBQ_ENABLED 0x01
411#define LPFC_SLI3_HBQ_ENABLED 0x02 427#define LPFC_SLI3_NPIV_ENABLED 0x02
412#define LPFC_SLI3_NPIV_ENABLED 0x04 428#define LPFC_SLI3_VPORT_TEARDOWN 0x04
413#define LPFC_SLI3_VPORT_TEARDOWN 0x08 429#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10
414 uint32_t iocb_cmd_size; 431 uint32_t iocb_cmd_size;
415 uint32_t iocb_rsp_size; 432 uint32_t iocb_rsp_size;
416 433
@@ -422,10 +439,20 @@ struct lpfc_hba {
422#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 439#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
423#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 440#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
424 441
425 struct lpfc_sli2_slim *slim2p; 442 uint32_t hba_flag; /* hba generic flags */
426 struct lpfc_dmabuf hbqslimp; 443#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
444
445 struct lpfc_dmabuf slim2p;
427 446
428 dma_addr_t slim2p_mapping; 447 MAILBOX_t *mbox;
448 uint32_t *inb_ha_copy;
449 uint32_t *inb_counter;
450 uint32_t inb_last_counter;
451 uint32_t ha_copy;
452 struct _PCB *pcb;
453 struct _IOCB *IOCBs;
454
455 struct lpfc_dmabuf hbqslimp;
429 456
430 uint16_t pci_cfg_value; 457 uint16_t pci_cfg_value;
431 458
@@ -492,7 +519,7 @@ struct lpfc_hba {
492 519
493 wait_queue_head_t work_waitq; 520 wait_queue_head_t work_waitq;
494 struct task_struct *worker_thread; 521 struct task_struct *worker_thread;
495 long data_flags; 522 unsigned long data_flags;
496 523
497 uint32_t hbq_in_use; /* HBQs in use flag */ 524 uint32_t hbq_in_use; /* HBQs in use flag */
498 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 525 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -514,6 +541,7 @@ struct lpfc_hba {
514 void __iomem *HCregaddr; /* virtual address for host ctl reg */ 541 void __iomem *HCregaddr; /* virtual address for host ctl reg */
515 542
516 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */ 543 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
544 struct lpfc_pgp *port_gp;
517 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */ 545 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
518 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */ 546 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
519 547
@@ -536,6 +564,7 @@ struct lpfc_hba {
536 uint8_t soft_wwn_enable; 564 uint8_t soft_wwn_enable;
537 565
538 struct timer_list fcp_poll_timer; 566 struct timer_list fcp_poll_timer;
567 struct timer_list eratt_poll;
539 568
540 /* 569 /*
541 * stat counters 570 * stat counters
@@ -565,7 +594,7 @@ struct lpfc_hba {
565 594
566 struct fc_host_statistics link_stats; 595 struct fc_host_statistics link_stats;
567 enum intr_type_t intr_type; 596 enum intr_type_t intr_type;
568 struct msix_entry msix_entries[1]; 597 struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
569 598
570 struct list_head port_list; 599 struct list_head port_list;
571 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 600 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@@ -605,6 +634,7 @@ struct lpfc_hba {
605 unsigned long last_completion_time; 634 unsigned long last_completion_time;
606 struct timer_list hb_tmofunc; 635 struct timer_list hb_tmofunc;
607 uint8_t hb_outstanding; 636 uint8_t hb_outstanding;
637 enum hba_temp_state over_temp_state;
608 /* ndlp reference management */ 638 /* ndlp reference management */
609 spinlock_t ndlp_lock; 639 spinlock_t ndlp_lock;
610 /* 640 /*
@@ -613,7 +643,19 @@ struct lpfc_hba {
613 */ 643 */
614#define QUE_BUFTAG_BIT (1<<31) 644#define QUE_BUFTAG_BIT (1<<31)
615 uint32_t buffer_tag_count; 645 uint32_t buffer_tag_count;
616 enum hba_temp_state over_temp_state; 646 int wait_4_mlo_maint_flg;
647 wait_queue_head_t wait_4_mlo_m_q;
648 /* data structure used for latency data collection */
649#define LPFC_NO_BUCKET 0
650#define LPFC_LINEAR_BUCKET 1
651#define LPFC_POWER2_BUCKET 2
652 uint8_t bucket_type;
653 uint32_t bucket_base;
654 uint32_t bucket_step;
655
656/* Maximum number of events that can be outstanding at any time*/
657#define LPFC_MAX_EVT_COUNT 512
658 atomic_t fast_event_count;
617}; 659};
618 660
619static inline struct Scsi_Host * 661static inline struct Scsi_Host *
@@ -650,15 +692,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
650 return; 692 return;
651} 693}
652 694
653#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 695static inline void
654#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature 696lpfc_sli_read_hs(struct lpfc_hba *phba)
655 event */ 697{
698 /*
699 * There was a link/board error. Read the status register to retrieve
700 * the error event and process it.
701 */
702 phba->sli.slistat.err_attn_event++;
703
704 /* Save status info */
705 phba->work_hs = readl(phba->HSregaddr);
706 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
707 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
708
709 /* Clear chip Host Attention error bit */
710 writel(HA_ERATT, phba->HAregaddr);
711 readl(phba->HAregaddr); /* flush */
712 phba->pport->stopped = 1;
713
714 return;
715}
656 716
657struct temp_event {
658 uint32_t event_type;
659 uint32_t event_code;
660 uint32_t data;
661};
662#define LPFC_CRIT_TEMP 0x1
663#define LPFC_THRESHOLD_TEMP 0x2
664#define LPFC_NORMAL_TEMP 0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 37bfa0bd1dae..aa3d6277581d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -32,6 +32,7 @@
32 32
33#include "lpfc_hw.h" 33#include "lpfc_hw.h"
34#include "lpfc_sli.h" 34#include "lpfc_sli.h"
35#include "lpfc_nl.h"
35#include "lpfc_disc.h" 36#include "lpfc_disc.h"
36#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
37#include "lpfc.h" 38#include "lpfc.h"
@@ -49,6 +50,21 @@
49#define LPFC_LINK_SPEED_BITMAP 0x00000117 50#define LPFC_LINK_SPEED_BITMAP 0x00000117
50#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" 51#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
51 52
53/**
54 * lpfc_jedec_to_ascii: Hex to ascii convertor according to JEDEC rules.
55 * @incr: integer to convert.
56 * @hdw: ascii string holding converted integer plus a string terminator.
57 *
58 * Description:
59 * JEDEC Joint Electron Device Engineering Council.
60 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
61 * character string. The string is then terminated with a NULL in byte 9.
62 * Hex 0-9 becomes ascii '0' to '9'.
63 * Hex a-f becomes ascii '=' to 'B' capital B.
64 *
65 * Notes:
66 * Coded for 32 bit integers only.
67 **/
52static void 68static void
53lpfc_jedec_to_ascii(int incr, char hdw[]) 69lpfc_jedec_to_ascii(int incr, char hdw[])
54{ 70{
@@ -65,6 +81,14 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
65 return; 81 return;
66} 82}
67 83
84/**
85 * lpfc_drvr_version_show: Return the Emulex driver string with version number.
86 * @dev: class unused variable.
87 * @attr: device attribute, not used.
88 * @buf: on return contains the module description text.
89 *
90 * Returns: size of formatted string.
91 **/
68static ssize_t 92static ssize_t
69lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, 93lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
70 char *buf) 94 char *buf)
@@ -72,6 +96,14 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
72 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 96 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
73} 97}
74 98
99/**
100 * lpfc_info_show: Return some pci info about the host in ascii.
101 * @dev: class converted to a Scsi_host structure.
102 * @attr: device attribute, not used.
103 * @buf: on return contains the formatted text from lpfc_info().
104 *
105 * Returns: size of formatted string.
106 **/
75static ssize_t 107static ssize_t
76lpfc_info_show(struct device *dev, struct device_attribute *attr, 108lpfc_info_show(struct device *dev, struct device_attribute *attr,
77 char *buf) 109 char *buf)
@@ -81,6 +113,14 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
81 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); 113 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
82} 114}
83 115
116/**
117 * lpfc_serialnum_show: Return the hba serial number in ascii.
118 * @dev: class converted to a Scsi_host structure.
119 * @attr: device attribute, not used.
120 * @buf: on return contains the formatted text serial number.
121 *
122 * Returns: size of formatted string.
123 **/
84static ssize_t 124static ssize_t
85lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, 125lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
86 char *buf) 126 char *buf)
@@ -92,6 +132,18 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
92 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); 132 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
93} 133}
94 134
135/**
136 * lpfc_temp_sensor_show: Return the temperature sensor level.
137 * @dev: class converted to a Scsi_host structure.
138 * @attr: device attribute, not used.
139 * @buf: on return contains the formatted support level.
140 *
141 * Description:
142 * Returns a number indicating the temperature sensor level currently
143 * supported, zero or one in ascii.
144 *
145 * Returns: size of formatted string.
146 **/
95static ssize_t 147static ssize_t
96lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, 148lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
97 char *buf) 149 char *buf)
@@ -102,6 +154,14 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
102 return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); 154 return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
103} 155}
104 156
157/**
158 * lpfc_modeldesc_show: Return the model description of the hba.
159 * @dev: class converted to a Scsi_host structure.
160 * @attr: device attribute, not used.
161 * @buf: on return contains the scsi vpd model description.
162 *
163 * Returns: size of formatted string.
164 **/
105static ssize_t 165static ssize_t
106lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, 166lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
107 char *buf) 167 char *buf)
@@ -113,6 +173,14 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
113 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); 173 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
114} 174}
115 175
176/**
177 * lpfc_modelname_show: Return the model name of the hba.
178 * @dev: class converted to a Scsi_host structure.
179 * @attr: device attribute, not used.
180 * @buf: on return contains the scsi vpd model name.
181 *
182 * Returns: size of formatted string.
183 **/
116static ssize_t 184static ssize_t
117lpfc_modelname_show(struct device *dev, struct device_attribute *attr, 185lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
118 char *buf) 186 char *buf)
@@ -124,6 +192,14 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
124 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); 192 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
125} 193}
126 194
195/**
196 * lpfc_programtype_show: Return the program type of the hba.
197 * @dev: class converted to a Scsi_host structure.
198 * @attr: device attribute, not used.
199 * @buf: on return contains the scsi vpd program type.
200 *
201 * Returns: size of formatted string.
202 **/
127static ssize_t 203static ssize_t
128lpfc_programtype_show(struct device *dev, struct device_attribute *attr, 204lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
129 char *buf) 205 char *buf)
@@ -135,6 +211,33 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
135 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); 211 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
136} 212}
137 213
214/**
215 * lpfc_mlomgmt_show: Return the Menlo Maintenance sli flag.
216 * @dev: class converted to a Scsi_host structure.
217 * @attr: device attribute, not used.
218 * @buf: on return contains the Menlo Maintenance sli flag.
219 *
220 * Returns: size of formatted string.
221 **/
222static ssize_t
223lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
224{
225 struct Scsi_Host *shost = class_to_shost(dev);
226 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
227 struct lpfc_hba *phba = vport->phba;
228
229 return snprintf(buf, PAGE_SIZE, "%d\n",
230 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
231}
232
233/**
234 * lpfc_vportnum_show: Return the port number in ascii of the hba.
235 * @dev: class converted to a Scsi_host structure.
236 * @attr: device attribute, not used.
237 * @buf: on return contains scsi vpd program type.
238 *
239 * Returns: size of formatted string.
240 **/
138static ssize_t 241static ssize_t
139lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, 242lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
140 char *buf) 243 char *buf)
@@ -146,6 +249,14 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
146 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); 249 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
147} 250}
148 251
252/**
253 * lpfc_fwrev_show: Return the firmware rev running in the hba.
254 * @dev: class converted to a Scsi_host structure.
255 * @attr: device attribute, not used.
256 * @buf: on return contains the scsi vpd program type.
257 *
258 * Returns: size of formatted string.
259 **/
149static ssize_t 260static ssize_t
150lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, 261lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
151 char *buf) 262 char *buf)
@@ -159,6 +270,14 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
159 return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); 270 return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
160} 271}
161 272
273/**
274 * lpfc_hdw_show: Return the jedec information about the hba.
275 * @dev: class converted to a Scsi_host structure.
276 * @attr: device attribute, not used.
277 * @buf: on return contains the scsi vpd program type.
278 *
279 * Returns: size of formatted string.
280 **/
162static ssize_t 281static ssize_t
163lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) 282lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
164{ 283{
@@ -171,6 +290,15 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
171 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 290 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
172 return snprintf(buf, PAGE_SIZE, "%s\n", hdw); 291 return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
173} 292}
293
294/**
295 * lpfc_option_rom_version_show: Return the adapter ROM FCode version.
296 * @dev: class converted to a Scsi_host structure.
297 * @attr: device attribute, not used.
298 * @buf: on return contains the ROM and FCode ascii strings.
299 *
300 * Returns: size of formatted string.
301 **/
174static ssize_t 302static ssize_t
175lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, 303lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
176 char *buf) 304 char *buf)
@@ -181,6 +309,18 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
181 309
182 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); 310 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
183} 311}
312
313/**
314 * lpfc_link_state_show: Return the link state of the port.
315 * @dev: class converted to a Scsi_host structure.
316 * @attr: device attribute, not used.
317 * @buf: on return contains text describing the state of the link.
318 *
319 * Notes:
320 * The switch statement has no default so zero will be returned.
321 *
322 * Returns: size of formatted string.
323 **/
184static ssize_t 324static ssize_t
185lpfc_link_state_show(struct device *dev, struct device_attribute *attr, 325lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
186 char *buf) 326 char *buf)
@@ -232,8 +372,10 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
232 "Unknown\n"); 372 "Unknown\n");
233 break; 373 break;
234 } 374 }
235 375 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
236 if (phba->fc_topology == TOPOLOGY_LOOP) { 376 len += snprintf(buf + len, PAGE_SIZE-len,
377 " Menlo Maint Mode\n");
378 else if (phba->fc_topology == TOPOLOGY_LOOP) {
237 if (vport->fc_flag & FC_PUBLIC_LOOP) 379 if (vport->fc_flag & FC_PUBLIC_LOOP)
238 len += snprintf(buf + len, PAGE_SIZE-len, 380 len += snprintf(buf + len, PAGE_SIZE-len,
239 " Public Loop\n"); 381 " Public Loop\n");
@@ -253,6 +395,18 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
253 return len; 395 return len;
254} 396}
255 397
398/**
399 * lpfc_num_discovered_ports_show: Return sum of mapped and unmapped vports.
400 * @dev: class device that is converted into a Scsi_host.
401 * @attr: device attribute, not used.
402 * @buf: on return contains the sum of fc mapped and unmapped.
403 *
404 * Description:
405 * Returns the ascii text number of the sum of the fc mapped and unmapped
406 * vport counts.
407 *
408 * Returns: size of formatted string.
409 **/
256static ssize_t 410static ssize_t
257lpfc_num_discovered_ports_show(struct device *dev, 411lpfc_num_discovered_ports_show(struct device *dev,
258 struct device_attribute *attr, char *buf) 412 struct device_attribute *attr, char *buf)
@@ -264,7 +418,20 @@ lpfc_num_discovered_ports_show(struct device *dev,
264 vport->fc_map_cnt + vport->fc_unmap_cnt); 418 vport->fc_map_cnt + vport->fc_unmap_cnt);
265} 419}
266 420
267 421/**
422 * lpfc_issue_lip: Misnomer, name carried over from long ago.
423 * @shost: Scsi_Host pointer.
424 *
425 * Description:
426 * Bring the link down gracefully then re-init the link. The firmware will
427 * re-init the fiber channel interface as required. Does not issue a LIP.
428 *
429 * Returns:
430 * -EPERM port offline or management commands are being blocked
431 * -ENOMEM cannot allocate memory for the mailbox command
432 * -EIO error sending the mailbox command
433 * zero for success
434 **/
268static int 435static int
269lpfc_issue_lip(struct Scsi_Host *shost) 436lpfc_issue_lip(struct Scsi_Host *shost)
270{ 437{
@@ -306,6 +473,21 @@ lpfc_issue_lip(struct Scsi_Host *shost)
306 return 0; 473 return 0;
307} 474}
308 475
476/**
477 * lpfc_do_offline: Issues a mailbox command to bring the link down.
478 * @phba: lpfc_hba pointer.
479 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
480 *
481 * Notes:
482 * Assumes any error from lpfc_do_offline() will be negative.
483 * Can wait up to 5 seconds for the port ring buffers count
484 * to reach zero, prints a warning if it is not zero and continues.
485 * lpfc_workq_post_event() returns a non-zero return code if call fails.
486 *
487 * Returns:
488 * -EIO error posting the event
489 * zero for success
490 **/
309static int 491static int
310lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) 492lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
311{ 493{
@@ -353,6 +535,22 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
353 return 0; 535 return 0;
354} 536}
355 537
538/**
539 * lpfc_selective_reset: Offline then onlines the port.
540 * @phba: lpfc_hba pointer.
541 *
542 * Description:
543 * If the port is configured to allow a reset then the hba is brought
544 * offline then online.
545 *
546 * Notes:
547 * Assumes any error from lpfc_do_offline() will be negative.
548 *
549 * Returns:
550 * lpfc_do_offline() return code if not zero
551 * -EIO reset not configured or error posting the event
552 * zero for success
553 **/
356static int 554static int
357lpfc_selective_reset(struct lpfc_hba *phba) 555lpfc_selective_reset(struct lpfc_hba *phba)
358{ 556{
@@ -378,6 +576,27 @@ lpfc_selective_reset(struct lpfc_hba *phba)
378 return 0; 576 return 0;
379} 577}
380 578
579/**
580 * lpfc_issue_reset: Selectively resets an adapter.
581 * @dev: class device that is converted into a Scsi_host.
582 * @attr: device attribute, not used.
583 * @buf: containing the string "selective".
584 * @count: unused variable.
585 *
586 * Description:
587 * If the buf contains the string "selective" then lpfc_selective_reset()
588 * is called to perform the reset.
589 *
590 * Notes:
591 * Assumes any error from lpfc_selective_reset() will be negative.
592 * If lpfc_selective_reset() returns zero then the length of the buffer
593 * is returned which indicates success
594 *
595 * Returns:
596 * -EINVAL if the buffer does not contain the string "selective"
597 * length of buf if the call to lpfc_selective_reset() succeeds
598 * return value of lpfc_selective_reset() if the call fails
599**/
381static ssize_t 600static ssize_t
382lpfc_issue_reset(struct device *dev, struct device_attribute *attr, 601lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
383 const char *buf, size_t count) 602 const char *buf, size_t count)
@@ -397,6 +616,14 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
397 return status; 616 return status;
398} 617}
399 618
619/**
620 * lpfc_nport_evt_cnt_show: Return the number of nport events.
621 * @dev: class device that is converted into a Scsi_host.
622 * @attr: device attribute, not used.
623 * @buf: on return contains the ascii number of nport events.
624 *
625 * Returns: size of formatted string.
626 **/
400static ssize_t 627static ssize_t
401lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, 628lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
402 char *buf) 629 char *buf)
@@ -408,6 +635,14 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
408 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 635 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
409} 636}
410 637
638/**
639 * lpfc_board_mode_show: Return the state of the board.
640 * @dev: class device that is converted into a Scsi_host.
641 * @attr: device attribute, not used.
642 * @buf: on return contains the state of the adapter.
643 *
644 * Returns: size of formatted string.
645 **/
411static ssize_t 646static ssize_t
412lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, 647lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
413 char *buf) 648 char *buf)
@@ -429,6 +664,19 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
429 return snprintf(buf, PAGE_SIZE, "%s\n", state); 664 return snprintf(buf, PAGE_SIZE, "%s\n", state);
430} 665}
431 666
667/**
668 * lpfc_board_mode_store: Puts the hba in online, offline, warm or error state.
669 * @dev: class device that is converted into a Scsi_host.
670 * @attr: device attribute, not used.
671 * @buf: containing one of the strings "online", "offline", "warm" or "error".
672 * @count: unused variable.
673 *
674 * Returns:
675 * -EACCES if enable hba reset not enabled
676 * -EINVAL if the buffer does not contain a valid string (see above)
677 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
678 * buf length greater than zero indicates success
679 **/
432static ssize_t 680static ssize_t
433lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, 681lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
434 const char *buf, size_t count) 682 const char *buf, size_t count)
@@ -462,6 +710,24 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
462 return -EIO; 710 return -EIO;
463} 711}
464 712
713/**
714 * lpfc_get_hba_info: Return various bits of information about the adapter.
715 * @phba: pointer to the adapter structure.
716 * @mxri: max xri count.
717 * @axri: available xri count.
718 * @mrpi: max rpi count.
719 * @arpi: available rpi count.
720 * @mvpi: max vpi count.
721 * @avpi: available vpi count.
722 *
723 * Description:
724 * If an integer pointer for a count is not null then the value for the
725 * count is returned.
726 *
727 * Returns:
728 * zero on error
729 * one for success
730 **/
465static int 731static int
466lpfc_get_hba_info(struct lpfc_hba *phba, 732lpfc_get_hba_info(struct lpfc_hba *phba,
467 uint32_t *mxri, uint32_t *axri, 733 uint32_t *mxri, uint32_t *axri,
@@ -524,6 +790,20 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
524 return 1; 790 return 1;
525} 791}
526 792
793/**
794 * lpfc_max_rpi_show: Return maximum rpi.
795 * @dev: class device that is converted into a Scsi_host.
796 * @attr: device attribute, not used.
797 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
798 *
799 * Description:
800 * Calls lpfc_get_hba_info() asking for just the mrpi count.
801 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
802 * to "Unknown" and the buffer length is returned, therefore the caller
803 * must check for "Unknown" in the buffer to detect a failure.
804 *
805 * Returns: size of formatted string.
806 **/
527static ssize_t 807static ssize_t
528lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, 808lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
529 char *buf) 809 char *buf)
@@ -538,6 +818,20 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
538 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 818 return snprintf(buf, PAGE_SIZE, "Unknown\n");
539} 819}
540 820
821/**
822 * lpfc_used_rpi_show: Return maximum rpi minus available rpi.
823 * @dev: class device that is converted into a Scsi_host.
824 * @attr: device attribute, not used.
825 * @buf: containing the used rpi count in decimal or "Unknown".
826 *
827 * Description:
828 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
829 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
830 * to "Unknown" and the buffer length is returned, therefore the caller
831 * must check for "Unknown" in the buffer to detect a failure.
832 *
833 * Returns: size of formatted string.
834 **/
541static ssize_t 835static ssize_t
542lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, 836lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
543 char *buf) 837 char *buf)
@@ -552,6 +846,20 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
552 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 846 return snprintf(buf, PAGE_SIZE, "Unknown\n");
553} 847}
554 848
849/**
850 * lpfc_max_xri_show: Return maximum xri.
851 * @dev: class device that is converted into a Scsi_host.
852 * @attr: device attribute, not used.
853 * @buf: on return contains the maximum xri count in decimal or "Unknown".
854 *
855 * Description:
856 * Calls lpfc_get_hba_info() asking for just the mrpi count.
857 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
858 * to "Unknown" and the buffer length is returned, therefore the caller
859 * must check for "Unknown" in the buffer to detect a failure.
860 *
861 * Returns: size of formatted string.
862 **/
555static ssize_t 863static ssize_t
556lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, 864lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
557 char *buf) 865 char *buf)
@@ -566,6 +874,20 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
566 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 874 return snprintf(buf, PAGE_SIZE, "Unknown\n");
567} 875}
568 876
877/**
878 * lpfc_used_xri_show: Return maximum xri minus the available xri.
879 * @dev: class device that is converted into a Scsi_host.
880 * @attr: device attribute, not used.
881 * @buf: on return contains the used xri count in decimal or "Unknown".
882 *
883 * Description:
884 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
885 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
886 * to "Unknown" and the buffer length is returned, therefore the caller
887 * must check for "Unknown" in the buffer to detect a failure.
888 *
889 * Returns: size of formatted string.
890 **/
569static ssize_t 891static ssize_t
570lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, 892lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
571 char *buf) 893 char *buf)
@@ -580,6 +902,20 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
580 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 902 return snprintf(buf, PAGE_SIZE, "Unknown\n");
581} 903}
582 904
905/**
906 * lpfc_max_vpi_show: Return maximum vpi.
907 * @dev: class device that is converted into a Scsi_host.
908 * @attr: device attribute, not used.
909 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
910 *
911 * Description:
912 * Calls lpfc_get_hba_info() asking for just the mvpi count.
913 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
914 * to "Unknown" and the buffer length is returned, therefore the caller
915 * must check for "Unknown" in the buffer to detect a failure.
916 *
917 * Returns: size of formatted string.
918 **/
583static ssize_t 919static ssize_t
584lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, 920lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
585 char *buf) 921 char *buf)
@@ -594,6 +930,20 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
594 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 930 return snprintf(buf, PAGE_SIZE, "Unknown\n");
595} 931}
596 932
933/**
934 * lpfc_used_vpi_show: Return maximum vpi minus the available vpi.
935 * @dev: class device that is converted into a Scsi_host.
936 * @attr: device attribute, not used.
937 * @buf: on return contains the used vpi count in decimal or "Unknown".
938 *
939 * Description:
940 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
941 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
942 * to "Unknown" and the buffer length is returned, therefore the caller
943 * must check for "Unknown" in the buffer to detect a failure.
944 *
945 * Returns: size of formatted string.
946 **/
597static ssize_t 947static ssize_t
598lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, 948lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
599 char *buf) 949 char *buf)
@@ -608,6 +958,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
608 return snprintf(buf, PAGE_SIZE, "Unknown\n"); 958 return snprintf(buf, PAGE_SIZE, "Unknown\n");
609} 959}
610 960
961/**
962 * lpfc_npiv_info_show: Return text about NPIV support for the adapter.
963 * @dev: class device that is converted into a Scsi_host.
964 * @attr: device attribute, not used.
965 * @buf: text that must be interpreted to determine if npiv is supported.
966 *
967 * Description:
968 * Buffer will contain text indicating npiv is not supported on the port,
969 * the port is an NPIV physical port, or it is an npiv virtual port with
970 * the id of the vport.
971 *
972 * Returns: size of formatted string.
973 **/
611static ssize_t 974static ssize_t
612lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, 975lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
613 char *buf) 976 char *buf)
@@ -623,6 +986,17 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
623 return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); 986 return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
624} 987}
625 988
989/**
990 * lpfc_poll_show: Return text about poll support for the adapter.
991 * @dev: class device that is converted into a Scsi_host.
992 * @attr: device attribute, not used.
993 * @buf: on return contains the cfg_poll in hex.
994 *
995 * Notes:
996 * cfg_poll should be a lpfc_polling_flags type.
997 *
998 * Returns: size of formatted string.
999 **/
626static ssize_t 1000static ssize_t
627lpfc_poll_show(struct device *dev, struct device_attribute *attr, 1001lpfc_poll_show(struct device *dev, struct device_attribute *attr,
628 char *buf) 1002 char *buf)
@@ -634,6 +1008,20 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
634 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 1008 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
635} 1009}
636 1010
1011/**
1012 * lpfc_poll_store: Set the value of cfg_poll for the adapter.
1013 * @dev: class device that is converted into a Scsi_host.
1014 * @attr: device attribute, not used.
1015 * @buf: one or more lpfc_polling_flags values.
1016 * @count: not used.
1017 *
1018 * Notes:
1019 * buf contents converted to integer and checked for a valid value.
1020 *
1021 * Returns:
1022 * -EINVAL if the buffer cannot be converted or is out of range
1023 * length of the buf on success
1024 **/
637static ssize_t 1025static ssize_t
638lpfc_poll_store(struct device *dev, struct device_attribute *attr, 1026lpfc_poll_store(struct device *dev, struct device_attribute *attr,
639 const char *buf, size_t count) 1027 const char *buf, size_t count)
@@ -692,6 +1080,20 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
692 return strlen(buf); 1080 return strlen(buf);
693} 1081}
694 1082
1083/**
1084 * lpfc_param_show: Return a cfg attribute value in decimal.
1085 *
1086 * Description:
1087 * Macro that given an attr e.g. hba_queue_depth expands
1088 * into a function with the name lpfc_hba_queue_depth_show.
1089 *
1090 * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
1091 * @dev: class device that is converted into a Scsi_host.
1092 * @attr: device attribute, not used.
1093 * @buf: on return contains the attribute value in decimal.
1094 *
1095 * Returns: size of formatted string.
1096 **/
695#define lpfc_param_show(attr) \ 1097#define lpfc_param_show(attr) \
696static ssize_t \ 1098static ssize_t \
697lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 1099lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -706,6 +1108,20 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
706 phba->cfg_##attr);\ 1108 phba->cfg_##attr);\
707} 1109}
708 1110
1111/**
1112 * lpfc_param_hex_show: Return a cfg attribute value in hex.
1113 *
1114 * Description:
1115 * Macro that given an attr e.g. hba_queue_depth expands
1116 * into a function with the name lpfc_hba_queue_depth_show
1117 *
1118 * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
1119 * @dev: class device that is converted into a Scsi_host.
1120 * @attr: device attribute, not used.
1121 * @buf: on return contains the attribute value in hexadecimal.
1122 *
1123 * Returns: size of formatted string.
1124 **/
709#define lpfc_param_hex_show(attr) \ 1125#define lpfc_param_hex_show(attr) \
710static ssize_t \ 1126static ssize_t \
711lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 1127lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -720,6 +1136,25 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
720 phba->cfg_##attr);\ 1136 phba->cfg_##attr);\
721} 1137}
722 1138
1139/**
1140 * lpfc_param_init: Initializes a cfg attribute.
1141 *
1142 * Description:
1143 * Macro that given an attr e.g. hba_queue_depth expands
1144 * into a function with the name lpfc_hba_queue_depth_init. The macro also
1145 * takes a default argument, a minimum and maximum argument.
1146 *
1147 * lpfc_##attr##_init: Initializes an attribute.
1148 * @phba: pointer to the adapter structure.
1149 * @val: integer attribute value.
1150 *
1151 * Validates the min and max values then sets the adapter config field
1152 * accordingly, or uses the default if out of range and prints an error message.
1153 *
1154 * Returns:
1155 * zero on success
1156 * -EINVAL if default used
1157 **/
723#define lpfc_param_init(attr, default, minval, maxval) \ 1158#define lpfc_param_init(attr, default, minval, maxval) \
724static int \ 1159static int \
725lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ 1160lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
@@ -735,6 +1170,26 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
735 return -EINVAL;\ 1170 return -EINVAL;\
736} 1171}
737 1172
1173/**
1174 * lpfc_param_set: Set a cfg attribute value.
1175 *
1176 * Description:
1177 * Macro that given an attr e.g. hba_queue_depth expands
1178 * into a function with the name lpfc_hba_queue_depth_set
1179 *
1180 * lpfc_##attr##_set: Sets an attribute value.
1181 * @phba: pointer to the adapter structure.
1182 * @val: integer attribute value.
1183 *
1184 * Description:
1185 * Validates the min and max values then sets the
1186 * adapter config field if in the valid range. prints error message
1187 * and does not set the parameter if invalid.
1188 *
1189 * Returns:
1190 * zero on success
1191 * -EINVAL if val is invalid
1192 **/
738#define lpfc_param_set(attr, default, minval, maxval) \ 1193#define lpfc_param_set(attr, default, minval, maxval) \
739static int \ 1194static int \
740lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ 1195lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
@@ -749,6 +1204,27 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
749 return -EINVAL;\ 1204 return -EINVAL;\
750} 1205}
751 1206
1207/**
1208 * lpfc_param_store: Set a vport attribute value.
1209 *
1210 * Description:
1211 * Macro that given an attr e.g. hba_queue_depth expands
1212 * into a function with the name lpfc_hba_queue_depth_store.
1213 *
1214 * lpfc_##attr##_store: Set an attribute value.
1215 * @dev: class device that is converted into a Scsi_host.
1216 * @attr: device attribute, not used.
1217 * @buf: contains the attribute value in ascii.
1218 * @count: not used.
1219 *
1220 * Description:
1221 * Convert the ascii text number to an integer, then
1222 * use the lpfc_##attr##_set function to set the value.
1223 *
1224 * Returns:
1225 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
1226 * length of buffer upon success.
1227 **/
752#define lpfc_param_store(attr) \ 1228#define lpfc_param_store(attr) \
753static ssize_t \ 1229static ssize_t \
754lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 1230lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -768,6 +1244,20 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
768 return -EINVAL;\ 1244 return -EINVAL;\
769} 1245}
770 1246
1247/**
1248 * lpfc_vport_param_show: Return decimal formatted cfg attribute value.
1249 *
1250 * Description:
1251 * Macro that given an attr e.g. hba_queue_depth expands
1252 * into a function with the name lpfc_hba_queue_depth_show
1253 *
1254 * lpfc_##attr##_show: prints the attribute value in decimal.
1255 * @dev: class device that is converted into a Scsi_host.
1256 * @attr: device attribute, not used.
1257 * @buf: on return contains the attribute value in decimal.
1258 *
1259 * Returns: length of formatted string.
1260 **/
771#define lpfc_vport_param_show(attr) \ 1261#define lpfc_vport_param_show(attr) \
772static ssize_t \ 1262static ssize_t \
773lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 1263lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -780,6 +1270,21 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
780 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 1270 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
781} 1271}
782 1272
1273/**
1274 * lpfc_vport_param_hex_show: Return hex formatted attribute value.
1275 *
1276 * Description:
1277 * Macro that given an attr e.g.
1278 * hba_queue_depth expands into a function with the name
1279 * lpfc_hba_queue_depth_show
1280 *
1281 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
1282 * @dev: class device that is converted into a Scsi_host.
1283 * @attr: device attribute, not used.
1284 * @buf: on return contains the attribute value in hexadecimal.
1285 *
1286 * Returns: length of formatted string.
1287 **/
783#define lpfc_vport_param_hex_show(attr) \ 1288#define lpfc_vport_param_hex_show(attr) \
784static ssize_t \ 1289static ssize_t \
785lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 1290lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -792,6 +1297,24 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
792 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 1297 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
793} 1298}
794 1299
1300/**
1301 * lpfc_vport_param_init: Initialize a vport cfg attribute.
1302 *
1303 * Description:
1304 * Macro that given an attr e.g. hba_queue_depth expands
1305 * into a function with the name lpfc_hba_queue_depth_init. The macro also
1306 * takes a default argument, a minimum and maximum argument.
1307 *
1308 * lpfc_##attr##_init: validates the min and max values then sets the
1309 * adapter config field accordingly, or uses the default if out of range
1310 * and prints an error message.
1311 * @phba: pointer to the adapter structure.
1312 * @val: integer attribute value.
1313 *
1314 * Returns:
1315 * zero on success
1316 * -EINVAL if default used
1317 **/
795#define lpfc_vport_param_init(attr, default, minval, maxval) \ 1318#define lpfc_vport_param_init(attr, default, minval, maxval) \
796static int \ 1319static int \
797lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ 1320lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
@@ -801,12 +1324,29 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
801 return 0;\ 1324 return 0;\
802 }\ 1325 }\
803 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 1326 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
804 "0449 lpfc_"#attr" attribute cannot be set to %d, "\ 1327 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
805 "allowed range is ["#minval", "#maxval"]\n", val); \ 1328 "allowed range is ["#minval", "#maxval"]\n", val); \
806 vport->cfg_##attr = default;\ 1329 vport->cfg_##attr = default;\
807 return -EINVAL;\ 1330 return -EINVAL;\
808} 1331}
809 1332
1333/**
1334 * lpfc_vport_param_set: Set a vport cfg attribute.
1335 *
1336 * Description:
1337 * Macro that given an attr e.g. hba_queue_depth expands
1338 * into a function with the name lpfc_hba_queue_depth_set
1339 *
1340 * lpfc_##attr##_set: validates the min and max values then sets the
1341 * adapter config field if in the valid range. prints error message
1342 * and does not set the parameter if invalid.
1343 * @phba: pointer to the adapter structure.
1344 * @val: integer attribute value.
1345 *
1346 * Returns:
1347 * zero on success
1348 * -EINVAL if val is invalid
1349 **/
810#define lpfc_vport_param_set(attr, default, minval, maxval) \ 1350#define lpfc_vport_param_set(attr, default, minval, maxval) \
811static int \ 1351static int \
812lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ 1352lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
@@ -816,11 +1356,28 @@ lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
816 return 0;\ 1356 return 0;\
817 }\ 1357 }\
818 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 1358 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
819 "0450 lpfc_"#attr" attribute cannot be set to %d, "\ 1359 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
820 "allowed range is ["#minval", "#maxval"]\n", val); \ 1360 "allowed range is ["#minval", "#maxval"]\n", val); \
821 return -EINVAL;\ 1361 return -EINVAL;\
822} 1362}
823 1363
1364/**
1365 * lpfc_vport_param_store: Set a vport attribute.
1366 *
1367 * Description:
1368 * Macro that given an attr e.g. hba_queue_depth
1369 * expands into a function with the name lpfc_hba_queue_depth_store
1370 *
1371 * lpfc_##attr##_store: convert the ascii text number to an integer, then
1372 * use the lpfc_##attr##_set function to set the value.
1373 * @cdev: class device that is converted into a Scsi_host.
1374 * @buf: contains the attribute value in decimal.
1375 * @count: not used.
1376 *
1377 * Returns:
1378 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
1379 * length of buffer upon success.
1380 **/
824#define lpfc_vport_param_store(attr) \ 1381#define lpfc_vport_param_store(attr) \
825static ssize_t \ 1382static ssize_t \
826lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 1383lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -941,6 +1498,7 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
941 lpfc_option_rom_version_show, NULL); 1498 lpfc_option_rom_version_show, NULL);
942static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 1499static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
943 lpfc_num_discovered_ports_show, NULL); 1500 lpfc_num_discovered_ports_show, NULL);
1501static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
944static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 1502static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
945static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); 1503static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
946static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 1504static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
@@ -958,6 +1516,17 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
958 1516
959static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 1517static char *lpfc_soft_wwn_key = "C99G71SL8032A";
960 1518
1519/**
1520 * lpfc_soft_wwn_enable_store: Allows setting of the wwn if the key is valid.
1521 * @dev: class device that is converted into a Scsi_host.
1522 * @attr: device attribute, not used.
1523 * @buf: containing the string lpfc_soft_wwn_key.
1524 * @count: must be size of lpfc_soft_wwn_key.
1525 *
1526 * Returns:
1527 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
1528 * length of buf indicates success
1529 **/
961static ssize_t 1530static ssize_t
962lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, 1531lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
963 const char *buf, size_t count) 1532 const char *buf, size_t count)
@@ -994,6 +1563,14 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
994static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, 1563static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
995 lpfc_soft_wwn_enable_store); 1564 lpfc_soft_wwn_enable_store);
996 1565
1566/**
1567 * lpfc_soft_wwpn_show: Return the cfg soft ww port name of the adapter.
1568 * @dev: class device that is converted into a Scsi_host.
1569 * @attr: device attribute, not used.
1570 * @buf: on return contains the wwpn in hexidecimal.
1571 *
1572 * Returns: size of formatted string.
1573 **/
997static ssize_t 1574static ssize_t
998lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, 1575lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
999 char *buf) 1576 char *buf)
@@ -1006,7 +1583,19 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
1006 (unsigned long long)phba->cfg_soft_wwpn); 1583 (unsigned long long)phba->cfg_soft_wwpn);
1007} 1584}
1008 1585
1009 1586/**
1587 * lpfc_soft_wwpn_store: Set the ww port name of the adapter.
1588 * @dev class device that is converted into a Scsi_host.
1589 * @attr: device attribute, not used.
1590 * @buf: contains the wwpn in hexidecimal.
1591 * @count: number of wwpn bytes in buf
1592 *
1593 * Returns:
1594 * -EACCES hba reset not enabled, adapter over temp
1595 * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
1596 * -EIO error taking adapter offline or online
1597 * value of count on success
1598 **/
1010static ssize_t 1599static ssize_t
1011lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, 1600lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
1012 const char *buf, size_t count) 1601 const char *buf, size_t count)
@@ -1080,6 +1669,14 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
1080static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ 1669static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
1081 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); 1670 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
1082 1671
1672/**
1673 * lpfc_soft_wwnn_show: Return the cfg soft ww node name for the adapter.
1674 * @dev: class device that is converted into a Scsi_host.
1675 * @attr: device attribute, not used.
1676 * @buf: on return contains the wwnn in hexidecimal.
1677 *
1678 * Returns: size of formatted string.
1679 **/
1083static ssize_t 1680static ssize_t
1084lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, 1681lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
1085 char *buf) 1682 char *buf)
@@ -1090,7 +1687,16 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
1090 (unsigned long long)phba->cfg_soft_wwnn); 1687 (unsigned long long)phba->cfg_soft_wwnn);
1091} 1688}
1092 1689
1093 1690/**
1691 * lpfc_soft_wwnn_store: sets the ww node name of the adapter.
1692 * @cdev: class device that is converted into a Scsi_host.
1693 * @buf: contains the ww node name in hexidecimal.
1694 * @count: number of wwnn bytes in buf.
1695 *
1696 * Returns:
1697 * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
1698 * value of count on success
1699 **/
1094static ssize_t 1700static ssize_t
1095lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, 1701lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
1096 const char *buf, size_t count) 1702 const char *buf, size_t count)
@@ -1178,6 +1784,15 @@ module_param(lpfc_nodev_tmo, int, 0);
1178MODULE_PARM_DESC(lpfc_nodev_tmo, 1784MODULE_PARM_DESC(lpfc_nodev_tmo,
1179 "Seconds driver will hold I/O waiting " 1785 "Seconds driver will hold I/O waiting "
1180 "for a device to come back"); 1786 "for a device to come back");
1787
1788/**
1789 * lpfc_nodev_tmo_show: Return the hba dev loss timeout value.
1790 * @dev: class converted to a Scsi_host structure.
1791 * @attr: device attribute, not used.
1792 * @buf: on return contains the dev loss timeout in decimal.
1793 *
1794 * Returns: size of formatted string.
1795 **/
1181static ssize_t 1796static ssize_t
1182lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, 1797lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
1183 char *buf) 1798 char *buf)
@@ -1189,6 +1804,21 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
1189 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 1804 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
1190} 1805}
1191 1806
1807/**
1808 * lpfc_nodev_tmo_init: Set the hba nodev timeout value.
1809 * @vport: lpfc vport structure pointer.
1810 * @val: contains the nodev timeout value.
1811 *
1812 * Description:
1813 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
1814 * a kernel error message is printed and zero is returned.
1815 * Else if val is in range then nodev tmo and devloss tmo are set to val.
1816 * Otherwise nodev tmo is set to the default value.
1817 *
1818 * Returns:
1819 * zero if already set or if val is in range
1820 * -EINVAL val out of range
1821 **/
1192static int 1822static int
1193lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) 1823lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
1194{ 1824{
@@ -1196,7 +1826,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
1196 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; 1826 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
1197 if (val != LPFC_DEF_DEVLOSS_TMO) 1827 if (val != LPFC_DEF_DEVLOSS_TMO)
1198 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1828 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1199 "0402 Ignoring nodev_tmo module " 1829 "0407 Ignoring nodev_tmo module "
1200 "parameter because devloss_tmo is " 1830 "parameter because devloss_tmo is "
1201 "set.\n"); 1831 "set.\n");
1202 return 0; 1832 return 0;
@@ -1215,6 +1845,13 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
1215 return -EINVAL; 1845 return -EINVAL;
1216} 1846}
1217 1847
1848/**
1849 * lpfc_update_rport_devloss_tmo: Update dev loss tmo value.
1850 * @vport: lpfc vport structure pointer.
1851 *
1852 * Description:
1853 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
1854 **/
1218static void 1855static void
1219lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) 1856lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
1220{ 1857{
@@ -1229,6 +1866,21 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
1229 spin_unlock_irq(shost->host_lock); 1866 spin_unlock_irq(shost->host_lock);
1230} 1867}
1231 1868
1869/**
1870 * lpfc_nodev_tmo_set: Set the vport nodev tmo and devloss tmo values.
1871 * @vport: lpfc vport structure pointer.
1872 * @val: contains the tmo value.
1873 *
1874 * Description:
1875 * If the devloss tmo is already set or the vport dev loss tmo has changed
1876 * then a kernel error message is printed and zero is returned.
1877 * Else if val is in range then nodev tmo and devloss tmo are set to val.
1878 * Otherwise nodev tmo is set to the default value.
1879 *
1880 * Returns:
1881 * zero if already set or if val is in range
1882 * -EINVAL val out of range
1883 **/
1232static int 1884static int
1233lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) 1885lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
1234{ 1886{
@@ -1269,6 +1921,21 @@ MODULE_PARM_DESC(lpfc_devloss_tmo,
1269lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, 1921lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
1270 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) 1922 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
1271lpfc_vport_param_show(devloss_tmo) 1923lpfc_vport_param_show(devloss_tmo)
1924
1925/**
1926 * lpfc_devloss_tmo_set: Sets vport nodev tmo, devloss tmo values, changed bit.
1927 * @vport: lpfc vport structure pointer.
1928 * @val: contains the tmo value.
1929 *
1930 * Description:
1931 * If val is in a valid range then set the vport nodev tmo,
1932 * devloss tmo, also set the vport dev loss tmo changed flag.
1933 * Else a kernel error message is printed.
1934 *
1935 * Returns:
1936 * zero if val is in range
1937 * -EINVAL val out of range
1938 **/
1272static int 1939static int
1273lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) 1940lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
1274{ 1941{
@@ -1366,12 +2033,27 @@ MODULE_PARM_DESC(lpfc_restrict_login,
1366 "Restrict virtual ports login to remote initiators."); 2033 "Restrict virtual ports login to remote initiators.");
1367lpfc_vport_param_show(restrict_login); 2034lpfc_vport_param_show(restrict_login);
1368 2035
2036/**
2037 * lpfc_restrict_login_init: Set the vport restrict login flag.
2038 * @vport: lpfc vport structure pointer.
2039 * @val: contains the restrict login value.
2040 *
2041 * Description:
2042 * If val is not in a valid range then log a kernel error message and set
2043 * the vport restrict login to one.
2044 * If the port type is physical clear the restrict login flag and return.
2045 * Else set the restrict login flag to val.
2046 *
2047 * Returns:
2048 * zero if val is in range
2049 * -EINVAL val out of range
2050 **/
1369static int 2051static int
1370lpfc_restrict_login_init(struct lpfc_vport *vport, int val) 2052lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
1371{ 2053{
1372 if (val < 0 || val > 1) { 2054 if (val < 0 || val > 1) {
1373 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2055 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1374 "0449 lpfc_restrict_login attribute cannot " 2056 "0422 lpfc_restrict_login attribute cannot "
1375 "be set to %d, allowed range is [0, 1]\n", 2057 "be set to %d, allowed range is [0, 1]\n",
1376 val); 2058 val);
1377 vport->cfg_restrict_login = 1; 2059 vport->cfg_restrict_login = 1;
@@ -1385,12 +2067,28 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
1385 return 0; 2067 return 0;
1386} 2068}
1387 2069
2070/**
2071 * lpfc_restrict_login_set: Set the vport restrict login flag.
2072 * @vport: lpfc vport structure pointer.
2073 * @val: contains the restrict login value.
2074 *
2075 * Description:
2076 * If val is not in a valid range then log a kernel error message and set
2077 * the vport restrict login to one.
2078 * If the port type is physical and the val is not zero log a kernel
2079 * error message, clear the restrict login flag and return zero.
2080 * Else set the restrict login flag to val.
2081 *
2082 * Returns:
2083 * zero if val is in range
2084 * -EINVAL val out of range
2085 **/
1388static int 2086static int
1389lpfc_restrict_login_set(struct lpfc_vport *vport, int val) 2087lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
1390{ 2088{
1391 if (val < 0 || val > 1) { 2089 if (val < 0 || val > 1) {
1392 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2090 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1393 "0450 lpfc_restrict_login attribute cannot " 2091 "0425 lpfc_restrict_login attribute cannot "
1394 "be set to %d, allowed range is [0, 1]\n", 2092 "be set to %d, allowed range is [0, 1]\n",
1395 val); 2093 val);
1396 vport->cfg_restrict_login = 1; 2094 vport->cfg_restrict_login = 1;
@@ -1441,6 +2139,23 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
1441# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. 2139# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
1442# Default value is 0. 2140# Default value is 0.
1443*/ 2141*/
2142
2143/**
2144 * lpfc_topology_set: Set the adapters topology field.
2145 * @phba: lpfc_hba pointer.
2146 * @val: topology value.
2147 *
2148 * Description:
2149 * If val is in a valid range then set the adapter's topology field and
2150 * issue a lip; if the lip fails reset the topology to the old value.
2151 *
2152 * If the value is not in range log a kernel error message and return an error.
2153 *
2154 * Returns:
2155 * zero if val is in range and lip okay
2156 * non-zero return value from lpfc_issue_lip()
2157 * -EINVAL val out of range
2158 **/
1444static int 2159static int
1445lpfc_topology_set(struct lpfc_hba *phba, int val) 2160lpfc_topology_set(struct lpfc_hba *phba, int val)
1446{ 2161{
@@ -1469,6 +2184,335 @@ lpfc_param_store(topology)
1469static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2184static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
1470 lpfc_topology_show, lpfc_topology_store); 2185 lpfc_topology_show, lpfc_topology_store);
1471 2186
2187
2188/**
2189 * lpfc_stat_data_ctrl_store: write call back for lpfc_stat_data_ctrl
2190 * sysfs file.
2191 * @dev: Pointer to class device.
2192 * @buf: Data buffer.
2193 * @count: Size of the data buffer.
2194 *
2195 * This function get called when an user write to the lpfc_stat_data_ctrl
2196 * sysfs file. This function parse the command written to the sysfs file
2197 * and take appropriate action. These commands are used for controlling
2198 * driver statistical data collection.
2199 * Following are the command this function handles.
2200 *
2201 * setbucket <bucket_type> <base> <step>
2202 * = Set the latency buckets.
2203 * destroybucket = destroy all the buckets.
2204 * start = start data collection
2205 * stop = stop data collection
2206 * reset = reset the collected data
2207 **/
2208static ssize_t
2209lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2210 const char *buf, size_t count)
2211{
2212 struct Scsi_Host *shost = class_to_shost(dev);
2213 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2214 struct lpfc_hba *phba = vport->phba;
2215#define LPFC_MAX_DATA_CTRL_LEN 1024
2216 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
2217 unsigned long i;
2218 char *str_ptr, *token;
2219 struct lpfc_vport **vports;
2220 struct Scsi_Host *v_shost;
2221 char *bucket_type_str, *base_str, *step_str;
2222 unsigned long base, step, bucket_type;
2223
2224 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
2225 if (strlen(buf) > LPFC_MAX_DATA_CTRL_LEN)
2226 return -EINVAL;
2227
2228 strcpy(bucket_data, buf);
2229 str_ptr = &bucket_data[0];
2230 /* Ignore this token - this is command token */
2231 token = strsep(&str_ptr, "\t ");
2232 if (!token)
2233 return -EINVAL;
2234
2235 bucket_type_str = strsep(&str_ptr, "\t ");
2236 if (!bucket_type_str)
2237 return -EINVAL;
2238
2239 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
2240 bucket_type = LPFC_LINEAR_BUCKET;
2241 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
2242 bucket_type = LPFC_POWER2_BUCKET;
2243 else
2244 return -EINVAL;
2245
2246 base_str = strsep(&str_ptr, "\t ");
2247 if (!base_str)
2248 return -EINVAL;
2249 base = simple_strtoul(base_str, NULL, 0);
2250
2251 step_str = strsep(&str_ptr, "\t ");
2252 if (!step_str)
2253 return -EINVAL;
2254 step = simple_strtoul(step_str, NULL, 0);
2255 if (!step)
2256 return -EINVAL;
2257
2258 /* Block the data collection for every vport */
2259 vports = lpfc_create_vport_work_array(phba);
2260 if (vports == NULL)
2261 return -ENOMEM;
2262
2263 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
2264 v_shost = lpfc_shost_from_vport(vports[i]);
2265 spin_lock_irq(v_shost->host_lock);
2266 /* Block and reset data collection */
2267 vports[i]->stat_data_blocked = 1;
2268 if (vports[i]->stat_data_enabled)
2269 lpfc_vport_reset_stat_data(vports[i]);
2270 spin_unlock_irq(v_shost->host_lock);
2271 }
2272
2273 /* Set the bucket attributes */
2274 phba->bucket_type = bucket_type;
2275 phba->bucket_base = base;
2276 phba->bucket_step = step;
2277
2278 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
2279 v_shost = lpfc_shost_from_vport(vports[i]);
2280
2281 /* Unblock data collection */
2282 spin_lock_irq(v_shost->host_lock);
2283 vports[i]->stat_data_blocked = 0;
2284 spin_unlock_irq(v_shost->host_lock);
2285 }
2286 lpfc_destroy_vport_work_array(phba, vports);
2287 return strlen(buf);
2288 }
2289
2290 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
2291 vports = lpfc_create_vport_work_array(phba);
2292 if (vports == NULL)
2293 return -ENOMEM;
2294
2295 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
2296 v_shost = lpfc_shost_from_vport(vports[i]);
2297 spin_lock_irq(shost->host_lock);
2298 vports[i]->stat_data_blocked = 1;
2299 lpfc_free_bucket(vport);
2300 vport->stat_data_enabled = 0;
2301 vports[i]->stat_data_blocked = 0;
2302 spin_unlock_irq(shost->host_lock);
2303 }
2304 lpfc_destroy_vport_work_array(phba, vports);
2305 phba->bucket_type = LPFC_NO_BUCKET;
2306 phba->bucket_base = 0;
2307 phba->bucket_step = 0;
2308 return strlen(buf);
2309 }
2310
2311 if (!strncmp(buf, "start", strlen("start"))) {
2312 /* If no buckets configured return error */
2313 if (phba->bucket_type == LPFC_NO_BUCKET)
2314 return -EINVAL;
2315 spin_lock_irq(shost->host_lock);
2316 if (vport->stat_data_enabled) {
2317 spin_unlock_irq(shost->host_lock);
2318 return strlen(buf);
2319 }
2320 lpfc_alloc_bucket(vport);
2321 vport->stat_data_enabled = 1;
2322 spin_unlock_irq(shost->host_lock);
2323 return strlen(buf);
2324 }
2325
2326 if (!strncmp(buf, "stop", strlen("stop"))) {
2327 spin_lock_irq(shost->host_lock);
2328 if (vport->stat_data_enabled == 0) {
2329 spin_unlock_irq(shost->host_lock);
2330 return strlen(buf);
2331 }
2332 lpfc_free_bucket(vport);
2333 vport->stat_data_enabled = 0;
2334 spin_unlock_irq(shost->host_lock);
2335 return strlen(buf);
2336 }
2337
2338 if (!strncmp(buf, "reset", strlen("reset"))) {
2339 if ((phba->bucket_type == LPFC_NO_BUCKET)
2340 || !vport->stat_data_enabled)
2341 return strlen(buf);
2342 spin_lock_irq(shost->host_lock);
2343 vport->stat_data_blocked = 1;
2344 lpfc_vport_reset_stat_data(vport);
2345 vport->stat_data_blocked = 0;
2346 spin_unlock_irq(shost->host_lock);
2347 return strlen(buf);
2348 }
2349 return -EINVAL;
2350}
2351
2352
2353/**
2354 * lpfc_stat_data_ctrl_show: Read callback function for
2355 * lpfc_stat_data_ctrl sysfs file.
2356 * @dev: Pointer to class device object.
2357 * @buf: Data buffer.
2358 *
2359 * This function is the read call back function for
2360 * lpfc_stat_data_ctrl sysfs file. This function report the
2361 * current statistical data collection state.
2362 **/
2363static ssize_t
2364lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
2365 char *buf)
2366{
2367 struct Scsi_Host *shost = class_to_shost(dev);
2368 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2369 struct lpfc_hba *phba = vport->phba;
2370 int index = 0;
2371 int i;
2372 char *bucket_type;
2373 unsigned long bucket_value;
2374
2375 switch (phba->bucket_type) {
2376 case LPFC_LINEAR_BUCKET:
2377 bucket_type = "linear";
2378 break;
2379 case LPFC_POWER2_BUCKET:
2380 bucket_type = "power2";
2381 break;
2382 default:
2383 bucket_type = "No Bucket";
2384 break;
2385 }
2386
2387 sprintf(&buf[index], "Statistical Data enabled :%d, "
2388 "blocked :%d, Bucket type :%s, Bucket base :%d,"
2389 " Bucket step :%d\nLatency Ranges :",
2390 vport->stat_data_enabled, vport->stat_data_blocked,
2391 bucket_type, phba->bucket_base, phba->bucket_step);
2392 index = strlen(buf);
2393 if (phba->bucket_type != LPFC_NO_BUCKET) {
2394 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
2395 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
2396 bucket_value = phba->bucket_base +
2397 phba->bucket_step * i;
2398 else
2399 bucket_value = phba->bucket_base +
2400 (1 << i) * phba->bucket_step;
2401
2402 if (index + 10 > PAGE_SIZE)
2403 break;
2404 sprintf(&buf[index], "%08ld ", bucket_value);
2405 index = strlen(buf);
2406 }
2407 }
2408 sprintf(&buf[index], "\n");
2409 return strlen(buf);
2410}
2411
2412/*
2413 * Sysfs attribute to control the statistical data collection.
2414 */
2415static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
2416 lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
2417
2418/*
2419 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
2420 */
2421
2422/*
2423 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
2424 * for each target.
2425 */
2426#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
2427#define MAX_STAT_DATA_SIZE_PER_TARGET \
2428 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
2429
2430
2431/**
2432 * sysfs_drvr_stat_data_read: Read callback function for lpfc_drvr_stat_data
2433 * sysfs attribute.
2434 * @kobj: Pointer to the kernel object
2435 * @bin_attr: Attribute object
2436 * @buff: Buffer pointer
2437 * @off: File offset
2438 * @count: Buffer size
2439 *
2440 * This function is the read call back function for lpfc_drvr_stat_data
2441 * sysfs file. This function export the statistical data to user
2442 * applications.
2443 **/
2444static ssize_t
2445sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
2446 char *buf, loff_t off, size_t count)
2447{
2448 struct device *dev = container_of(kobj, struct device,
2449 kobj);
2450 struct Scsi_Host *shost = class_to_shost(dev);
2451 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2452 struct lpfc_hba *phba = vport->phba;
2453 int i = 0, index = 0;
2454 unsigned long nport_index;
2455 struct lpfc_nodelist *ndlp = NULL;
2456 nport_index = (unsigned long)off /
2457 MAX_STAT_DATA_SIZE_PER_TARGET;
2458
2459 if (!vport->stat_data_enabled || vport->stat_data_blocked
2460 || (phba->bucket_type == LPFC_NO_BUCKET))
2461 return 0;
2462
2463 spin_lock_irq(shost->host_lock);
2464 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2465 if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
2466 continue;
2467
2468 if (nport_index > 0) {
2469 nport_index--;
2470 continue;
2471 }
2472
2473 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
2474 > count)
2475 break;
2476
2477 if (!ndlp->lat_data)
2478 continue;
2479
2480 /* Print the WWN */
2481 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
2482 ndlp->nlp_portname.u.wwn[0],
2483 ndlp->nlp_portname.u.wwn[1],
2484 ndlp->nlp_portname.u.wwn[2],
2485 ndlp->nlp_portname.u.wwn[3],
2486 ndlp->nlp_portname.u.wwn[4],
2487 ndlp->nlp_portname.u.wwn[5],
2488 ndlp->nlp_portname.u.wwn[6],
2489 ndlp->nlp_portname.u.wwn[7]);
2490
2491 index = strlen(buf);
2492
2493 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
2494 sprintf(&buf[index], "%010u,",
2495 ndlp->lat_data[i].cmd_count);
2496 index = strlen(buf);
2497 }
2498 sprintf(&buf[index], "\n");
2499 index = strlen(buf);
2500 }
2501 spin_unlock_irq(shost->host_lock);
2502 return index;
2503}
2504
2505static struct bin_attribute sysfs_drvr_stat_data_attr = {
2506 .attr = {
2507 .name = "lpfc_drvr_stat_data",
2508 .mode = S_IRUSR,
2509 .owner = THIS_MODULE,
2510 },
2511 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
2512 .read = sysfs_drvr_stat_data_read,
2513 .write = NULL,
2514};
2515
1472/* 2516/*
1473# lpfc_link_speed: Link speed selection for initializing the Fibre Channel 2517# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
1474# connection. 2518# connection.
@@ -1479,6 +2523,24 @@ static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
1479# 8 = 8 Gigabaud 2523# 8 = 8 Gigabaud
1480# Value range is [0,8]. Default value is 0. 2524# Value range is [0,8]. Default value is 0.
1481*/ 2525*/
2526
2527/**
2528 * lpfc_link_speed_set: Set the adapters link speed.
2529 * @phba: lpfc_hba pointer.
2530 * @val: link speed value.
2531 *
2532 * Description:
2533 * If val is in a valid range then set the adapter's link speed field and
2534 * issue a lip; if the lip fails reset the link speed to the old value.
2535 *
2536 * Notes:
2537 * If the value is not in range log a kernel error message and return an error.
2538 *
2539 * Returns:
2540 * zero if val is in range and lip okay.
2541 * non-zero return value from lpfc_issue_lip()
2542 * -EINVAL val out of range
2543 **/
1482static int 2544static int
1483lpfc_link_speed_set(struct lpfc_hba *phba, int val) 2545lpfc_link_speed_set(struct lpfc_hba *phba, int val)
1484{ 2546{
@@ -1513,6 +2575,23 @@ static int lpfc_link_speed = 0;
1513module_param(lpfc_link_speed, int, 0); 2575module_param(lpfc_link_speed, int, 0);
1514MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); 2576MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
1515lpfc_param_show(link_speed) 2577lpfc_param_show(link_speed)
2578
2579/**
2580 * lpfc_link_speed_init: Set the adapters link speed.
2581 * @phba: lpfc_hba pointer.
2582 * @val: link speed value.
2583 *
2584 * Description:
2585 * If val is in a valid range then set the adapter's link speed field.
2586 *
2587 * Notes:
2588 * If the value is not in range log a kernel error message, clear the link
2589 * speed and return an error.
2590 *
2591 * Returns:
2592 * zero if val saved.
2593 * -EINVAL val out of range
2594 **/
1516static int 2595static int
1517lpfc_link_speed_init(struct lpfc_hba *phba, int val) 2596lpfc_link_speed_init(struct lpfc_hba *phba, int val)
1518{ 2597{
@@ -1522,7 +2601,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
1522 return 0; 2601 return 0;
1523 } 2602 }
1524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1525 "0454 lpfc_link_speed attribute cannot " 2604 "0405 lpfc_link_speed attribute cannot "
1526 "be set to %d, allowed values are " 2605 "be set to %d, allowed values are "
1527 "["LPFC_LINK_SPEED_STRING"]\n", val); 2606 "["LPFC_LINK_SPEED_STRING"]\n", val);
1528 phba->cfg_link_speed = 0; 2607 phba->cfg_link_speed = 0;
@@ -1548,6 +2627,48 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
1548 "Use ADISC on rediscovery to authenticate FCP devices"); 2627 "Use ADISC on rediscovery to authenticate FCP devices");
1549 2628
1550/* 2629/*
2630# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
2631# depth. Default value is 0. When the value of this parameter is zero the
2632# SCSI command completion time is not used for controlling I/O queue depth. When
2633# the parameter is set to a non-zero value, the I/O queue depth is controlled
2634# to limit the I/O completion time to the parameter value.
2635# The value is set in milliseconds.
2636*/
2637static int lpfc_max_scsicmpl_time;
2638module_param(lpfc_max_scsicmpl_time, int, 0);
2639MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
2640 "Use command completion time to control queue depth");
2641lpfc_vport_param_show(max_scsicmpl_time);
2642lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
2643static int
2644lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
2645{
2646 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2647 struct lpfc_nodelist *ndlp, *next_ndlp;
2648
2649 if (val == vport->cfg_max_scsicmpl_time)
2650 return 0;
2651 if ((val < 0) || (val > 60000))
2652 return -EINVAL;
2653 vport->cfg_max_scsicmpl_time = val;
2654
2655 spin_lock_irq(shost->host_lock);
2656 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2657 if (!NLP_CHK_NODE_ACT(ndlp))
2658 continue;
2659 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2660 continue;
2661 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2662 }
2663 spin_unlock_irq(shost->host_lock);
2664 return 0;
2665}
2666lpfc_vport_param_store(max_scsicmpl_time);
2667static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
2668 lpfc_max_scsicmpl_time_show,
2669 lpfc_max_scsicmpl_time_store);
2670
2671/*
1551# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value 2672# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
1552# range is [0,1]. Default value is 0. 2673# range is [0,1]. Default value is 0.
1553*/ 2674*/
@@ -1623,12 +2744,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1623/* 2744/*
1624# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2745# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
1625# support this feature 2746# support this feature
1626# 0 = MSI disabled (default) 2747# 0 = MSI disabled
1627# 1 = MSI enabled 2748# 1 = MSI enabled
1628# 2 = MSI-X enabled 2749# 2 = MSI-X enabled (default)
1629# Value range is [0,2]. Default value is 0. 2750# Value range is [0,2]. Default value is 2.
1630*/ 2751*/
1631LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " 2752LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
1632 "MSI-X (2), if possible"); 2753 "MSI-X (2), if possible");
1633 2754
1634/* 2755/*
@@ -1668,6 +2789,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
1668 &dev_attr_option_rom_version, 2789 &dev_attr_option_rom_version,
1669 &dev_attr_link_state, 2790 &dev_attr_link_state,
1670 &dev_attr_num_discovered_ports, 2791 &dev_attr_num_discovered_ports,
2792 &dev_attr_menlo_mgmt_mode,
1671 &dev_attr_lpfc_drvr_version, 2793 &dev_attr_lpfc_drvr_version,
1672 &dev_attr_lpfc_temp_sensor, 2794 &dev_attr_lpfc_temp_sensor,
1673 &dev_attr_lpfc_log_verbose, 2795 &dev_attr_lpfc_log_verbose,
@@ -1709,6 +2831,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
1709 &dev_attr_lpfc_enable_hba_reset, 2831 &dev_attr_lpfc_enable_hba_reset,
1710 &dev_attr_lpfc_enable_hba_heartbeat, 2832 &dev_attr_lpfc_enable_hba_heartbeat,
1711 &dev_attr_lpfc_sg_seg_cnt, 2833 &dev_attr_lpfc_sg_seg_cnt,
2834 &dev_attr_lpfc_max_scsicmpl_time,
2835 &dev_attr_lpfc_stat_data_ctrl,
1712 NULL, 2836 NULL,
1713}; 2837};
1714 2838
@@ -1731,9 +2855,29 @@ struct device_attribute *lpfc_vport_attrs[] = {
1731 &dev_attr_nport_evt_cnt, 2855 &dev_attr_nport_evt_cnt,
1732 &dev_attr_npiv_info, 2856 &dev_attr_npiv_info,
1733 &dev_attr_lpfc_enable_da_id, 2857 &dev_attr_lpfc_enable_da_id,
2858 &dev_attr_lpfc_max_scsicmpl_time,
2859 &dev_attr_lpfc_stat_data_ctrl,
1734 NULL, 2860 NULL,
1735}; 2861};
1736 2862
2863/**
2864 * sysfs_ctlreg_write: Write method for writing to ctlreg.
2865 * @kobj: kernel kobject that contains the kernel class device.
2866 * @bin_attr: kernel attributes passed to us.
2867 * @buf: contains the data to be written to the adapter IOREG space.
2868 * @off: offset into buffer to beginning of data.
2869 * @count: bytes to transfer.
2870 *
2871 * Description:
2872 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
2873 * Uses the adapter io control registers to send buf contents to the adapter.
2874 *
2875 * Returns:
2876 * -ERANGE off and count combo out of range
2877 * -EINVAL off, count or buff address invalid
2878 * -EPERM adapter is offline
2879 * value of count, buf contents written
2880 **/
1737static ssize_t 2881static ssize_t
1738sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, 2882sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1739 char *buf, loff_t off, size_t count) 2883 char *buf, loff_t off, size_t count)
@@ -1766,6 +2910,23 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1766 return count; 2910 return count;
1767} 2911}
1768 2912
2913/**
2914 * sysfs_ctlreg_read: Read method for reading from ctlreg.
2915 * @kobj: kernel kobject that contains the kernel class device.
2916 * @bin_attr: kernel attributes passed to us.
2917 * @buf: if succesful contains the data from the adapter IOREG space.
2918 * @off: offset into buffer to beginning of data.
2919 * @count: bytes to transfer.
2920 *
2921 * Description:
2922 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
2923 * Uses the adapter io control registers to read data into buf.
2924 *
2925 * Returns:
2926 * -ERANGE off and count combo out of range
2927 * -EINVAL off, count or buff address invalid
2928 * value of count, buf contents read
2929 **/
1769static ssize_t 2930static ssize_t
1770sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, 2931sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1771 char *buf, loff_t off, size_t count) 2932 char *buf, loff_t off, size_t count)
@@ -1810,7 +2971,10 @@ static struct bin_attribute sysfs_ctlreg_attr = {
1810 .write = sysfs_ctlreg_write, 2971 .write = sysfs_ctlreg_write,
1811}; 2972};
1812 2973
1813 2974/**
2975 * sysfs_mbox_idle: frees the sysfs mailbox.
2976 * @phba: lpfc_hba pointer
2977 **/
1814static void 2978static void
1815sysfs_mbox_idle(struct lpfc_hba *phba) 2979sysfs_mbox_idle(struct lpfc_hba *phba)
1816{ 2980{
@@ -1824,6 +2988,27 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
1824 } 2988 }
1825} 2989}
1826 2990
2991/**
2992 * sysfs_mbox_write: Write method for writing information via mbox.
2993 * @kobj: kernel kobject that contains the kernel class device.
2994 * @bin_attr: kernel attributes passed to us.
2995 * @buf: contains the data to be written to sysfs mbox.
2996 * @off: offset into buffer to beginning of data.
2997 * @count: bytes to transfer.
2998 *
2999 * Description:
3000 * Accessed via /sys/class/scsi_host/hostxxx/mbox.
3001 * Uses the sysfs mbox to send buf contents to the adapter.
3002 *
3003 * Returns:
3004 * -ERANGE off and count combo out of range
3005 * -EINVAL off, count or buff address invalid
3006 * zero if count is zero
3007 * -EPERM adapter is offline
3008 * -ENOMEM failed to allocate memory for the mail box
3009 * -EAGAIN offset, state or mbox is NULL
3010 * count number of bytes transferred
3011 **/
1827static ssize_t 3012static ssize_t
1828sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, 3013sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1829 char *buf, loff_t off, size_t count) 3014 char *buf, loff_t off, size_t count)
@@ -1878,6 +3063,29 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1878 return count; 3063 return count;
1879} 3064}
1880 3065
3066/**
3067 * sysfs_mbox_read: Read method for reading information via mbox.
3068 * @kobj: kernel kobject that contains the kernel class device.
3069 * @bin_attr: kernel attributes passed to us.
3070 * @buf: contains the data to be read from sysfs mbox.
3071 * @off: offset into buffer to beginning of data.
3072 * @count: bytes to transfer.
3073 *
3074 * Description:
3075 * Accessed via /sys/class/scsi_host/hostxxx/mbox.
 3076 * Uses the sysfs mbox to receive data from the adapter.
3077 *
3078 * Returns:
3079 * -ERANGE off greater than mailbox command size
3080 * -EINVAL off, count or buff address invalid
3081 * zero if off and count are zero
3082 * -EACCES adapter over temp
3083 * -EPERM garbage can value to catch a multitude of errors
3084 * -EAGAIN management IO not permitted, state or off error
3085 * -ETIME mailbox timeout
3086 * -ENODEV mailbox error
3087 * count number of bytes transferred
3088 **/
1881static ssize_t 3089static ssize_t
1882sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, 3090sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1883 char *buf, loff_t off, size_t count) 3091 char *buf, loff_t off, size_t count)
@@ -1954,6 +3162,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1954 case MBX_DEL_LD_ENTRY: 3162 case MBX_DEL_LD_ENTRY:
1955 case MBX_SET_VARIABLE: 3163 case MBX_SET_VARIABLE:
1956 case MBX_WRITE_WWN: 3164 case MBX_WRITE_WWN:
3165 case MBX_PORT_CAPABILITIES:
3166 case MBX_PORT_IOV_CONTROL:
1957 break; 3167 break;
1958 case MBX_READ_SPARM64: 3168 case MBX_READ_SPARM64:
1959 case MBX_READ_LA: 3169 case MBX_READ_LA:
@@ -1978,17 +3188,15 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1978 /* If HBA encountered an error attention, allow only DUMP 3188 /* If HBA encountered an error attention, allow only DUMP
1979 * or RESTART mailbox commands until the HBA is restarted. 3189 * or RESTART mailbox commands until the HBA is restarted.
1980 */ 3190 */
1981 if ((phba->pport->stopped) && 3191 if (phba->pport->stopped &&
1982 (phba->sysfs_mbox.mbox->mb.mbxCommand != 3192 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
1983 MBX_DUMP_MEMORY && 3193 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
1984 phba->sysfs_mbox.mbox->mb.mbxCommand != 3194 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
1985 MBX_RESTART && 3195 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
1986 phba->sysfs_mbox.mbox->mb.mbxCommand != 3196 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1987 MBX_WRITE_VPARMS)) { 3197 "1259 mbox: Issued mailbox cmd "
1988 sysfs_mbox_idle(phba); 3198 "0x%x while in stopped state.\n",
1989 spin_unlock_irq(&phba->hbalock); 3199 phba->sysfs_mbox.mbox->mb.mbxCommand);
1990 return -EPERM;
1991 }
1992 3200
1993 phba->sysfs_mbox.mbox->vport = vport; 3201 phba->sysfs_mbox.mbox->vport = vport;
1994 3202
@@ -2059,6 +3267,14 @@ static struct bin_attribute sysfs_mbox_attr = {
2059 .write = sysfs_mbox_write, 3267 .write = sysfs_mbox_write,
2060}; 3268};
2061 3269
3270/**
3271 * lpfc_alloc_sysfs_attr: Creates the ctlreg and mbox entries.
3272 * @vport: address of lpfc vport structure.
3273 *
3274 * Return codes:
3275 * zero on success
3276 * error return code from sysfs_create_bin_file()
3277 **/
2062int 3278int
2063lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) 3279lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
2064{ 3280{
@@ -2075,18 +3291,30 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
2075 if (error) 3291 if (error)
2076 goto out_remove_ctlreg_attr; 3292 goto out_remove_ctlreg_attr;
2077 3293
3294 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3295 &sysfs_drvr_stat_data_attr);
3296 if (error)
3297 goto out_remove_mbox_attr;
3298
2078 return 0; 3299 return 0;
3300out_remove_mbox_attr:
3301 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
2079out_remove_ctlreg_attr: 3302out_remove_ctlreg_attr:
2080 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3303 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
2081out: 3304out:
2082 return error; 3305 return error;
2083} 3306}
2084 3307
3308/**
3309 * lpfc_free_sysfs_attr: Removes the ctlreg and mbox entries.
3310 * @vport: address of lpfc vport structure.
3311 **/
2085void 3312void
2086lpfc_free_sysfs_attr(struct lpfc_vport *vport) 3313lpfc_free_sysfs_attr(struct lpfc_vport *vport)
2087{ 3314{
2088 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2089 3316 sysfs_remove_bin_file(&shost->shost_dev.kobj,
3317 &sysfs_drvr_stat_data_attr);
2090 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 3318 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
2091 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3319 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
2092} 3320}
@@ -2096,6 +3324,10 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
2096 * Dynamic FC Host Attributes Support 3324 * Dynamic FC Host Attributes Support
2097 */ 3325 */
2098 3326
3327/**
3328 * lpfc_get_host_port_id: Copy the vport DID into the scsi host port id.
3329 * @shost: kernel scsi host pointer.
3330 **/
2099static void 3331static void
2100lpfc_get_host_port_id(struct Scsi_Host *shost) 3332lpfc_get_host_port_id(struct Scsi_Host *shost)
2101{ 3333{
@@ -2105,6 +3337,10 @@ lpfc_get_host_port_id(struct Scsi_Host *shost)
2105 fc_host_port_id(shost) = vport->fc_myDID; 3337 fc_host_port_id(shost) = vport->fc_myDID;
2106} 3338}
2107 3339
3340/**
3341 * lpfc_get_host_port_type: Set the value of the scsi host port type.
3342 * @shost: kernel scsi host pointer.
3343 **/
2108static void 3344static void
2109lpfc_get_host_port_type(struct Scsi_Host *shost) 3345lpfc_get_host_port_type(struct Scsi_Host *shost)
2110{ 3346{
@@ -2133,6 +3369,10 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
2133 spin_unlock_irq(shost->host_lock); 3369 spin_unlock_irq(shost->host_lock);
2134} 3370}
2135 3371
3372/**
3373 * lpfc_get_host_port_state: Set the value of the scsi host port state.
3374 * @shost: kernel scsi host pointer.
3375 **/
2136static void 3376static void
2137lpfc_get_host_port_state(struct Scsi_Host *shost) 3377lpfc_get_host_port_state(struct Scsi_Host *shost)
2138{ 3378{
@@ -2167,6 +3407,10 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
2167 spin_unlock_irq(shost->host_lock); 3407 spin_unlock_irq(shost->host_lock);
2168} 3408}
2169 3409
3410/**
3411 * lpfc_get_host_speed: Set the value of the scsi host speed.
3412 * @shost: kernel scsi host pointer.
3413 **/
2170static void 3414static void
2171lpfc_get_host_speed(struct Scsi_Host *shost) 3415lpfc_get_host_speed(struct Scsi_Host *shost)
2172{ 3416{
@@ -2199,6 +3443,10 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
2199 spin_unlock_irq(shost->host_lock); 3443 spin_unlock_irq(shost->host_lock);
2200} 3444}
2201 3445
3446/**
3447 * lpfc_get_host_fabric_name: Set the value of the scsi host fabric name.
3448 * @shost: kernel scsi host pointer.
3449 **/
2202static void 3450static void
2203lpfc_get_host_fabric_name (struct Scsi_Host *shost) 3451lpfc_get_host_fabric_name (struct Scsi_Host *shost)
2204{ 3452{
@@ -2221,6 +3469,18 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
2221 fc_host_fabric_name(shost) = node_name; 3469 fc_host_fabric_name(shost) = node_name;
2222} 3470}
2223 3471
3472/**
3473 * lpfc_get_stats: Return statistical information about the adapter.
3474 * @shost: kernel scsi host pointer.
3475 *
3476 * Notes:
3477 * NULL on error for link down, no mbox pool, sli2 active,
3478 * management not allowed, memory allocation error, or mbox error.
3479 *
3480 * Returns:
3481 * NULL for error
3482 * address of the adapter host statistics
3483 **/
2224static struct fc_host_statistics * 3484static struct fc_host_statistics *
2225lpfc_get_stats(struct Scsi_Host *shost) 3485lpfc_get_stats(struct Scsi_Host *shost)
2226{ 3486{
@@ -2334,6 +3594,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
2334 return hs; 3594 return hs;
2335} 3595}
2336 3596
3597/**
3598 * lpfc_reset_stats: Copy the adapter link stats information.
3599 * @shost: kernel scsi host pointer.
3600 **/
2337static void 3601static void
2338lpfc_reset_stats(struct Scsi_Host *shost) 3602lpfc_reset_stats(struct Scsi_Host *shost)
2339{ 3603{
@@ -2411,6 +3675,14 @@ lpfc_reset_stats(struct Scsi_Host *shost)
2411 * are no sysfs handlers for link_down_tmo. 3675 * are no sysfs handlers for link_down_tmo.
2412 */ 3676 */
2413 3677
3678/**
3679 * lpfc_get_node_by_target: Return the nodelist for a target.
3680 * @starget: kernel scsi target pointer.
3681 *
3682 * Returns:
3683 * address of the node list if found
3684 * NULL target not found
3685 **/
2414static struct lpfc_nodelist * 3686static struct lpfc_nodelist *
2415lpfc_get_node_by_target(struct scsi_target *starget) 3687lpfc_get_node_by_target(struct scsi_target *starget)
2416{ 3688{
@@ -2432,6 +3704,10 @@ lpfc_get_node_by_target(struct scsi_target *starget)
2432 return NULL; 3704 return NULL;
2433} 3705}
2434 3706
3707/**
3708 * lpfc_get_starget_port_id: Set the target port id to the ndlp DID or -1.
3709 * @starget: kernel scsi target pointer.
3710 **/
2435static void 3711static void
2436lpfc_get_starget_port_id(struct scsi_target *starget) 3712lpfc_get_starget_port_id(struct scsi_target *starget)
2437{ 3713{
@@ -2440,6 +3716,12 @@ lpfc_get_starget_port_id(struct scsi_target *starget)
2440 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; 3716 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
2441} 3717}
2442 3718
3719/**
3720 * lpfc_get_starget_node_name: Set the target node name.
3721 * @starget: kernel scsi target pointer.
3722 *
3723 * Description: Set the target node name to the ndlp node name wwn or zero.
3724 **/
2443static void 3725static void
2444lpfc_get_starget_node_name(struct scsi_target *starget) 3726lpfc_get_starget_node_name(struct scsi_target *starget)
2445{ 3727{
@@ -2449,6 +3731,12 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
2449 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; 3731 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
2450} 3732}
2451 3733
3734/**
3735 * lpfc_get_starget_port_name: Set the target port name.
3736 * @starget: kernel scsi target pointer.
3737 *
3738 * Description: set the target port name to the ndlp port name wwn or zero.
3739 **/
2452static void 3740static void
2453lpfc_get_starget_port_name(struct scsi_target *starget) 3741lpfc_get_starget_port_name(struct scsi_target *starget)
2454{ 3742{
@@ -2458,6 +3746,15 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
2458 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; 3746 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
2459} 3747}
2460 3748
3749/**
3750 * lpfc_set_rport_loss_tmo: Set the rport dev loss tmo.
3751 * @rport: fc rport address.
3752 * @timeout: new value for dev loss tmo.
3753 *
3754 * Description:
3755 * If timeout is non zero set the dev_loss_tmo to timeout, else set
3756 * dev_loss_tmo to one.
3757 **/
2461static void 3758static void
2462lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 3759lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2463{ 3760{
@@ -2467,7 +3764,18 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2467 rport->dev_loss_tmo = 1; 3764 rport->dev_loss_tmo = 1;
2468} 3765}
2469 3766
2470 3767/**
3768 * lpfc_rport_show_function: Return rport target information.
3769 *
3770 * Description:
3771 * Macro that uses field to generate a function with the name lpfc_show_rport_
3772 *
3773 * lpfc_show_rport_##field: returns the bytes formatted in buf
3774 * @cdev: class converted to an fc_rport.
3775 * @buf: on return contains the target_field or zero.
3776 *
3777 * Returns: size of formatted string.
3778 **/
2471#define lpfc_rport_show_function(field, format_string, sz, cast) \ 3779#define lpfc_rport_show_function(field, format_string, sz, cast) \
2472static ssize_t \ 3780static ssize_t \
2473lpfc_show_rport_##field (struct device *dev, \ 3781lpfc_show_rport_##field (struct device *dev, \
@@ -2602,6 +3910,10 @@ struct fc_function_template lpfc_vport_transport_functions = {
2602 .vport_disable = lpfc_vport_disable, 3910 .vport_disable = lpfc_vport_disable,
2603}; 3911};
2604 3912
3913/**
3914 * lpfc_get_cfgparam: Used during probe_one to init the adapter structure.
3915 * @phba: lpfc_hba pointer.
3916 **/
2605void 3917void
2606lpfc_get_cfgparam(struct lpfc_hba *phba) 3918lpfc_get_cfgparam(struct lpfc_hba *phba)
2607{ 3919{
@@ -2637,6 +3949,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
2637 return; 3949 return;
2638} 3950}
2639 3951
3952/**
3953 * lpfc_get_vport_cfgparam: Used during port create, init the vport structure.
3954 * @vport: lpfc_vport pointer.
3955 **/
2640void 3956void
2641lpfc_get_vport_cfgparam(struct lpfc_vport *vport) 3957lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
2642{ 3958{
@@ -2648,6 +3964,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
2648 lpfc_restrict_login_init(vport, lpfc_restrict_login); 3964 lpfc_restrict_login_init(vport, lpfc_restrict_login);
2649 lpfc_fcp_class_init(vport, lpfc_fcp_class); 3965 lpfc_fcp_class_init(vport, lpfc_fcp_class);
2650 lpfc_use_adisc_init(vport, lpfc_use_adisc); 3966 lpfc_use_adisc_init(vport, lpfc_use_adisc);
3967 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
2651 lpfc_fdmi_on_init(vport, lpfc_fdmi_on); 3968 lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
2652 lpfc_discovery_threads_init(vport, lpfc_discovery_threads); 3969 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
2653 lpfc_max_luns_init(vport, lpfc_max_luns); 3970 lpfc_max_luns_init(vport, lpfc_max_luns);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 1b8245213b83..044ef4057d28 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param); 21typedef int (*node_filter)(struct lpfc_nodelist *, void *);
22 22
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
@@ -26,11 +26,11 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 26void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
27 27
28void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
29int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 29int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
30 struct lpfc_dmabuf *mp);
31void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 30void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
32void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport); 31void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
33void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 32void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
33int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
34int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); 34int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
35void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 35void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
36void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 36void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -43,7 +43,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
43void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 43void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
44 44
45struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 45struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
46void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); 46void lpfc_cleanup_rpis(struct lpfc_vport *, int);
47int lpfc_linkdown(struct lpfc_hba *); 47int lpfc_linkdown(struct lpfc_hba *);
48void lpfc_port_link_failure(struct lpfc_vport *); 48void lpfc_port_link_failure(struct lpfc_vport *);
49void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 49void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -135,7 +135,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
135int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 135int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
136int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 136int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
137void lpfc_fdmi_tmo(unsigned long); 137void lpfc_fdmi_tmo(unsigned long);
138void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport); 138void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
139 139
140int lpfc_config_port_prep(struct lpfc_hba *); 140int lpfc_config_port_prep(struct lpfc_hba *);
141int lpfc_config_port_post(struct lpfc_hba *); 141int lpfc_config_port_post(struct lpfc_hba *);
@@ -155,6 +155,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *);
155void lpfc_handle_eratt(struct lpfc_hba *); 155void lpfc_handle_eratt(struct lpfc_hba *);
156void lpfc_handle_latt(struct lpfc_hba *); 156void lpfc_handle_latt(struct lpfc_hba *);
157irqreturn_t lpfc_intr_handler(int, void *); 157irqreturn_t lpfc_intr_handler(int, void *);
158irqreturn_t lpfc_sp_intr_handler(int, void *);
159irqreturn_t lpfc_fp_intr_handler(int, void *);
158 160
159void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 161void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
160void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 162void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -175,11 +177,12 @@ void lpfc_mem_free(struct lpfc_hba *);
175void lpfc_stop_vport_timers(struct lpfc_vport *); 177void lpfc_stop_vport_timers(struct lpfc_vport *);
176 178
177void lpfc_poll_timeout(unsigned long ptr); 179void lpfc_poll_timeout(unsigned long ptr);
178void lpfc_poll_start_timer(struct lpfc_hba * phba); 180void lpfc_poll_start_timer(struct lpfc_hba *);
179void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); 181void lpfc_poll_eratt(unsigned long);
182void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
180struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 183struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
181void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 184void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
182uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 185uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
183 186
184void lpfc_reset_barrier(struct lpfc_hba * phba); 187void lpfc_reset_barrier(struct lpfc_hba * phba);
185int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 188int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -187,11 +190,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
187int lpfc_sli_brdreset(struct lpfc_hba *); 190int lpfc_sli_brdreset(struct lpfc_hba *);
188int lpfc_sli_brdrestart(struct lpfc_hba *); 191int lpfc_sli_brdrestart(struct lpfc_hba *);
189int lpfc_sli_hba_setup(struct lpfc_hba *); 192int lpfc_sli_hba_setup(struct lpfc_hba *);
193int lpfc_sli_config_port(struct lpfc_hba *, int);
190int lpfc_sli_host_down(struct lpfc_vport *); 194int lpfc_sli_host_down(struct lpfc_vport *);
191int lpfc_sli_hba_down(struct lpfc_hba *); 195int lpfc_sli_hba_down(struct lpfc_hba *);
192int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 196int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
193int lpfc_sli_handle_mb_event(struct lpfc_hba *); 197int lpfc_sli_handle_mb_event(struct lpfc_hba *);
194int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 198int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
199int lpfc_sli_check_eratt(struct lpfc_hba *);
195int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 200int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
196 struct lpfc_sli_ring *, uint32_t); 201 struct lpfc_sli_ring *, uint32_t);
197void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 202void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -199,6 +204,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
199 struct lpfc_iocbq *, uint32_t); 204 struct lpfc_iocbq *, uint32_t);
200void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 205void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
201void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 206void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
207void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
202int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 208int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
203 struct lpfc_dmabuf *); 209 struct lpfc_dmabuf *);
204struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 210struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -226,17 +232,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
226struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, 232struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
227 struct lpfc_name *); 233 struct lpfc_name *);
228 234
229int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 235int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
230 uint32_t timeout);
231 236
232int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 237int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
233 struct lpfc_sli_ring * pring, 238 struct lpfc_iocbq *, struct lpfc_iocbq *,
234 struct lpfc_iocbq * piocb, 239 uint32_t);
235 struct lpfc_iocbq * prspiocbq, 240void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
236 uint32_t timeout); 241 struct lpfc_iocbq *);
237void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
238 struct lpfc_iocbq * cmdiocb,
239 struct lpfc_iocbq * rspiocb);
240 242
241void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); 243void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
242 244
@@ -269,7 +271,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
269 271
270struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); 272struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
271int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); 273int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
272void lpfc_mbx_unreg_vpi(struct lpfc_vport *); 274int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
273void destroy_port(struct lpfc_vport *); 275void destroy_port(struct lpfc_vport *);
274int lpfc_get_instance(void); 276int lpfc_get_instance(void);
275void lpfc_host_attrib_init(struct Scsi_Host *); 277void lpfc_host_attrib_init(struct Scsi_Host *);
@@ -290,6 +292,13 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
290void lpfc_adjust_queue_depth(struct lpfc_hba *); 292void lpfc_adjust_queue_depth(struct lpfc_hba *);
291void lpfc_ramp_down_queue_handler(struct lpfc_hba *); 293void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
292void lpfc_ramp_up_queue_handler(struct lpfc_hba *); 294void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
295void lpfc_scsi_dev_block(struct lpfc_hba *);
296
297void
298lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
299 struct lpfc_iocbq *);
300struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
301void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
293 302
294#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 303#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
295#define HBA_EVENT_RSCN 5 304#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7fc74cf5823b..26dae8bae2d1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -34,6 +34,7 @@
34 34
35#include "lpfc_hw.h" 35#include "lpfc_hw.h"
36#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_nl.h"
37#include "lpfc_disc.h" 38#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
39#include "lpfc.h" 40#include "lpfc.h"
@@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
134 } 135 }
135 list_del(&head); 136 list_del(&head);
136 } else { 137 } else {
137 struct lpfc_iocbq *next; 138 INIT_LIST_HEAD(&head);
138 139 list_add_tail(&head, &piocbq->list);
139 list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { 140 list_for_each_entry(iocbq, &head, list) {
140 icmd = &iocbq->iocb; 141 icmd = &iocbq->iocb;
141 if (icmd->ulpBdeCount == 0) 142 if (icmd->ulpBdeCount == 0)
142 lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0); 143 lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
143 for (i = 0; i < icmd->ulpBdeCount; i++) { 144 for (i = 0; i < icmd->ulpBdeCount; i++) {
144 paddr = getPaddr(icmd->un.cont64[i].addrHigh, 145 paddr = getPaddr(icmd->un.cont64[i].addrHigh,
145 icmd->un.cont64[i].addrLow); 146 icmd->un.cont64[i].addrLow);
146 mp = lpfc_sli_ringpostbuf_get(phba, pring, 147 mp = lpfc_sli_ringpostbuf_get(phba, pring,
147 paddr); 148 paddr);
148 size = icmd->un.cont64[i].tus.f.bdeSize; 149 size = icmd->un.cont64[i].tus.f.bdeSize;
149 lpfc_ct_unsol_buffer(phba, piocbq, mp, size); 150 lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
150 lpfc_in_buf_free(phba, mp); 151 lpfc_in_buf_free(phba, mp);
151 } 152 }
152 list_del(&iocbq->list);
153 lpfc_sli_release_iocbq(phba, iocbq);
154 lpfc_post_buffer(phba, pring, i); 153 lpfc_post_buffer(phba, pring, i);
155 } 154 }
155 list_del(&head);
156 } 156 }
157} 157}
158 158
@@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
212 else 212 else
213 list_add_tail(&mp->list, &mlist->list); 213 list_add_tail(&mp->list, &mlist->list);
214 214
215 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 215 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
216 /* build buffer ptr list for IOCB */ 216 /* build buffer ptr list for IOCB */
217 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); 217 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
218 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); 218 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
@@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
283 icmd->un.genreq64.bdl.ulpIoTag32 = 0; 283 icmd->un.genreq64.bdl.ulpIoTag32 = 0;
284 icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 284 icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
285 icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 285 icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
286 icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 286 icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
287 icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64)); 287 icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
288 288
289 if (usr_flg) 289 if (usr_flg)
@@ -861,7 +861,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
861 861
862 retry++; 862 retry++;
863 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 863 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
864 "0216 Retrying NS cmd %x\n", cmdcode); 864 "0250 Retrying NS cmd %x\n", cmdcode);
865 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); 865 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
866 if (rc == 0) 866 if (rc == 0)
867 goto out; 867 goto out;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 094b47e94b29..771920bdde44 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -35,6 +35,7 @@
35 35
36#include "lpfc_hw.h" 36#include "lpfc_hw.h"
37#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_nl.h"
38#include "lpfc_disc.h" 39#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
40#include "lpfc.h" 41#include "lpfc.h"
@@ -46,13 +47,14 @@
46#include "lpfc_debugfs.h" 47#include "lpfc_debugfs.h"
47 48
48#ifdef CONFIG_LPFC_DEBUG_FS 49#ifdef CONFIG_LPFC_DEBUG_FS
49/* debugfs interface 50/**
51 * debugfs interface
50 * 52 *
51 * To access this interface the user should: 53 * To access this interface the user should:
52 * # mkdir /debug 54 * # mkdir /debug
53 * # mount -t debugfs none /debug 55 * # mount -t debugfs none /debug
54 * 56 *
55 * The lpfc debugfs directory hierachy is: 57 * The lpfc debugfs directory hierarchy is:
56 * lpfc/lpfcX/vportY 58 * lpfc/lpfcX/vportY
57 * where X is the lpfc hba unique_id 59 * where X is the lpfc hba unique_id
58 * where Y is the vport VPI on that hba 60 * where Y is the vport VPI on that hba
@@ -61,14 +63,21 @@
61 * discovery_trace 63 * discovery_trace
62 * This is an ACSII readable file that contains a trace of the last 64 * This is an ACSII readable file that contains a trace of the last
63 * lpfc_debugfs_max_disc_trc events that happened on a specific vport. 65 * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
64 * See lpfc_debugfs.h for different categories of 66 * See lpfc_debugfs.h for different categories of discovery events.
65 * discovery events. To enable the discovery trace, the following 67 * To enable the discovery trace, the following module parameters must be set:
66 * module parameters must be set:
67 * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support 68 * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
68 * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for 69 * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
69 * EACH vport. X MUST also be a power of 2. 70 * EACH vport. X MUST also be a power of 2.
70 * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in 71 * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
71 * lpfc_debugfs.h . 72 * lpfc_debugfs.h .
73 *
74 * slow_ring_trace
75 * This is an ACSII readable file that contains a trace of the last
76 * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
77 * To enable the slow ring trace, the following module parameters must be set:
78 * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
79 * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
80 * the HBA. X MUST also be a power of 2.
72 */ 81 */
73static int lpfc_debugfs_enable = 1; 82static int lpfc_debugfs_enable = 1;
74module_param(lpfc_debugfs_enable, int, 0); 83module_param(lpfc_debugfs_enable, int, 0);
@@ -117,6 +126,25 @@ struct lpfc_debug {
117static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); 126static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
118static unsigned long lpfc_debugfs_start_time = 0L; 127static unsigned long lpfc_debugfs_start_time = 0L;
119 128
129/**
130 * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer.
131 * @vport: The vport to gather the log info from.
132 * @buf: The buffer to dump log into.
133 * @size: The maximum amount of data to process.
134 *
135 * Description:
136 * This routine gathers the lpfc discovery debugfs data from the @vport and
137 * dumps it to @buf up to @size number of bytes. It will start at the next entry
138 * in the log and process the log until the end of the buffer. Then it will
139 * gather from the beginning of the log and process until the current entry.
140 *
141 * Notes:
142 * Discovery logging will be disabled while while this routine dumps the log.
143 *
144 * Return Value:
145 * This routine returns the amount of bytes that were dumped into @buf and will
146 * not exceed @size.
147 **/
120static int 148static int
121lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) 149lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
122{ 150{
@@ -125,7 +153,6 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
125 struct lpfc_debugfs_trc *dtp; 153 struct lpfc_debugfs_trc *dtp;
126 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 154 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
127 155
128
129 enable = lpfc_debugfs_enable; 156 enable = lpfc_debugfs_enable;
130 lpfc_debugfs_enable = 0; 157 lpfc_debugfs_enable = 0;
131 158
@@ -159,6 +186,25 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
159 return len; 186 return len;
160} 187}
161 188
189/**
190 * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer.
191 * @phba: The HBA to gather the log info from.
192 * @buf: The buffer to dump log into.
193 * @size: The maximum amount of data to process.
194 *
195 * Description:
196 * This routine gathers the lpfc slow ring debugfs data from the @phba and
197 * dumps it to @buf up to @size number of bytes. It will start at the next entry
198 * in the log and process the log until the end of the buffer. Then it will
199 * gather from the beginning of the log and process until the current entry.
200 *
201 * Notes:
202 * Slow ring logging will be disabled while while this routine dumps the log.
203 *
204 * Return Value:
205 * This routine returns the amount of bytes that were dumped into @buf and will
206 * not exceed @size.
207 **/
162static int 208static int
163lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) 209lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
164{ 210{
@@ -203,6 +249,25 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
203 249
204static int lpfc_debugfs_last_hbq = -1; 250static int lpfc_debugfs_last_hbq = -1;
205 251
252/**
253 * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer.
254 * @phba: The HBA to gather host buffer info from.
255 * @buf: The buffer to dump log into.
256 * @size: The maximum amount of data to process.
257 *
258 * Description:
259 * This routine dumps the host buffer queue info from the @phba to @buf up to
260 * @size number of bytes. A header that describes the current hbq state will be
261 * dumped to @buf first and then info on each hbq entry will be dumped to @buf
262 * until @size bytes have been dumped or all the hbq info has been dumped.
263 *
264 * Notes:
265 * This routine will rotate through each configured HBQ each time called.
266 *
267 * Return Value:
268 * This routine returns the amount of bytes that were dumped into @buf and will
269 * not exceed @size.
270 **/
206static int 271static int
207lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) 272lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
208{ 273{
@@ -303,6 +368,24 @@ skipit:
303 368
304static int lpfc_debugfs_last_hba_slim_off; 369static int lpfc_debugfs_last_hba_slim_off;
305 370
371/**
372 * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer.
373 * @phba: The HBA to gather SLIM info from.
374 * @buf: The buffer to dump log into.
375 * @size: The maximum amount of data to process.
376 *
377 * Description:
378 * This routine dumps the current contents of HBA SLIM for the HBA associated
379 * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
380 *
381 * Notes:
382 * This routine will only dump up to 1024 bytes of data each time called and
383 * should be called multiple times to dump the entire HBA SLIM.
384 *
385 * Return Value:
386 * This routine returns the amount of bytes that were dumped into @buf and will
387 * not exceed @size.
388 **/
306static int 389static int
307lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) 390lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
308{ 391{
@@ -342,6 +425,21 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
342 return len; 425 return len;
343} 426}
344 427
428/**
429 * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer.
430 * @phba: The HBA to gather Host SLIM info from.
431 * @buf: The buffer to dump log into.
432 * @size: The maximum amount of data to process.
433 *
434 * Description:
435 * This routine dumps the current contents of host SLIM for the host associated
436 * with @phba to @buf up to @size bytes of data. The dump will contain the
437 * Mailbox, PCB, Rings, and Registers that are located in host memory.
438 *
439 * Return Value:
440 * This routine returns the amount of bytes that were dumped into @buf and will
441 * not exceed @size.
442 **/
345static int 443static int
346lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) 444lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
347{ 445{
@@ -357,7 +455,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
357 spin_lock_irq(&phba->hbalock); 455 spin_lock_irq(&phba->hbalock);
358 456
359 len += snprintf(buf+len, size-len, "SLIM Mailbox\n"); 457 len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
360 ptr = (uint32_t *)phba->slim2p; 458 ptr = (uint32_t *)phba->slim2p.virt;
361 i = sizeof(MAILBOX_t); 459 i = sizeof(MAILBOX_t);
362 while (i > 0) { 460 while (i > 0) {
363 len += snprintf(buf+len, size-len, 461 len += snprintf(buf+len, size-len,
@@ -370,7 +468,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
370 } 468 }
371 469
372 len += snprintf(buf+len, size-len, "SLIM PCB\n"); 470 len += snprintf(buf+len, size-len, "SLIM PCB\n");
373 ptr = (uint32_t *)&phba->slim2p->pcb; 471 ptr = (uint32_t *)phba->pcb;
374 i = sizeof(PCB_t); 472 i = sizeof(PCB_t);
375 while (i > 0) { 473 while (i > 0) {
376 len += snprintf(buf+len, size-len, 474 len += snprintf(buf+len, size-len,
@@ -382,44 +480,16 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
382 off += (8 * sizeof(uint32_t)); 480 off += (8 * sizeof(uint32_t));
383 } 481 }
384 482
385 pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port; 483 for (i = 0; i < 4; i++) {
386 pring = &psli->ring[0]; 484 pgpp = &phba->port_gp[i];
387 len += snprintf(buf+len, size-len, 485 pring = &psli->ring[i];
388 "Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) " 486 len += snprintf(buf+len, size-len,
389 "RSP PutInx:%d Max:%d\n", 487 "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
390 pgpp->cmdGetInx, pring->numCiocb, 488 "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
391 pring->next_cmdidx, pring->local_getidx, pring->flag, 489 i, pgpp->cmdGetInx, pring->numCiocb,
392 pgpp->rspPutInx, pring->numRiocb); 490 pring->next_cmdidx, pring->local_getidx,
393 pgpp++; 491 pring->flag, pgpp->rspPutInx, pring->numRiocb);
394 492 }
395 pring = &psli->ring[1];
396 len += snprintf(buf+len, size-len,
397 "Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
398 "RSP PutInx:%d Max:%d\n",
399 pgpp->cmdGetInx, pring->numCiocb,
400 pring->next_cmdidx, pring->local_getidx, pring->flag,
401 pgpp->rspPutInx, pring->numRiocb);
402 pgpp++;
403
404 pring = &psli->ring[2];
405 len += snprintf(buf+len, size-len,
406 "Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
407 "RSP PutInx:%d Max:%d\n",
408 pgpp->cmdGetInx, pring->numCiocb,
409 pring->next_cmdidx, pring->local_getidx, pring->flag,
410 pgpp->rspPutInx, pring->numRiocb);
411 pgpp++;
412
413 pring = &psli->ring[3];
414 len += snprintf(buf+len, size-len,
415 "Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
416 "RSP PutInx:%d Max:%d\n",
417 pgpp->cmdGetInx, pring->numCiocb,
418 pring->next_cmdidx, pring->local_getidx, pring->flag,
419 pgpp->rspPutInx, pring->numRiocb);
420
421
422 ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get;
423 word0 = readl(phba->HAregaddr); 493 word0 = readl(phba->HAregaddr);
424 word1 = readl(phba->CAregaddr); 494 word1 = readl(phba->CAregaddr);
425 word2 = readl(phba->HSregaddr); 495 word2 = readl(phba->HSregaddr);
@@ -430,6 +500,21 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
430 return len; 500 return len;
431} 501}
432 502
503/**
504 * lpfc_debugfs_nodelist_data - Dump target node list to a buffer.
505 * @vport: The vport to gather target node info from.
506 * @buf: The buffer to dump log into.
507 * @size: The maximum amount of data to process.
508 *
509 * Description:
510 * This routine dumps the current target node list associated with @vport to
511 * @buf up to @size bytes of data. Each node entry in the dump will contain a
512 * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
513 *
514 * Return Value:
515 * This routine returns the amount of bytes that were dumped into @buf and will
516 * not exceed @size.
517 **/
433static int 518static int
434lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) 519lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
435{ 520{
@@ -513,7 +598,22 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
513} 598}
514#endif 599#endif
515 600
516 601/**
602 * lpfc_debugfs_disc_trc - Store discovery trace log.
603 * @vport: The vport to associate this trace string with for retrieval.
604 * @mask: Log entry classification.
605 * @fmt: Format string to be displayed when dumping the log.
606 * @data1: 1st data parameter to be applied to @fmt.
607 * @data2: 2nd data parameter to be applied to @fmt.
608 * @data3: 3rd data parameter to be applied to @fmt.
609 *
610 * Description:
611 * This routine is used by the driver code to add a debugfs log entry to the
612 * discovery trace buffer associated with @vport. Only entries with a @mask that
613 * match the current debugfs discovery mask will be saved. Entries that do not
614 * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
615 * printf when displaying the log.
616 **/
517inline void 617inline void
518lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, 618lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
519 uint32_t data1, uint32_t data2, uint32_t data3) 619 uint32_t data1, uint32_t data2, uint32_t data3)
@@ -542,6 +642,19 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
542 return; 642 return;
543} 643}
544 644
645/**
646 * lpfc_debugfs_slow_ring_trc - Store slow ring trace log.
647 * @phba: The phba to associate this trace string with for retrieval.
648 * @fmt: Format string to be displayed when dumping the log.
649 * @data1: 1st data parameter to be applied to @fmt.
650 * @data2: 2nd data parameter to be applied to @fmt.
651 * @data3: 3rd data parameter to be applied to @fmt.
652 *
653 * Description:
654 * This routine is used by the driver code to add a debugfs log entry to the
655 * discovery trace buffer associated with @vport. @fmt, @data1, @data2, and
656 * @data3 are used like printf when displaying the log.
657 **/
545inline void 658inline void
546lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, 659lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
547 uint32_t data1, uint32_t data2, uint32_t data3) 660 uint32_t data1, uint32_t data2, uint32_t data3)
@@ -568,6 +681,21 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
568} 681}
569 682
570#ifdef CONFIG_LPFC_DEBUG_FS 683#ifdef CONFIG_LPFC_DEBUG_FS
684/**
685 * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
686 * @inode: The inode pointer that contains a vport pointer.
687 * @file: The file pointer to attach the log output.
688 *
689 * Description:
690 * This routine is the entry point for the debugfs open file operation. It gets
691 * the vport from the i_private field in @inode, allocates the necessary buffer
692 * for the log, fills the buffer from the in-memory log for this vport, and then
693 * returns a pointer to that log in the private_data field in @file.
694 *
695 * Returns:
696 * This function returns zero if successful. On error it will return an negative
697 * error value.
698 **/
571static int 699static int
572lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) 700lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
573{ 701{
@@ -585,7 +713,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
585 if (!debug) 713 if (!debug)
586 goto out; 714 goto out;
587 715
588 /* Round to page boundry */ 716 /* Round to page boundary */
589 size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); 717 size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
590 size = PAGE_ALIGN(size); 718 size = PAGE_ALIGN(size);
591 719
@@ -603,6 +731,21 @@ out:
603 return rc; 731 return rc;
604} 732}
605 733
734/**
735 * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log.
736 * @inode: The inode pointer that contains a vport pointer.
737 * @file: The file pointer to attach the log output.
738 *
739 * Description:
740 * This routine is the entry point for the debugfs open file operation. It gets
741 * the vport from the i_private field in @inode, allocates the necessary buffer
742 * for the log, fills the buffer from the in-memory log for this vport, and then
743 * returns a pointer to that log in the private_data field in @file.
744 *
745 * Returns:
746 * This function returns zero if successful. On error it will return an negative
747 * error value.
748 **/
606static int 749static int
607lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) 750lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
608{ 751{
@@ -620,7 +763,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
620 if (!debug) 763 if (!debug)
621 goto out; 764 goto out;
622 765
623 /* Round to page boundry */ 766 /* Round to page boundary */
624 size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); 767 size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
625 size = PAGE_ALIGN(size); 768 size = PAGE_ALIGN(size);
626 769
@@ -638,6 +781,21 @@ out:
638 return rc; 781 return rc;
639} 782}
640 783
784/**
785 * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer.
786 * @inode: The inode pointer that contains a vport pointer.
787 * @file: The file pointer to attach the log output.
788 *
789 * Description:
790 * This routine is the entry point for the debugfs open file operation. It gets
791 * the vport from the i_private field in @inode, allocates the necessary buffer
792 * for the log, fills the buffer from the in-memory log for this vport, and then
793 * returns a pointer to that log in the private_data field in @file.
794 *
795 * Returns:
796 * This function returns zero if successful. On error it will return an negative
797 * error value.
798 **/
641static int 799static int
642lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) 800lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
643{ 801{
@@ -649,7 +807,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
649 if (!debug) 807 if (!debug)
650 goto out; 808 goto out;
651 809
652 /* Round to page boundry */ 810 /* Round to page boundary */
653 debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL); 811 debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
654 if (!debug->buffer) { 812 if (!debug->buffer) {
655 kfree(debug); 813 kfree(debug);
@@ -665,6 +823,21 @@ out:
665 return rc; 823 return rc;
666} 824}
667 825
826/**
827 * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer.
828 * @inode: The inode pointer that contains a vport pointer.
829 * @file: The file pointer to attach the log output.
830 *
831 * Description:
832 * This routine is the entry point for the debugfs open file operation. It gets
833 * the vport from the i_private field in @inode, allocates the necessary buffer
834 * for the log, fills the buffer from the in-memory log for this vport, and then
835 * returns a pointer to that log in the private_data field in @file.
836 *
837 * Returns:
838 * This function returns zero if successful. On error it will return an negative
839 * error value.
840 **/
668static int 841static int
669lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) 842lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
670{ 843{
@@ -676,7 +849,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
676 if (!debug) 849 if (!debug)
677 goto out; 850 goto out;
678 851
679 /* Round to page boundry */ 852 /* Round to page boundary */
680 debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); 853 debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
681 if (!debug->buffer) { 854 if (!debug->buffer) {
682 kfree(debug); 855 kfree(debug);
@@ -692,6 +865,21 @@ out:
692 return rc; 865 return rc;
693} 866}
694 867
868/**
869 * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer.
870 * @inode: The inode pointer that contains a vport pointer.
871 * @file: The file pointer to attach the log output.
872 *
873 * Description:
874 * This routine is the entry point for the debugfs open file operation. It gets
875 * the vport from the i_private field in @inode, allocates the necessary buffer
876 * for the log, fills the buffer from the in-memory log for this vport, and then
877 * returns a pointer to that log in the private_data field in @file.
878 *
879 * Returns:
880 * This function returns zero if successful. On error it will return an negative
881 * error value.
882 **/
695static int 883static int
696lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) 884lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
697{ 885{
@@ -703,7 +891,7 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
703 if (!debug) 891 if (!debug)
704 goto out; 892 goto out;
705 893
706 /* Round to page boundry */ 894 /* Round to page boundary */
707 debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); 895 debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
708 if (!debug->buffer) { 896 if (!debug->buffer) {
709 kfree(debug); 897 kfree(debug);
@@ -719,6 +907,21 @@ out:
719 return rc; 907 return rc;
720} 908}
721 909
910/**
911 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
912 * @inode: The inode pointer that contains a vport pointer.
913 * @file: The file pointer to attach the log output.
914 *
915 * Description:
916 * This routine is the entry point for the debugfs open file operation. It gets
917 * the vport from the i_private field in @inode, allocates the necessary buffer
918 * for the log, fills the buffer from the in-memory log for this vport, and then
919 * returns a pointer to that log in the private_data field in @file.
920 *
921 * Returns:
922 * This function returns zero if successful. On error it will return an negative
923 * error value.
924 **/
722static int 925static int
723lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) 926lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
724{ 927{
@@ -730,7 +933,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
730 if (!debug) 933 if (!debug)
731 goto out; 934 goto out;
732 935
733 /* Round to page boundry */ 936 /* Round to page boundary */
734 debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); 937 debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
735 if (!debug->buffer) { 938 if (!debug->buffer) {
736 kfree(debug); 939 kfree(debug);
@@ -746,6 +949,23 @@ out:
746 return rc; 949 return rc;
747} 950}
748 951
952/**
953 * lpfc_debugfs_lseek - Seek through a debugfs file.
954 * @file: The file pointer to seek through.
955 * @off: The offset to seek to or the amount to seek by.
956 * @whence: Indicates how to seek.
957 *
958 * Description:
959 * This routine is the entry point for the debugfs lseek file operation. The
960 * @whence parameter indicates whether @off is the offset to directly seek to,
961 * or if it is a value to seek forward or reverse by. This function figures out
962 * what the new offset of the debugfs file will be and assigns that value to the
963 * f_pos field of @file.
964 *
965 * Returns:
966 * This function returns the new offset if successful and returns a negative
967 * error if unable to process the seek.
968 **/
749static loff_t 969static loff_t
750lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) 970lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
751{ 971{
@@ -767,6 +987,22 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
767 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); 987 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
768} 988}
769 989
990/**
991 * lpfc_debugfs_read - Read a debugfs file.
992 * @file: The file pointer to read from.
993 * @buf: The buffer to copy the data to.
994 * @nbytes: The number of bytes to read.
995 * @ppos: The position in the file to start reading from.
996 *
997 * Description:
998 * This routine reads data from from the buffer indicated in the private_data
999 * field of @file. It will start reading at @ppos and copy up to @nbytes of
1000 * data to @buf.
1001 *
1002 * Returns:
1003 * This function returns the amount of data that was read (this could be less
1004 * than @nbytes if the end of the file was reached) or a negative error value.
1005 **/
770static ssize_t 1006static ssize_t
771lpfc_debugfs_read(struct file *file, char __user *buf, 1007lpfc_debugfs_read(struct file *file, char __user *buf,
772 size_t nbytes, loff_t *ppos) 1008 size_t nbytes, loff_t *ppos)
@@ -776,6 +1012,18 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
776 debug->len); 1012 debug->len);
777} 1013}
778 1014
1015/**
1016 * lpfc_debugfs_release - Release the buffer used to store debugfs file data.
1017 * @inode: The inode pointer that contains a vport pointer. (unused)
1018 * @file: The file pointer that contains the buffer to release.
1019 *
1020 * Description:
1021 * This routine frees the buffer that was allocated when the debugfs file was
1022 * opened.
1023 *
1024 * Returns:
1025 * This function returns zero.
1026 **/
779static int 1027static int
780lpfc_debugfs_release(struct inode *inode, struct file *file) 1028lpfc_debugfs_release(struct inode *inode, struct file *file)
781{ 1029{
@@ -845,6 +1093,16 @@ static struct dentry *lpfc_debugfs_root = NULL;
845static atomic_t lpfc_debugfs_hba_count; 1093static atomic_t lpfc_debugfs_hba_count;
846#endif 1094#endif
847 1095
1096/**
1097 * lpfc_debugfs_initialize - Initialize debugfs for a vport.
1098 * @vport: The vport pointer to initialize.
1099 *
1100 * Description:
1101 * When Debugfs is configured this routine sets up the lpfc debugfs file system.
1102 * If not already created, this routine will create the lpfc directory, and
1103 * lpfcX directory (for this HBA), and vportX directory for this vport. It will
1104 * also create each file used to access lpfc specific debugfs information.
1105 **/
848inline void 1106inline void
849lpfc_debugfs_initialize(struct lpfc_vport *vport) 1107lpfc_debugfs_initialize(struct lpfc_vport *vport)
850{ 1108{
@@ -862,7 +1120,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
862 atomic_set(&lpfc_debugfs_hba_count, 0); 1120 atomic_set(&lpfc_debugfs_hba_count, 0);
863 if (!lpfc_debugfs_root) { 1121 if (!lpfc_debugfs_root) {
864 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1122 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
865 "0409 Cannot create debugfs root\n"); 1123 "0408 Cannot create debugfs root\n");
866 goto debug_failed; 1124 goto debug_failed;
867 } 1125 }
868 } 1126 }
@@ -876,7 +1134,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
876 debugfs_create_dir(name, lpfc_debugfs_root); 1134 debugfs_create_dir(name, lpfc_debugfs_root);
877 if (!phba->hba_debugfs_root) { 1135 if (!phba->hba_debugfs_root) {
878 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
879 "0409 Cannot create debugfs hba\n"); 1137 "0412 Cannot create debugfs hba\n");
880 goto debug_failed; 1138 goto debug_failed;
881 } 1139 }
882 atomic_inc(&lpfc_debugfs_hba_count); 1140 atomic_inc(&lpfc_debugfs_hba_count);
@@ -890,7 +1148,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
890 phba, &lpfc_debugfs_op_hbqinfo); 1148 phba, &lpfc_debugfs_op_hbqinfo);
891 if (!phba->debug_hbqinfo) { 1149 if (!phba->debug_hbqinfo) {
892 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1150 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
893 "0409 Cannot create debugfs hbqinfo\n"); 1151 "0411 Cannot create debugfs hbqinfo\n");
894 goto debug_failed; 1152 goto debug_failed;
895 } 1153 }
896 1154
@@ -902,7 +1160,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
902 phba, &lpfc_debugfs_op_dumpHBASlim); 1160 phba, &lpfc_debugfs_op_dumpHBASlim);
903 if (!phba->debug_dumpHBASlim) { 1161 if (!phba->debug_dumpHBASlim) {
904 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1162 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
905 "0409 Cannot create debugfs dumpHBASlim\n"); 1163 "0413 Cannot create debugfs dumpHBASlim\n");
906 goto debug_failed; 1164 goto debug_failed;
907 } 1165 }
908 1166
@@ -914,7 +1172,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
914 phba, &lpfc_debugfs_op_dumpHostSlim); 1172 phba, &lpfc_debugfs_op_dumpHostSlim);
915 if (!phba->debug_dumpHostSlim) { 1173 if (!phba->debug_dumpHostSlim) {
916 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1174 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
917 "0409 Cannot create debugfs dumpHostSlim\n"); 1175 "0414 Cannot create debugfs dumpHostSlim\n");
918 goto debug_failed; 1176 goto debug_failed;
919 } 1177 }
920 1178
@@ -944,7 +1202,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
944 phba, &lpfc_debugfs_op_slow_ring_trc); 1202 phba, &lpfc_debugfs_op_slow_ring_trc);
945 if (!phba->debug_slow_ring_trc) { 1203 if (!phba->debug_slow_ring_trc) {
946 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1204 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
947 "0409 Cannot create debugfs " 1205 "0415 Cannot create debugfs "
948 "slow_ring_trace\n"); 1206 "slow_ring_trace\n");
949 goto debug_failed; 1207 goto debug_failed;
950 } 1208 }
@@ -955,7 +1213,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
955 GFP_KERNEL); 1213 GFP_KERNEL);
956 if (!phba->slow_ring_trc) { 1214 if (!phba->slow_ring_trc) {
957 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1215 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
958 "0409 Cannot create debugfs " 1216 "0416 Cannot create debugfs "
959 "slow_ring buffer\n"); 1217 "slow_ring buffer\n");
960 goto debug_failed; 1218 goto debug_failed;
961 } 1219 }
@@ -972,7 +1230,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
972 debugfs_create_dir(name, phba->hba_debugfs_root); 1230 debugfs_create_dir(name, phba->hba_debugfs_root);
973 if (!vport->vport_debugfs_root) { 1231 if (!vport->vport_debugfs_root) {
974 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1232 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
975 "0409 Cant create debugfs"); 1233 "0417 Cant create debugfs");
976 goto debug_failed; 1234 goto debug_failed;
977 } 1235 }
978 atomic_inc(&phba->debugfs_vport_count); 1236 atomic_inc(&phba->debugfs_vport_count);
@@ -1001,7 +1259,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1001 1259
1002 if (!vport->disc_trc) { 1260 if (!vport->disc_trc) {
1003 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1261 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1004 "0409 Cannot create debugfs disc trace " 1262 "0418 Cannot create debugfs disc trace "
1005 "buffer\n"); 1263 "buffer\n");
1006 goto debug_failed; 1264 goto debug_failed;
1007 } 1265 }
@@ -1014,7 +1272,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1014 vport, &lpfc_debugfs_op_disc_trc); 1272 vport, &lpfc_debugfs_op_disc_trc);
1015 if (!vport->debug_disc_trc) { 1273 if (!vport->debug_disc_trc) {
1016 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1274 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1017 "0409 Cannot create debugfs " 1275 "0419 Cannot create debugfs "
1018 "discovery_trace\n"); 1276 "discovery_trace\n");
1019 goto debug_failed; 1277 goto debug_failed;
1020 } 1278 }
@@ -1033,7 +1291,17 @@ debug_failed:
1033#endif 1291#endif
1034} 1292}
1035 1293
1036 1294/**
1295 * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport.
1296 * @vport: The vport pointer to remove from debugfs.
1297 *
1298 * Description:
1299 * When Debugfs is configured this routine removes debugfs file system elements
1300 * that are specific to this vport. It also checks to see if there are any
1301 * users left for the debugfs directories associated with the HBA and driver. If
1302 * this is the last user of the HBA directory or driver directory then it will
1303 * remove those from the debugfs infrastructure as well.
1304 **/
1037inline void 1305inline void
1038lpfc_debugfs_terminate(struct lpfc_vport *vport) 1306lpfc_debugfs_terminate(struct lpfc_vport *vport)
1039{ 1307{
@@ -1096,5 +1364,3 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
1096#endif 1364#endif
1097 return; 1365 return;
1098} 1366}
1099
1100
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2db0b74b6fad..f29e548a90d1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -37,6 +37,7 @@ enum lpfc_work_type {
37 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS, 39 LPFC_EVT_DEV_LOSS,
40 LPFC_EVT_FASTPATH_MGMT_EVT,
40}; 41};
41 42
42/* structure used to queue event to the discovery tasklet */ 43/* structure used to queue event to the discovery tasklet */
@@ -47,6 +48,24 @@ struct lpfc_work_evt {
47 enum lpfc_work_type evt; 48 enum lpfc_work_type evt;
48}; 49};
49 50
51struct lpfc_scsi_check_condition_event;
52struct lpfc_scsi_varqueuedepth_event;
53struct lpfc_scsi_event_header;
54struct lpfc_fabric_event_header;
55struct lpfc_fcprdchkerr_event;
56
57/* structure used for sending events from fast path */
58struct lpfc_fast_path_event {
59 struct lpfc_work_evt work_evt;
60 struct lpfc_vport *vport;
61 union {
62 struct lpfc_scsi_check_condition_event check_cond_evt;
63 struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
64 struct lpfc_scsi_event_header scsi_evt;
65 struct lpfc_fabric_event_header fabric_evt;
66 struct lpfc_fcprdchkerr_event read_check_error;
67 } un;
68};
50 69
51struct lpfc_nodelist { 70struct lpfc_nodelist {
52 struct list_head nlp_listp; 71 struct list_head nlp_listp;
@@ -88,6 +107,10 @@ struct lpfc_nodelist {
88 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 107 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
89 unsigned long last_q_full_time; /* jiffy of last queue full */ 108 unsigned long last_q_full_time; /* jiffy of last queue full */
90 struct kref kref; 109 struct kref kref;
110 atomic_t cmd_pending;
111 uint32_t cmd_qdepth;
112 unsigned long last_change_time;
113 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
91}; 114};
92 115
93/* Defines for nlp_flag (uint32) */ 116/* Defines for nlp_flag (uint32) */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f54e0f7eaee3..630bd28fb997 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -30,6 +30,7 @@
30 30
31#include "lpfc_hw.h" 31#include "lpfc_hw.h"
32#include "lpfc_sli.h" 32#include "lpfc_sli.h"
33#include "lpfc_nl.h"
33#include "lpfc_disc.h" 34#include "lpfc_disc.h"
34#include "lpfc_scsi.h" 35#include "lpfc_scsi.h"
35#include "lpfc.h" 36#include "lpfc.h"
@@ -53,6 +54,28 @@ static void lpfc_register_new_vport(struct lpfc_hba *phba,
53 54
54static int lpfc_max_els_tries = 3; 55static int lpfc_max_els_tries = 3;
55 56
57/**
58 * lpfc_els_chk_latt: Check host link attention event for a vport.
59 * @vport: pointer to a host virtual N_Port data structure.
60 *
61 * This routine checks whether there is an outstanding host link
62 * attention event during the discovery process with the @vport. It is done
63 * by reading the HBA's Host Attention (HA) register. If there is any host
64 * link attention events during this @vport's discovery process, the @vport
65 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
66 * be issued if the link state is not already in host link cleared state,
67 * and a return code shall indicate whether the host link attention event
68 * had happened.
69 *
70 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
71 * state in LPFC_VPORT_READY, the request for checking host link attention
72 * event will be ignored and a return code shall indicate no host link
73 * attention event had happened.
74 *
75 * Return codes
76 * 0 - no host link attention event happened
77 * 1 - host link attention event happened
78 **/
56int 79int
57lpfc_els_chk_latt(struct lpfc_vport *vport) 80lpfc_els_chk_latt(struct lpfc_vport *vport)
58{ 81{
@@ -92,6 +115,34 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
92 return 1; 115 return 1;
93} 116}
94 117
118/**
119 * lpfc_prep_els_iocb: Allocate and prepare a lpfc iocb data structure.
120 * @vport: pointer to a host virtual N_Port data structure.
121 * @expectRsp: flag indicating whether response is expected.
122 * @cmdSize: size of the ELS command.
123 * @retry: number of retries to the command IOCB when it fails.
124 * @ndlp: pointer to a node-list data structure.
125 * @did: destination identifier.
126 * @elscmd: the ELS command code.
127 *
128 * This routine is used for allocating a lpfc-IOCB data structure from
129 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
130 * passed into the routine for discovery state machine to issue an Extended
131 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
132 * and preparation routine that is used by all the discovery state machine
133 * routines and the ELS command-specific fields will be later set up by
134 * the individual discovery machine routines after calling this routine
135 * allocating and preparing a generic IOCB data structure. It fills in the
136 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
137 * payload and response payload (if expected). The reference count on the
138 * ndlp is incremented by 1 and the reference to the ndlp is put into
139 * context1 of the IOCB data structure for this IOCB to hold the ndlp
140 * reference for the command's callback function to access later.
141 *
142 * Return code
143 * Pointer to the newly allocated/prepared els iocb data structure
144 * NULL - when els iocb data structure allocation/preparation failed
145 **/
95static struct lpfc_iocbq * 146static struct lpfc_iocbq *
96lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 147lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
97 uint16_t cmdSize, uint8_t retry, 148 uint16_t cmdSize, uint8_t retry,
@@ -150,7 +201,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
150 201
151 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 202 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
152 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 203 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
153 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 204 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
154 icmd->un.elsreq64.remoteID = did; /* DID */ 205 icmd->un.elsreq64.remoteID = did; /* DID */
155 if (expectRsp) { 206 if (expectRsp) {
156 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 207 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
@@ -185,7 +236,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
185 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); 236 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
186 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); 237 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
187 bpl->tus.f.bdeSize = FCELSSIZE; 238 bpl->tus.f.bdeSize = FCELSSIZE;
188 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 239 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
189 bpl->tus.w = le32_to_cpu(bpl->tus.w); 240 bpl->tus.w = le32_to_cpu(bpl->tus.w);
190 } 241 }
191 242
@@ -233,6 +284,22 @@ els_iocb_free_pcmb_exit:
233 return NULL; 284 return NULL;
234} 285}
235 286
287/**
288 * lpfc_issue_fabric_reglogin: Issue fabric registration login for a vport.
289 * @vport: pointer to a host virtual N_Port data structure.
290 *
291 * This routine issues a fabric registration login for a @vport. An
292 * active ndlp node with Fabric_DID must already exist for this @vport.
293 * The routine invokes two mailbox commands to carry out fabric registration
294 * login through the HBA firmware: the first mailbox command requests the
295 * HBA to perform link configuration for the @vport; and the second mailbox
296 * command requests the HBA to perform the actual fabric registration login
297 * with the @vport.
298 *
299 * Return code
300 * 0 - successfully issued fabric registration login for @vport
301 * -ENXIO -- failed to issue fabric registration login for @vport
302 **/
236static int 303static int
237lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 304lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
238{ 305{
@@ -313,6 +380,26 @@ fail:
313 return -ENXIO; 380 return -ENXIO;
314} 381}
315 382
383/**
384 * lpfc_cmpl_els_flogi_fabric: Completion function for flogi to a fabric port.
385 * @vport: pointer to a host virtual N_Port data structure.
386 * @ndlp: pointer to a node-list data structure.
387 * @sp: pointer to service parameter data structure.
388 * @irsp: pointer to the IOCB within the lpfc response IOCB.
389 *
390 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
391 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
392 * port in a fabric topology. It properly sets up the parameters to the @ndlp
393 * from the IOCB response. It also check the newly assigned N_Port ID to the
394 * @vport against the previously assigned N_Port ID. If it is different from
395 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
396 * is invoked on all the remaining nodes with the @vport to unregister the
397 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
398 * is invoked to register login to the fabric.
399 *
400 * Return code
401 * 0 - Success (currently, always return 0)
402 **/
316static int 403static int
317lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 404lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
318 struct serv_parm *sp, IOCB_t *irsp) 405 struct serv_parm *sp, IOCB_t *irsp)
@@ -387,7 +474,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
387 */ 474 */
388 list_for_each_entry_safe(np, next_np, 475 list_for_each_entry_safe(np, next_np,
389 &vport->fc_nodes, nlp_listp) { 476 &vport->fc_nodes, nlp_listp) {
390 if (!NLP_CHK_NODE_ACT(ndlp)) 477 if (!NLP_CHK_NODE_ACT(np))
391 continue; 478 continue;
392 if ((np->nlp_state != NLP_STE_NPR_NODE) || 479 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
393 !(np->nlp_flag & NLP_NPR_ADISC)) 480 !(np->nlp_flag & NLP_NPR_ADISC))
@@ -416,9 +503,26 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
416 return 0; 503 return 0;
417} 504}
418 505
419/* 506/**
420 * We FLOGIed into an NPort, initiate pt2pt protocol 507 * lpfc_cmpl_els_flogi_nport: Completion function for flogi to an N_Port.
421 */ 508 * @vport: pointer to a host virtual N_Port data structure.
509 * @ndlp: pointer to a node-list data structure.
510 * @sp: pointer to service parameter data structure.
511 *
512 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
513 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
514 * in a point-to-point topology. First, the @vport's N_Port Name is compared
515 * with the received N_Port Name: if the @vport's N_Port Name is greater than
516 * the received N_Port Name lexicographically, this node shall assign local
517 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
518 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
519 * this node shall just wait for the remote node to issue PLOGI and assign
520 * N_Port IDs.
521 *
522 * Return code
523 * 0 - Success
524 * -ENXIO - Fail
525 **/
422static int 526static int
423lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 527lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
424 struct serv_parm *sp) 528 struct serv_parm *sp)
@@ -516,6 +620,29 @@ fail:
516 return -ENXIO; 620 return -ENXIO;
517} 621}
518 622
623/**
624 * lpfc_cmpl_els_flogi: Completion callback function for flogi.
625 * @phba: pointer to lpfc hba data structure.
626 * @cmdiocb: pointer to lpfc command iocb data structure.
627 * @rspiocb: pointer to lpfc response iocb data structure.
628 *
629 * This routine is the top-level completion callback function for issuing
630 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
631 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
632 * retry has been made (either immediately or delayed with lpfc_els_retry()
633 * returning 1), the command IOCB will be released and function returned.
634 * If the retry attempt has been given up (possibly reach the maximum
635 * number of retries), one additional decrement of ndlp reference shall be
636 * invoked before going out after releasing the command IOCB. This will
637 * actually release the remote node (Note, lpfc_els_free_iocb() will also
638 * invoke one decrement of ndlp reference count). If no error reported in
639 * the IOCB status, the command Port ID field is used to determine whether
640 * this is a point-to-point topology or a fabric topology: if the Port ID
641 * field is assigned, it is a fabric topology; otherwise, it is a
642 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
643 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
644 * specific topology completion conditions.
645 **/
519static void 646static void
520lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 647lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
521 struct lpfc_iocbq *rspiocb) 648 struct lpfc_iocbq *rspiocb)
@@ -618,6 +745,28 @@ out:
618 lpfc_els_free_iocb(phba, cmdiocb); 745 lpfc_els_free_iocb(phba, cmdiocb);
619} 746}
620 747
748/**
749 * lpfc_issue_els_flogi: Issue an flogi iocb command for a vport.
750 * @vport: pointer to a host virtual N_Port data structure.
751 * @ndlp: pointer to a node-list data structure.
752 * @retry: number of retries to the command IOCB.
753 *
754 * This routine issues a Fabric Login (FLOGI) Request ELS command
755 * for a @vport. The initiator service parameters are put into the payload
756 * of the FLOGI Request IOCB and the top-level callback function pointer
757 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
758 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
759 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
760 *
761 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
762 * will be incremented by 1 for holding the ndlp and the reference to ndlp
763 * will be stored into the context1 field of the IOCB for the completion
764 * callback function to the FLOGI ELS command.
765 *
766 * Return code
767 * 0 - successfully issued flogi iocb for @vport
768 * 1 - failed to issue flogi iocb for @vport
769 **/
621static int 770static int
622lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 771lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
623 uint8_t retry) 772 uint8_t retry)
@@ -694,6 +843,20 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
694 return 0; 843 return 0;
695} 844}
696 845
846/**
847 * lpfc_els_abort_flogi: Abort all outstanding flogi iocbs.
848 * @phba: pointer to lpfc hba data structure.
849 *
850 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
851 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
852 * list and issues an abort IOCB commond on each outstanding IOCB that
853 * contains a active Fabric_DID ndlp. Note that this function is to issue
854 * the abort IOCB command on all the outstanding IOCBs, thus when this
855 * function returns, it does not guarantee all the IOCBs are actually aborted.
856 *
857 * Return code
858 * 0 - Sucessfully issued abort iocb on all outstanding flogis (Always 0)
859 **/
697int 860int
698lpfc_els_abort_flogi(struct lpfc_hba *phba) 861lpfc_els_abort_flogi(struct lpfc_hba *phba)
699{ 862{
@@ -729,6 +892,22 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
729 return 0; 892 return 0;
730} 893}
731 894
895/**
896 * lpfc_initial_flogi: Issue an initial fabric login for a vport.
897 * @vport: pointer to a host virtual N_Port data structure.
898 *
899 * This routine issues an initial Fabric Login (FLOGI) for the @vport
900 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
901 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
902 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
903 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
904 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
905 * @vport.
906 *
907 * Return code
908 * 0 - failed to issue initial flogi for @vport
909 * 1 - successfully issued initial flogi for @vport
910 **/
732int 911int
733lpfc_initial_flogi(struct lpfc_vport *vport) 912lpfc_initial_flogi(struct lpfc_vport *vport)
734{ 913{
@@ -764,6 +943,22 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
764 return 1; 943 return 1;
765} 944}
766 945
946/**
947 * lpfc_initial_fdisc: Issue an initial fabric discovery for a vport.
948 * @vport: pointer to a host virtual N_Port data structure.
949 *
950 * This routine issues an initial Fabric Discover (FDISC) for the @vport
951 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
952 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
953 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
954 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
955 * is then invoked with the @vport and the ndlp to perform the FDISC for the
956 * @vport.
957 *
958 * Return code
959 * 0 - failed to issue initial fdisc for @vport
960 * 1 - successfully issued initial fdisc for @vport
961 **/
767int 962int
768lpfc_initial_fdisc(struct lpfc_vport *vport) 963lpfc_initial_fdisc(struct lpfc_vport *vport)
769{ 964{
@@ -797,6 +992,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
797 return 1; 992 return 1;
798} 993}
799 994
995/**
996 * lpfc_more_plogi: Check and issue remaining plogis for a vport.
997 * @vport: pointer to a host virtual N_Port data structure.
998 *
999 * This routine checks whether there are more remaining Port Logins
1000 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1001 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1002 * to issue ELS PLOGIs up to the configured discover threads with the
1003 * @vport (@vport->cfg_discovery_threads). The function also decrement
1004 * the @vport's num_disc_node by 1 if it is not already 0.
1005 **/
800void 1006void
801lpfc_more_plogi(struct lpfc_vport *vport) 1007lpfc_more_plogi(struct lpfc_vport *vport)
802{ 1008{
@@ -819,6 +1025,37 @@ lpfc_more_plogi(struct lpfc_vport *vport)
819 return; 1025 return;
820} 1026}
821 1027
1028/**
1029 * lpfc_plogi_confirm_nport: Confirm pologi wwpn matches stored ndlp.
1030 * @phba: pointer to lpfc hba data structure.
1031 * @prsp: pointer to response IOCB payload.
1032 * @ndlp: pointer to a node-list data structure.
1033 *
1034 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1035 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1036 * The following cases are considered N_Port confirmed:
1037 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1038 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1039 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1040 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1041 * 1) if there is a node on vport list other than the @ndlp with the same
1042 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1043 * on that node to release the RPI associated with the node; 2) if there is
1044 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1045 * into, a new node shall be allocated (or activated). In either case, the
1046 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1047 * be released and the new_ndlp shall be put on to the vport node list and
1048 * its pointer returned as the confirmed node.
1049 *
1050 * Note that before the @ndlp got "released", the keepDID from not-matching
1051 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1052 * of the @ndlp. This is because the release of @ndlp is actually to put it
1053 * into an inactive state on the vport node list and the vport node list
1054 * management algorithm does not allow two node with a same DID.
1055 *
1056 * Return code
1057 * pointer to the PLOGI N_Port @ndlp
1058 **/
822static struct lpfc_nodelist * 1059static struct lpfc_nodelist *
823lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1060lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
824 struct lpfc_nodelist *ndlp) 1061 struct lpfc_nodelist *ndlp)
@@ -922,6 +1159,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
922 return new_ndlp; 1159 return new_ndlp;
923} 1160}
924 1161
1162/**
1163 * lpfc_end_rscn: Check and handle more rscn for a vport.
1164 * @vport: pointer to a host virtual N_Port data structure.
1165 *
1166 * This routine checks whether more Registration State Change
1167 * Notifications (RSCNs) came in while the discovery state machine was in
1168 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1169 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1170 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1171 * handling the RSCNs.
1172 **/
925void 1173void
926lpfc_end_rscn(struct lpfc_vport *vport) 1174lpfc_end_rscn(struct lpfc_vport *vport)
927{ 1175{
@@ -943,6 +1191,26 @@ lpfc_end_rscn(struct lpfc_vport *vport)
943 } 1191 }
944} 1192}
945 1193
1194/**
1195 * lpfc_cmpl_els_plogi: Completion callback function for plogi.
1196 * @phba: pointer to lpfc hba data structure.
1197 * @cmdiocb: pointer to lpfc command iocb data structure.
1198 * @rspiocb: pointer to lpfc response iocb data structure.
1199 *
1200 * This routine is the completion callback function for issuing the Port
1201 * Login (PLOGI) command. For PLOGI completion, there must be an active
1202 * ndlp on the vport node list that matches the remote node ID from the
1203 * PLOGI reponse IOCB. If such ndlp does not exist, the PLOGI is simply
1204 * ignored and command IOCB released. The PLOGI response IOCB status is
1205 * checked for error conditons. If there is error status reported, PLOGI
1206 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1207 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1208 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1209 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1210 * there are additional N_Port nodes with the vport that need to perform
1211 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1212 * PLOGIs.
1213 **/
946static void 1214static void
947lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1215lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
948 struct lpfc_iocbq *rspiocb) 1216 struct lpfc_iocbq *rspiocb)
@@ -1048,6 +1316,27 @@ out:
1048 return; 1316 return;
1049} 1317}
1050 1318
1319/**
1320 * lpfc_issue_els_plogi: Issue an plogi iocb command for a vport.
1321 * @vport: pointer to a host virtual N_Port data structure.
1322 * @did: destination port identifier.
1323 * @retry: number of retries to the command IOCB.
1324 *
1325 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1326 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1327 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1328 * This routine constructs the proper feilds of the PLOGI IOCB and invokes
1329 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1330 *
1331 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1332 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1333 * will be stored into the context1 field of the IOCB for the completion
1334 * callback function to the PLOGI ELS command.
1335 *
1336 * Return code
1337 * 0 - Successfully issued a plogi for @vport
1338 * 1 - failed to issue a plogi for @vport
1339 **/
1051int 1340int
1052lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1341lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1053{ 1342{
@@ -1106,6 +1395,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1106 return 0; 1395 return 0;
1107} 1396}
1108 1397
1398/**
1399 * lpfc_cmpl_els_prli: Completion callback function for prli.
1400 * @phba: pointer to lpfc hba data structure.
1401 * @cmdiocb: pointer to lpfc command iocb data structure.
1402 * @rspiocb: pointer to lpfc response iocb data structure.
1403 *
1404 * This routine is the completion callback function for a Process Login
1405 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1406 * status. If there is error status reported, PRLI retry shall be attempted
1407 * by invoking the lpfc_els_retry() routine. Otherwise, the state
1408 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
1409 * ndlp to mark the PRLI completion.
1410 **/
1109static void 1411static void
1110lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1412lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1111 struct lpfc_iocbq *rspiocb) 1413 struct lpfc_iocbq *rspiocb)
@@ -1164,6 +1466,27 @@ out:
1164 return; 1466 return;
1165} 1467}
1166 1468
1469/**
1470 * lpfc_issue_els_prli: Issue a prli iocb command for a vport.
1471 * @vport: pointer to a host virtual N_Port data structure.
1472 * @ndlp: pointer to a node-list data structure.
1473 * @retry: number of retries to the command IOCB.
1474 *
1475 * This routine issues a Process Login (PRLI) ELS command for the
1476 * @vport. The PRLI service parameters are set up in the payload of the
1477 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1478 * is put to the IOCB completion callback func field before invoking the
1479 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1480 *
1481 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1482 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1483 * will be stored into the context1 field of the IOCB for the completion
1484 * callback function to the PRLI ELS command.
1485 *
1486 * Return code
1487 * 0 - successfully issued prli iocb command for @vport
1488 * 1 - failed to issue prli iocb command for @vport
1489 **/
1167int 1490int
1168lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1491lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1169 uint8_t retry) 1492 uint8_t retry)
@@ -1233,6 +1556,92 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1233 return 0; 1556 return 0;
1234} 1557}
1235 1558
1559/**
1560 * lpfc_rscn_disc: Perform rscn discovery for a vport.
1561 * @vport: pointer to a host virtual N_Port data structure.
1562 *
1563 * This routine performs Registration State Change Notification (RSCN)
1564 * discovery for a @vport. If the @vport's node port recovery count is not
1565 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
1566 * the nodes that need recovery. If none of the PLOGI were needed through
1567 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1568 * invoked to check and handle possible more RSCN came in during the period
1569 * of processing the current ones.
1570 **/
1571static void
1572lpfc_rscn_disc(struct lpfc_vport *vport)
1573{
1574 lpfc_can_disctmo(vport);
1575
1576 /* RSCN discovery */
1577 /* go thru NPR nodes and issue ELS PLOGIs */
1578 if (vport->fc_npr_cnt)
1579 if (lpfc_els_disc_plogi(vport))
1580 return;
1581
1582 lpfc_end_rscn(vport);
1583}
1584
1585/**
1586 * lpfc_adisc_done: Complete the adisc phase of discovery.
1587 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1588 *
1589 * This function is called when the final ADISC is completed during discovery.
1590 * This function handles clearing link attention or issuing reg_vpi depending
1591 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
1592 * discovery.
1593 * This function is called with no locks held.
1594 **/
1595static void
1596lpfc_adisc_done(struct lpfc_vport *vport)
1597{
1598 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1599 struct lpfc_hba *phba = vport->phba;
1600
1601 /*
1602 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1603 * and continue discovery.
1604 */
1605 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1606 !(vport->fc_flag & FC_RSCN_MODE)) {
1607 lpfc_issue_reg_vpi(phba, vport);
1608 return;
1609 }
1610 /*
1611 * For SLI2, we need to set port_state to READY
1612 * and continue discovery.
1613 */
1614 if (vport->port_state < LPFC_VPORT_READY) {
1615 /* If we get here, there is nothing to ADISC */
1616 if (vport->port_type == LPFC_PHYSICAL_PORT)
1617 lpfc_issue_clear_la(phba, vport);
1618 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1619 vport->num_disc_nodes = 0;
1620 /* go thru NPR list, issue ELS PLOGIs */
1621 if (vport->fc_npr_cnt)
1622 lpfc_els_disc_plogi(vport);
1623 if (!vport->num_disc_nodes) {
1624 spin_lock_irq(shost->host_lock);
1625 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1626 spin_unlock_irq(shost->host_lock);
1627 lpfc_can_disctmo(vport);
1628 lpfc_end_rscn(vport);
1629 }
1630 }
1631 vport->port_state = LPFC_VPORT_READY;
1632 } else
1633 lpfc_rscn_disc(vport);
1634}
1635
1636/**
1637 * lpfc_more_adisc: Issue more adisc as needed.
1638 * @vport: pointer to a host virtual N_Port data structure.
1639 *
1640 * This routine determines whether there are more ndlps on a @vport
1641 * node list need to have Address Discover (ADISC) issued. If so, it will
1642 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
1643 * remaining nodes which need to have ADISC sent.
1644 **/
1236void 1645void
1237lpfc_more_adisc(struct lpfc_vport *vport) 1646lpfc_more_adisc(struct lpfc_vport *vport)
1238{ 1647{
@@ -1252,23 +1661,27 @@ lpfc_more_adisc(struct lpfc_vport *vport)
1252 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 1661 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1253 sentadisc = lpfc_els_disc_adisc(vport); 1662 sentadisc = lpfc_els_disc_adisc(vport);
1254 } 1663 }
1664 if (!vport->num_disc_nodes)
1665 lpfc_adisc_done(vport);
1255 return; 1666 return;
1256} 1667}
1257 1668
1258static void 1669/**
1259lpfc_rscn_disc(struct lpfc_vport *vport) 1670 * lpfc_cmpl_els_adisc: Completion callback function for adisc.
1260{ 1671 * @phba: pointer to lpfc hba data structure.
1261 lpfc_can_disctmo(vport); 1672 * @cmdiocb: pointer to lpfc command iocb data structure.
1262 1673 * @rspiocb: pointer to lpfc response iocb data structure.
1263 /* RSCN discovery */ 1674 *
1264 /* go thru NPR nodes and issue ELS PLOGIs */ 1675 * This routine is the completion function for issuing the Address Discover
1265 if (vport->fc_npr_cnt) 1676 * (ADISC) command. It first checks to see whether link went down during
1266 if (lpfc_els_disc_plogi(vport)) 1677 * the discovery process. If so, the node will be marked as node port
1267 return; 1678 * recovery for issuing discover IOCB by the link attention handler and
1268 1679 * exit. Otherwise, the response status is checked. If error was reported
1269 lpfc_end_rscn(vport); 1680 * in the response status, the ADISC command shall be retried by invoking
1270} 1681 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
1271 1682 * the response status, the state machine is invoked to set transition
1683 * with respect to NLP_EVT_CMPL_ADISC event.
1684 **/
1272static void 1685static void
1273lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1686lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1274 struct lpfc_iocbq *rspiocb) 1687 struct lpfc_iocbq *rspiocb)
@@ -1333,57 +1746,34 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1333 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1746 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1334 NLP_EVT_CMPL_ADISC); 1747 NLP_EVT_CMPL_ADISC);
1335 1748
1336 if (disc && vport->num_disc_nodes) { 1749 /* Check to see if there are more ADISCs to be sent */
1337 /* Check to see if there are more ADISCs to be sent */ 1750 if (disc && vport->num_disc_nodes)
1338 lpfc_more_adisc(vport); 1751 lpfc_more_adisc(vport);
1339
1340 /* Check to see if we are done with ADISC authentication */
1341 if (vport->num_disc_nodes == 0) {
1342 /* If we get here, there is nothing left to ADISC */
1343 /*
1344 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1345 * and continue discovery.
1346 */
1347 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1348 !(vport->fc_flag & FC_RSCN_MODE)) {
1349 lpfc_issue_reg_vpi(phba, vport);
1350 goto out;
1351 }
1352 /*
1353 * For SLI2, we need to set port_state to READY
1354 * and continue discovery.
1355 */
1356 if (vport->port_state < LPFC_VPORT_READY) {
1357 /* If we get here, there is nothing to ADISC */
1358 if (vport->port_type == LPFC_PHYSICAL_PORT)
1359 lpfc_issue_clear_la(phba, vport);
1360
1361 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1362 vport->num_disc_nodes = 0;
1363 /* go thru NPR list, issue ELS PLOGIs */
1364 if (vport->fc_npr_cnt)
1365 lpfc_els_disc_plogi(vport);
1366
1367 if (!vport->num_disc_nodes) {
1368 spin_lock_irq(shost->host_lock);
1369 vport->fc_flag &=
1370 ~FC_NDISC_ACTIVE;
1371 spin_unlock_irq(
1372 shost->host_lock);
1373 lpfc_can_disctmo(vport);
1374 }
1375 }
1376 vport->port_state = LPFC_VPORT_READY;
1377 } else {
1378 lpfc_rscn_disc(vport);
1379 }
1380 }
1381 }
1382out: 1752out:
1383 lpfc_els_free_iocb(phba, cmdiocb); 1753 lpfc_els_free_iocb(phba, cmdiocb);
1384 return; 1754 return;
1385} 1755}
1386 1756
1757/**
1758 * lpfc_issue_els_adisc: Issue an address discover iocb to an node on a vport.
1759 * @vport: pointer to a virtual N_Port data structure.
1760 * @ndlp: pointer to a node-list data structure.
1761 * @retry: number of retries to the command IOCB.
1762 *
1763 * This routine issues an Address Discover (ADISC) for an @ndlp on a
1764 * @vport. It prepares the payload of the ADISC ELS command, updates the
1765 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
1766 * to issue the ADISC ELS command.
1767 *
1768 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1769 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1770 * will be stored into the context1 field of the IOCB for the completion
1771 * callback function to the ADISC ELS command.
1772 *
1773 * Return code
1774 * 0 - successfully issued adisc
1775 * 1 - failed to issue adisc
1776 **/
1387int 1777int
1388lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1778lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1389 uint8_t retry) 1779 uint8_t retry)
@@ -1437,6 +1827,18 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1437 return 0; 1827 return 0;
1438} 1828}
1439 1829
1830/**
1831 * lpfc_cmpl_els_logo: Completion callback function for logo.
1832 * @phba: pointer to lpfc hba data structure.
1833 * @cmdiocb: pointer to lpfc command iocb data structure.
1834 * @rspiocb: pointer to lpfc response iocb data structure.
1835 *
1836 * This routine is the completion function for issuing the ELS Logout (LOGO)
1837 * command. If no error status was reported from the LOGO response, the
1838 * state machine of the associated ndlp shall be invoked for transition with
1839 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
1840 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
1841 **/
1440static void 1842static void
1441lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1843lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1442 struct lpfc_iocbq *rspiocb) 1844 struct lpfc_iocbq *rspiocb)
@@ -1502,6 +1904,26 @@ out:
1502 return; 1904 return;
1503} 1905}
1504 1906
1907/**
1908 * lpfc_issue_els_logo: Issue a logo to an node on a vport.
1909 * @vport: pointer to a virtual N_Port data structure.
1910 * @ndlp: pointer to a node-list data structure.
1911 * @retry: number of retries to the command IOCB.
1912 *
1913 * This routine constructs and issues an ELS Logout (LOGO) iocb command
1914 * to a remote node, referred by an @ndlp on a @vport. It constructs the
1915 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
1916 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
1917 *
1918 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1919 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1920 * will be stored into the context1 field of the IOCB for the completion
1921 * callback function to the LOGO ELS command.
1922 *
1923 * Return code
1924 * 0 - successfully issued logo
1925 * 1 - failed to issue logo
1926 **/
1505int 1927int
1506lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1928lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1507 uint8_t retry) 1929 uint8_t retry)
@@ -1563,6 +1985,22 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1563 return 0; 1985 return 0;
1564} 1986}
1565 1987
1988/**
1989 * lpfc_cmpl_els_cmd: Completion callback function for generic els command.
1990 * @phba: pointer to lpfc hba data structure.
1991 * @cmdiocb: pointer to lpfc command iocb data structure.
1992 * @rspiocb: pointer to lpfc response iocb data structure.
1993 *
1994 * This routine is a generic completion callback function for ELS commands.
1995 * Specifically, it is the callback function which does not need to perform
1996 * any command specific operations. It is currently used by the ELS command
1997 * issuing routines for the ELS State Change Request (SCR),
1998 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
1999 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2000 * certain debug loggings, this callback function simply invokes the
2001 * lpfc_els_chk_latt() routine to check whether link went down during the
2002 * discovery process.
2003 **/
1566static void 2004static void
1567lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2005lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1568 struct lpfc_iocbq *rspiocb) 2006 struct lpfc_iocbq *rspiocb)
@@ -1587,6 +2025,28 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1587 return; 2025 return;
1588} 2026}
1589 2027
2028/**
2029 * lpfc_issue_els_scr: Issue a scr to an node on a vport.
2030 * @vport: pointer to a host virtual N_Port data structure.
2031 * @nportid: N_Port identifier to the remote node.
2032 * @retry: number of retries to the command IOCB.
2033 *
2034 * This routine issues a State Change Request (SCR) to a fabric node
2035 * on a @vport. The remote node @nportid is passed into the function. It
2036 * first search the @vport node list to find the matching ndlp. If no such
2037 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2038 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2039 * routine is invoked to send the SCR IOCB.
2040 *
2041 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2042 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2043 * will be stored into the context1 field of the IOCB for the completion
2044 * callback function to the SCR ELS command.
2045 *
2046 * Return code
2047 * 0 - Successfully issued scr command
2048 * 1 - Failed to issue scr command
2049 **/
1590int 2050int
1591lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2051lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1592{ 2052{
@@ -1659,6 +2119,28 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1659 return 0; 2119 return 0;
1660} 2120}
1661 2121
2122/**
2123 * lpfc_issue_els_farpr: Issue a farp to an node on a vport.
2124 * @vport: pointer to a host virtual N_Port data structure.
2125 * @nportid: N_Port identifier to the remote node.
2126 * @retry: number of retries to the command IOCB.
2127 *
2128 * This routine issues a Fibre Channel Address Resolution Response
2129 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2130 * is passed into the function. It first search the @vport node list to find
2131 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2132 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2133 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2134 *
2135 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2136 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2137 * will be stored into the context1 field of the IOCB for the completion
2138 * callback function to the PARPR ELS command.
2139 *
2140 * Return code
2141 * 0 - Successfully issued farpr command
2142 * 1 - Failed to issue farpr command
2143 **/
1662static int 2144static int
1663lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2145lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1664{ 2146{
@@ -1748,6 +2230,18 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1748 return 0; 2230 return 0;
1749} 2231}
1750 2232
2233/**
2234 * lpfc_cancel_retry_delay_tmo: Cancel the timer with delayed iocb-cmd retry.
2235 * @vport: pointer to a host virtual N_Port data structure.
2236 * @nlp: pointer to a node-list data structure.
2237 *
2238 * This routine cancels the timer with a delayed IOCB-command retry for
2239 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
2240 * removes the ELS retry event if it presents. In addition, if the
2241 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2242 * commands are sent for the @vport's nodes that require issuing discovery
2243 * ADISC.
2244 **/
1751void 2245void
1752lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 2246lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1753{ 2247{
@@ -1775,25 +2269,36 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1775 if (vport->port_state < LPFC_VPORT_READY) { 2269 if (vport->port_state < LPFC_VPORT_READY) {
1776 /* Check if there are more ADISCs to be sent */ 2270 /* Check if there are more ADISCs to be sent */
1777 lpfc_more_adisc(vport); 2271 lpfc_more_adisc(vport);
1778 if ((vport->num_disc_nodes == 0) &&
1779 (vport->fc_npr_cnt))
1780 lpfc_els_disc_plogi(vport);
1781 } else { 2272 } else {
1782 /* Check if there are more PLOGIs to be sent */ 2273 /* Check if there are more PLOGIs to be sent */
1783 lpfc_more_plogi(vport); 2274 lpfc_more_plogi(vport);
1784 } 2275 if (vport->num_disc_nodes == 0) {
1785 if (vport->num_disc_nodes == 0) { 2276 spin_lock_irq(shost->host_lock);
1786 spin_lock_irq(shost->host_lock); 2277 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1787 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2278 spin_unlock_irq(shost->host_lock);
1788 spin_unlock_irq(shost->host_lock); 2279 lpfc_can_disctmo(vport);
1789 lpfc_can_disctmo(vport); 2280 lpfc_end_rscn(vport);
1790 lpfc_end_rscn(vport); 2281 }
1791 } 2282 }
1792 } 2283 }
1793 } 2284 }
1794 return; 2285 return;
1795} 2286}
1796 2287
2288/**
2289 * lpfc_els_retry_delay: Timer function with a ndlp delayed function timer.
2290 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2291 *
2292 * This routine is invoked by the ndlp delayed-function timer to check
2293 * whether there is any pending ELS retry event(s) with the node. If not, it
2294 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2295 * adds the delayed events to the HBA work list and invokes the
2296 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2297 * event. Note that lpfc_nlp_get() is called before posting the event to
2298 * the work list to hold reference count of ndlp so that it guarantees the
2299 * reference to ndlp will still be available when the worker thread gets
2300 * to the event associated with the ndlp.
2301 **/
1797void 2302void
1798lpfc_els_retry_delay(unsigned long ptr) 2303lpfc_els_retry_delay(unsigned long ptr)
1799{ 2304{
@@ -1822,6 +2327,15 @@ lpfc_els_retry_delay(unsigned long ptr)
1822 return; 2327 return;
1823} 2328}
1824 2329
2330/**
2331 * lpfc_els_retry_delay_handler: Work thread handler for ndlp delayed function.
2332 * @ndlp: pointer to a node-list data structure.
2333 *
2334 * This routine is the worker-thread handler for processing the @ndlp delayed
2335 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2336 * the last ELS command from the associated ndlp and invokes the proper ELS
2337 * function according to the delayed ELS command to retry the command.
2338 **/
1825void 2339void
1826lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 2340lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1827{ 2341{
@@ -1884,6 +2398,27 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1884 return; 2398 return;
1885} 2399}
1886 2400
2401/**
2402 * lpfc_els_retry: Make retry decision on an els command iocb.
2403 * @phba: pointer to lpfc hba data structure.
2404 * @cmdiocb: pointer to lpfc command iocb data structure.
2405 * @rspiocb: pointer to lpfc response iocb data structure.
2406 *
2407 * This routine makes a retry decision on an ELS command IOCB, which has
2408 * failed. The following ELS IOCBs use this function for retrying the command
2409 * when previously issued command responsed with error status: FLOGI, PLOGI,
2410 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2411 * returned error status, it makes the decision whether a retry shall be
2412 * issued for the command, and whether a retry shall be made immediately or
2413 * delayed. In the former case, the corresponding ELS command issuing-function
2414 * is called to retry the command. In the later case, the ELS command shall
2415 * be posted to the ndlp delayed event and delayed function timer set to the
2416 * ndlp for the delayed command issusing.
2417 *
2418 * Return code
2419 * 0 - No retry of els command is made
2420 * 1 - Immediate or delayed retry of els command is made
2421 **/
1887static int 2422static int
1888lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2423lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1889 struct lpfc_iocbq *rspiocb) 2424 struct lpfc_iocbq *rspiocb)
@@ -2051,7 +2586,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2051 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 2586 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2052 ) { 2587 ) {
2053 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2588 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2054 "0123 FDISC Failed (x%x). " 2589 "0122 FDISC Failed (x%x). "
2055 "Fabric Detected Bad WWN\n", 2590 "Fabric Detected Bad WWN\n",
2056 stat.un.lsRjtError); 2591 stat.un.lsRjtError);
2057 lpfc_vport_set_state(vport, 2592 lpfc_vport_set_state(vport,
@@ -2182,12 +2717,26 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2182 return 0; 2717 return 0;
2183} 2718}
2184 2719
2720/**
2721 * lpfc_els_free_data: Free lpfc dma buffer and data structure with an iocb.
2722 * @phba: pointer to lpfc hba data structure.
2723 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
2724 *
2725 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
2726 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
2727 * checks to see whether there is a lpfc DMA buffer associated with the
2728 * response of the command IOCB. If so, it will be released before releasing
2729 * the lpfc DMA buffer associated with the IOCB itself.
2730 *
2731 * Return code
2732 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2733 **/
2185static int 2734static int
2186lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 2735lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2187{ 2736{
2188 struct lpfc_dmabuf *buf_ptr; 2737 struct lpfc_dmabuf *buf_ptr;
2189 2738
2190 /* Free the response before processing the command. */ 2739 /* Free the response before processing the command. */
2191 if (!list_empty(&buf_ptr1->list)) { 2740 if (!list_empty(&buf_ptr1->list)) {
2192 list_remove_head(&buf_ptr1->list, buf_ptr, 2741 list_remove_head(&buf_ptr1->list, buf_ptr,
2193 struct lpfc_dmabuf, 2742 struct lpfc_dmabuf,
@@ -2200,6 +2749,18 @@ lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2200 return 0; 2749 return 0;
2201} 2750}
2202 2751
2752/**
2753 * lpfc_els_free_bpl: Free lpfc dma buffer and data structure with bpl.
2754 * @phba: pointer to lpfc hba data structure.
2755 * @buf_ptr: pointer to the lpfc dma buffer data structure.
2756 *
2757 * This routine releases the lpfc Direct Memory Access (DMA) buffer
2758 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
2759 * pool.
2760 *
2761 * Return code
2762 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2763 **/
2203static int 2764static int
2204lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 2765lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2205{ 2766{
@@ -2208,6 +2769,33 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2208 return 0; 2769 return 0;
2209} 2770}
2210 2771
2772/**
2773 * lpfc_els_free_iocb: Free a command iocb and its associated resources.
2774 * @phba: pointer to lpfc hba data structure.
2775 * @elsiocb: pointer to lpfc els command iocb data structure.
2776 *
2777 * This routine frees a command IOCB and its associated resources. The
2778 * command IOCB data structure contains the reference to various associated
2779 * resources, these fields must be set to NULL if the associated reference
2780 * not present:
2781 * context1 - reference to ndlp
2782 * context2 - reference to cmd
2783 * context2->next - reference to rsp
2784 * context3 - reference to bpl
2785 *
2786 * It first properly decrements the reference count held on ndlp for the
2787 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
2788 * set, it invokes the lpfc_els_free_data() routine to release the Direct
2789 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
2790 * adds the DMA buffer the @phba data structure for the delayed release.
2791 * If reference to the Buffer Pointer List (BPL) is present, the
2792 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
2793 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
2794 * invoked to release the IOCB data structure back to @phba IOCBQ list.
2795 *
2796 * Return code
2797 * 0 - Success (currently, always return 0)
2798 **/
2211int 2799int
2212lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 2800lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2213{ 2801{
@@ -2274,6 +2862,23 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2274 return 0; 2862 return 0;
2275} 2863}
2276 2864
2865/**
2866 * lpfc_cmpl_els_logo_acc: Completion callback function to logo acc response.
2867 * @phba: pointer to lpfc hba data structure.
2868 * @cmdiocb: pointer to lpfc command iocb data structure.
2869 * @rspiocb: pointer to lpfc response iocb data structure.
2870 *
2871 * This routine is the completion callback function to the Logout (LOGO)
2872 * Accept (ACC) Response ELS command. This routine is invoked to indicate
2873 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
2874 * release the ndlp if it has the last reference remaining (reference count
2875 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
2876 * field to NULL to inform the following lpfc_els_free_iocb() routine no
2877 * ndlp reference count needs to be decremented. Otherwise, the ndlp
2878 * reference use-count shall be decremented by the lpfc_els_free_iocb()
2879 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
2880 * IOCB data structure.
2881 **/
2277static void 2882static void
2278lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2883lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2279 struct lpfc_iocbq *rspiocb) 2884 struct lpfc_iocbq *rspiocb)
@@ -2311,6 +2916,19 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2311 return; 2916 return;
2312} 2917}
2313 2918
2919/**
2920 * lpfc_mbx_cmpl_dflt_rpi: Completion callbk func for unreg dflt rpi mbox cmd.
2921 * @phba: pointer to lpfc hba data structure.
2922 * @pmb: pointer to the driver internal queue element for mailbox command.
2923 *
2924 * This routine is the completion callback function for unregister default
2925 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
2926 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
2927 * decrements the ndlp reference count held for this completion callback
2928 * function. After that, it invokes the lpfc_nlp_not_used() to check
2929 * whether there is only one reference left on the ndlp. If so, it will
2930 * perform one more decrement and trigger the release of the ndlp.
2931 **/
2314void 2932void
2315lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2933lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2316{ 2934{
@@ -2332,6 +2950,22 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2332 return; 2950 return;
2333} 2951}
2334 2952
2953/**
2954 * lpfc_cmpl_els_rsp: Completion callback function for els response iocb cmd.
2955 * @phba: pointer to lpfc hba data structure.
2956 * @cmdiocb: pointer to lpfc command iocb data structure.
2957 * @rspiocb: pointer to lpfc response iocb data structure.
2958 *
2959 * This routine is the completion callback function for ELS Response IOCB
2960 * command. In normal case, this callback function just properly sets the
2961 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
2962 * field in the command IOCB is not NULL, the referred mailbox command will
2963 * be send out, and then invokes the lpfc_els_free_iocb() routine to release
2964 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
2965 * link down event occurred during the discovery, the lpfc_nlp_not_used()
2966 * routine shall be invoked trying to release the ndlp if no other threads
2967 * are currently referring it.
2968 **/
2335static void 2969static void
2336lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2970lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2337 struct lpfc_iocbq *rspiocb) 2971 struct lpfc_iocbq *rspiocb)
@@ -2487,6 +3121,31 @@ out:
2487 return; 3121 return;
2488} 3122}
2489 3123
3124/**
3125 * lpfc_els_rsp_acc: Prepare and issue an acc response iocb command.
3126 * @vport: pointer to a host virtual N_Port data structure.
3127 * @flag: the els command code to be accepted.
3128 * @oldiocb: pointer to the original lpfc command iocb data structure.
3129 * @ndlp: pointer to a node-list data structure.
3130 * @mbox: pointer to the driver internal queue element for mailbox command.
3131 *
3132 * This routine prepares and issues an Accept (ACC) response IOCB
3133 * command. It uses the @flag to properly set up the IOCB field for the
3134 * specific ACC response command to be issued and invokes the
3135 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3136 * @mbox pointer is passed in, it will be put into the context_un.mbox
3137 * field of the IOCB for the completion callback function to issue the
3138 * mailbox command to the HBA later when callback is invoked.
3139 *
3140 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3141 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3142 * will be stored into the context1 field of the IOCB for the completion
3143 * callback function to the corresponding response ELS IOCB command.
3144 *
3145 * Return code
3146 * 0 - Successfully issued acc response
3147 * 1 - Failed to issue acc response
3148 **/
2490int 3149int
2491lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 3150lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
2492 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3151 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2601,6 +3260,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
2601 return 0; 3260 return 0;
2602} 3261}
2603 3262
3263/**
3264 * lpfc_els_rsp_reject: Propare and issue a rjt response iocb command.
3265 * @vport: pointer to a virtual N_Port data structure.
3266 * @rejectError:
3267 * @oldiocb: pointer to the original lpfc command iocb data structure.
3268 * @ndlp: pointer to a node-list data structure.
3269 * @mbox: pointer to the driver internal queue element for mailbox command.
3270 *
3271 * This routine prepares and issue an Reject (RJT) response IOCB
3272 * command. If a @mbox pointer is passed in, it will be put into the
3273 * context_un.mbox field of the IOCB for the completion callback function
3274 * to issue to the HBA later.
3275 *
3276 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3277 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3278 * will be stored into the context1 field of the IOCB for the completion
3279 * callback function to the reject response ELS IOCB command.
3280 *
3281 * Return code
3282 * 0 - Successfully issued reject response
3283 * 1 - Failed to issue reject response
3284 **/
2604int 3285int
2605lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 3286lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2606 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3287 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2660,6 +3341,25 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2660 return 0; 3341 return 0;
2661} 3342}
2662 3343
3344/**
3345 * lpfc_els_rsp_adisc_acc: Prepare and issue acc response to adisc iocb cmd.
3346 * @vport: pointer to a virtual N_Port data structure.
3347 * @oldiocb: pointer to the original lpfc command iocb data structure.
3348 * @ndlp: pointer to a node-list data structure.
3349 *
3350 * This routine prepares and issues an Accept (ACC) response to Address
3351 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3352 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3353 *
3354 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3355 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3356 * will be stored into the context1 field of the IOCB for the completion
3357 * callback function to the ADISC Accept response ELS IOCB command.
3358 *
3359 * Return code
3360 * 0 - Successfully issued acc adisc response
3361 * 1 - Failed to issue adisc acc response
3362 **/
2663int 3363int
2664lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3364lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2665 struct lpfc_nodelist *ndlp) 3365 struct lpfc_nodelist *ndlp)
@@ -2716,6 +3416,25 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2716 return 0; 3416 return 0;
2717} 3417}
2718 3418
3419/**
3420 * lpfc_els_rsp_prli_acc: Prepare and issue acc response to prli iocb cmd.
3421 * @vport: pointer to a virtual N_Port data structure.
3422 * @oldiocb: pointer to the original lpfc command iocb data structure.
3423 * @ndlp: pointer to a node-list data structure.
3424 *
3425 * This routine prepares and issues an Accept (ACC) response to Process
3426 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3427 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3428 *
3429 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3430 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3431 * will be stored into the context1 field of the IOCB for the completion
3432 * callback function to the PRLI Accept response ELS IOCB command.
3433 *
3434 * Return code
3435 * 0 - Successfully issued acc prli response
3436 * 1 - Failed to issue acc prli response
3437 **/
2719int 3438int
2720lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3439lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2721 struct lpfc_nodelist *ndlp) 3440 struct lpfc_nodelist *ndlp)
@@ -2795,6 +3514,32 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2795 return 0; 3514 return 0;
2796} 3515}
2797 3516
3517/**
3518 * lpfc_els_rsp_rnid_acc: Issue rnid acc response iocb command.
3519 * @vport: pointer to a virtual N_Port data structure.
3520 * @format: rnid command format.
3521 * @oldiocb: pointer to the original lpfc command iocb data structure.
3522 * @ndlp: pointer to a node-list data structure.
3523 *
3524 * This routine issues a Request Node Identification Data (RNID) Accept
3525 * (ACC) response. It constructs the RNID ACC response command according to
3526 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3527 * issue the response. Note that this command does not need to hold the ndlp
3528 * reference count for the callback. So, the ndlp reference count taken by
3529 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3530 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3531 * there is no ndlp reference available.
3532 *
3533 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3534 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3535 * will be stored into the context1 field of the IOCB for the completion
3536 * callback function. However, for the RNID Accept Response ELS command,
3537 * this is undone later by this routine after the IOCB is allocated.
3538 *
3539 * Return code
3540 * 0 - Successfully issued acc rnid response
3541 * 1 - Failed to issue acc rnid response
3542 **/
2798static int 3543static int
2799lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 3544lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2800 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 3545 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -2875,6 +3620,25 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2875 return 0; 3620 return 0;
2876} 3621}
2877 3622
3623/**
3624 * lpfc_els_disc_adisc: Issue remaining adisc iocbs to npr nodes of a vport.
3625 * @vport: pointer to a host virtual N_Port data structure.
3626 *
3627 * This routine issues Address Discover (ADISC) ELS commands to those
3628 * N_Ports which are in node port recovery state and ADISC has not been issued
3629 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
3630 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
3631 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
3632 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
3633 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
3634 * IOCBs quit for later pick up. On the other hand, after walking through
3635 * all the ndlps with the @vport and there is none ADISC IOCB issued, the
3636 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
3637 * no more ADISC need to be sent.
3638 *
3639 * Return code
3640 * The number of N_Ports with adisc issued.
3641 **/
2878int 3642int
2879lpfc_els_disc_adisc(struct lpfc_vport *vport) 3643lpfc_els_disc_adisc(struct lpfc_vport *vport)
2880{ 3644{
@@ -2914,6 +3678,25 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
2914 return sentadisc; 3678 return sentadisc;
2915} 3679}
2916 3680
3681/**
3682 * lpfc_els_disc_plogi: Issue plogi for all npr nodes of a vport before adisc.
3683 * @vport: pointer to a host virtual N_Port data structure.
3684 *
3685 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
3686 * which are in node port recovery state, with a @vport. Each time an ELS
3687 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
3688 * the per @vport number of discover count (num_disc_nodes) shall be
3689 * incremented. If the num_disc_nodes reaches a pre-configured threshold
3690 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
3691 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
3692 * later pick up. On the other hand, after walking through all the ndlps with
3693 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
3694 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
3695 * PLOGI need to be sent.
3696 *
3697 * Return code
3698 * The number of N_Ports with plogi issued.
3699 **/
2917int 3700int
2918lpfc_els_disc_plogi(struct lpfc_vport *vport) 3701lpfc_els_disc_plogi(struct lpfc_vport *vport)
2919{ 3702{
@@ -2954,6 +3737,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
2954 return sentplogi; 3737 return sentplogi;
2955} 3738}
2956 3739
3740/**
3741 * lpfc_els_flush_rscn: Clean up any rscn activities with a vport.
3742 * @vport: pointer to a host virtual N_Port data structure.
3743 *
3744 * This routine cleans up any Registration State Change Notification
3745 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
3746 * @vport together with the host_lock is used to prevent multiple thread
3747 * trying to access the RSCN array on a same @vport at the same time.
3748 **/
2957void 3749void
2958lpfc_els_flush_rscn(struct lpfc_vport *vport) 3750lpfc_els_flush_rscn(struct lpfc_vport *vport)
2959{ 3751{
@@ -2984,6 +3776,18 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
2984 vport->fc_rscn_flush = 0; 3776 vport->fc_rscn_flush = 0;
2985} 3777}
2986 3778
3779/**
3780 * lpfc_rscn_payload_check: Check whether there is a pending rscn to a did.
3781 * @vport: pointer to a host virtual N_Port data structure.
3782 * @did: remote destination port identifier.
3783 *
3784 * This routine checks whether there is any pending Registration State
3785 * Configuration Notification (RSCN) to a @did on @vport.
3786 *
3787 * Return code
3788 * None zero - The @did matched with a pending rscn
3789 * 0 - not able to match @did with a pending rscn
3790 **/
2987int 3791int
2988lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 3792lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2989{ 3793{
@@ -3053,6 +3857,17 @@ return_did_out:
3053 return did; 3857 return did;
3054} 3858}
3055 3859
3860/**
3861 * lpfc_rscn_recovery_check: Send recovery event to vport nodes matching rscn
3862 * @vport: pointer to a host virtual N_Port data structure.
3863 *
3864 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
3865 * state machine for a @vport's nodes that are with pending RSCN (Registration
3866 * State Change Notification).
3867 *
3868 * Return code
3869 * 0 - Successful (currently alway return 0)
3870 **/
3056static int 3871static int
3057lpfc_rscn_recovery_check(struct lpfc_vport *vport) 3872lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3058{ 3873{
@@ -3071,6 +3886,28 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3071 return 0; 3886 return 0;
3072} 3887}
3073 3888
3889/**
3890 * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
3891 * @vport: pointer to a host virtual N_Port data structure.
3892 * @cmdiocb: pointer to lpfc command iocb data structure.
3893 * @ndlp: pointer to a node-list data structure.
3894 *
3895 * This routine processes an unsolicited RSCN (Registration State Change
3896 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
3897 * to invoke fc_host_post_event() routine to the FC transport layer. If the
3898 * discover state machine is about to begin discovery, it just accepts the
3899 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
3900 * contains N_Port IDs for other vports on this HBA, it just accepts the
3901 * RSCN and ignore processing it. If the state machine is in the recovery
3902 * state, the fc_rscn_id_list of this @vport is walked and the
3903 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
3904 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
3905 * routine is invoked to handle the RSCN event.
3906 *
3907 * Return code
3908 * 0 - Just sent the acc response
3909 * 1 - Sent the acc response and waited for name server completion
3910 **/
3074static int 3911static int
3075lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3912lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3076 struct lpfc_nodelist *ndlp) 3913 struct lpfc_nodelist *ndlp)
@@ -3130,7 +3967,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3130 if (rscn_id == hba_id) { 3967 if (rscn_id == hba_id) {
3131 /* ALL NPortIDs in RSCN are on HBA */ 3968 /* ALL NPortIDs in RSCN are on HBA */
3132 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3969 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3133 "0214 Ignore RSCN " 3970 "0219 Ignore RSCN "
3134 "Data: x%x x%x x%x x%x\n", 3971 "Data: x%x x%x x%x x%x\n",
3135 vport->fc_flag, payload_len, 3972 vport->fc_flag, payload_len,
3136 *lp, vport->fc_rscn_id_cnt); 3973 *lp, vport->fc_rscn_id_cnt);
@@ -3241,6 +4078,22 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3241 return lpfc_els_handle_rscn(vport); 4078 return lpfc_els_handle_rscn(vport);
3242} 4079}
3243 4080
4081/**
4082 * lpfc_els_handle_rscn: Handle rscn for a vport.
4083 * @vport: pointer to a host virtual N_Port data structure.
4084 *
4085 * This routine handles the Registration State Configuration Notification
4086 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4087 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4088 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4089 * NameServer shall be issued. If CT command to the NameServer fails to be
4090 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4091 * RSCN activities with the @vport.
4092 *
4093 * Return code
4094 * 0 - Cleaned up rscn on the @vport
4095 * 1 - Wait for plogi to name server before proceed
4096 **/
3244int 4097int
3245lpfc_els_handle_rscn(struct lpfc_vport *vport) 4098lpfc_els_handle_rscn(struct lpfc_vport *vport)
3246{ 4099{
@@ -3313,6 +4166,31 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
3313 return 0; 4166 return 0;
3314} 4167}
3315 4168
4169/**
4170 * lpfc_els_rcv_flogi: Process an unsolicited flogi iocb.
4171 * @vport: pointer to a host virtual N_Port data structure.
4172 * @cmdiocb: pointer to lpfc command iocb data structure.
4173 * @ndlp: pointer to a node-list data structure.
4174 *
4175 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4176 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4177 * point topology. As an unsolicited FLOGI should not be received in a loop
4178 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4179 * lpfc_check_sparm() routine is invoked to check the parameters in the
4180 * unsolicited FLOGI. If parameters validation failed, the routine
4181 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4182 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4183 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4184 * will initiate PLOGI. The higher lexicographical value party shall has
4185 * higher priority (as the winning port) and will initiate PLOGI and
4186 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4187 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4188 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4189 *
4190 * Return code
4191 * 0 - Successfully processed the unsolicited flogi
4192 * 1 - Failed to process the unsolicited flogi
4193 **/
3316static int 4194static int
3317lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4195lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3318 struct lpfc_nodelist *ndlp) 4196 struct lpfc_nodelist *ndlp)
@@ -3402,6 +4280,22 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3402 return 0; 4280 return 0;
3403} 4281}
3404 4282
4283/**
4284 * lpfc_els_rcv_rnid: Process an unsolicited rnid iocb.
4285 * @vport: pointer to a host virtual N_Port data structure.
4286 * @cmdiocb: pointer to lpfc command iocb data structure.
4287 * @ndlp: pointer to a node-list data structure.
4288 *
4289 * This routine processes Request Node Identification Data (RNID) IOCB
4290 * received as an ELS unsolicited event. Only when the RNID specified format
4291 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
4292 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
4293 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4294 * rejected by invoking the lpfc_els_rsp_reject() routine.
4295 *
4296 * Return code
4297 * 0 - Successfully processed rnid iocb (currently always return 0)
4298 **/
3405static int 4299static int
3406lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4300lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3407 struct lpfc_nodelist *ndlp) 4301 struct lpfc_nodelist *ndlp)
@@ -3441,6 +4335,19 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3441 return 0; 4335 return 0;
3442} 4336}
3443 4337
4338/**
4339 * lpfc_els_rcv_lirr: Process an unsolicited lirr iocb.
4340 * @vport: pointer to a host virtual N_Port data structure.
4341 * @cmdiocb: pointer to lpfc command iocb data structure.
4342 * @ndlp: pointer to a node-list data structure.
4343 *
4344 * This routine processes a Link Incident Report Registration(LIRR) IOCB
4345 * received as an ELS unsolicited event. Currently, this function just invokes
4346 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4347 *
4348 * Return code
4349 * 0 - Successfully processed lirr iocb (currently always return 0)
4350 **/
3444static int 4351static int
3445lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4352lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3446 struct lpfc_nodelist *ndlp) 4353 struct lpfc_nodelist *ndlp)
@@ -3456,6 +4363,25 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3456 return 0; 4363 return 0;
3457} 4364}
3458 4365
4366/**
4367 * lpfc_els_rsp_rps_acc: Completion callbk func for MBX_READ_LNK_STAT mbox cmd.
4368 * @phba: pointer to lpfc hba data structure.
4369 * @pmb: pointer to the driver internal queue element for mailbox command.
4370 *
4371 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4372 * mailbox command. This callback function is to actually send the Accept
4373 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4374 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4375 * mailbox command, constructs the RPS response with the link statistics
4376 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4377 * response to the RPS.
4378 *
4379 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4380 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4381 * will be stored into the context1 field of the IOCB for the completion
4382 * callback function to the RPS Accept Response ELS IOCB command.
4383 *
4384 **/
3459static void 4385static void
3460lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4386lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3461{ 4387{
@@ -3531,6 +4457,24 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3531 return; 4457 return;
3532} 4458}
3533 4459
4460/**
4461 * lpfc_els_rcv_rps: Process an unsolicited rps iocb.
4462 * @vport: pointer to a host virtual N_Port data structure.
4463 * @cmdiocb: pointer to lpfc command iocb data structure.
4464 * @ndlp: pointer to a node-list data structure.
4465 *
4466 * This routine processes Read Port Status (RPS) IOCB received as an
4467 * ELS unsolicited event. It first checks the remote port state. If the
4468 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4469 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
4470 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
4471 * for reading the HBA link statistics. It is for the callback function,
4472 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
4473 * to actually sending out RPS Accept (ACC) response.
4474 *
4475 * Return codes
4476 * 0 - Successfully processed rps iocb (currently always return 0)
4477 **/
3534static int 4478static int
3535lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4479lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3536 struct lpfc_nodelist *ndlp) 4480 struct lpfc_nodelist *ndlp)
@@ -3544,14 +4488,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3544 struct ls_rjt stat; 4488 struct ls_rjt stat;
3545 4489
3546 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 4490 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3547 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 4491 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
3548 stat.un.b.lsRjtRsvd0 = 0; 4492 /* reject the unsolicited RPS request and done with it */
3549 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4493 goto reject_out;
3550 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3551 stat.un.b.vendorUnique = 0;
3552 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3553 NULL);
3554 }
3555 4494
3556 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4495 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3557 lp = (uint32_t *) pcmd->virt; 4496 lp = (uint32_t *) pcmd->virt;
@@ -3584,6 +4523,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3584 mempool_free(mbox, phba->mbox_mem_pool); 4523 mempool_free(mbox, phba->mbox_mem_pool);
3585 } 4524 }
3586 } 4525 }
4526
4527reject_out:
4528 /* issue rejection response */
3587 stat.un.b.lsRjtRsvd0 = 0; 4529 stat.un.b.lsRjtRsvd0 = 0;
3588 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4530 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3589 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4531 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
@@ -3592,6 +4534,25 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3592 return 0; 4534 return 0;
3593} 4535}
3594 4536
4537/**
4538 * lpfc_els_rsp_rpl_acc: Issue an accept rpl els command.
4539 * @vport: pointer to a host virtual N_Port data structure.
4540 * @cmdsize: size of the ELS command.
4541 * @oldiocb: pointer to the original lpfc command iocb data structure.
4542 * @ndlp: pointer to a node-list data structure.
4543 *
4544 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
4545 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
4546 *
4547 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4548 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4549 * will be stored into the context1 field of the IOCB for the completion
4550 * callback function to the RPL Accept Response ELS command.
4551 *
4552 * Return code
4553 * 0 - Successfully issued ACC RPL ELS command
4554 * 1 - Failed to issue ACC RPL ELS command
4555 **/
3595static int 4556static int
3596lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 4557lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
3597 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4558 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -3645,6 +4606,22 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
3645 return 0; 4606 return 0;
3646} 4607}
3647 4608
4609/**
4610 * lpfc_els_rcv_rpl: Process an unsolicited rpl iocb.
4611 * @vport: pointer to a host virtual N_Port data structure.
4612 * @cmdiocb: pointer to lpfc command iocb data structure.
4613 * @ndlp: pointer to a node-list data structure.
4614 *
4615 * This routine processes Read Port List (RPL) IOCB received as an ELS
4616 * unsolicited event. It first checks the remote port state. If the remote
4617 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
4618 * invokes the lpfc_els_rsp_reject() routine to send reject response.
4619 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
4620 * to accept the RPL.
4621 *
4622 * Return code
4623 * 0 - Successfully processed rpl iocb (currently always return 0)
4624 **/
3648static int 4625static int
3649lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4626lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3650 struct lpfc_nodelist *ndlp) 4627 struct lpfc_nodelist *ndlp)
@@ -3658,12 +4635,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3658 4635
3659 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 4636 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3660 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 4637 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
4638 /* issue rejection response */
3661 stat.un.b.lsRjtRsvd0 = 0; 4639 stat.un.b.lsRjtRsvd0 = 0;
3662 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4640 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3663 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4641 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3664 stat.un.b.vendorUnique = 0; 4642 stat.un.b.vendorUnique = 0;
3665 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 4643 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3666 NULL); 4644 NULL);
4645 /* rejected the unsolicited RPL request and done with it */
4646 return 0;
3667 } 4647 }
3668 4648
3669 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4649 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3685,6 +4665,30 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3685 return 0; 4665 return 0;
3686} 4666}
3687 4667
4668/**
4669 * lpfc_els_rcv_farp: Process an unsolicited farp request els command.
4670 * @vport: pointer to a virtual N_Port data structure.
4671 * @cmdiocb: pointer to lpfc command iocb data structure.
4672 * @ndlp: pointer to a node-list data structure.
4673 *
4674 * This routine processes Fibre Channel Address Resolution Protocol
4675 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
4676 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
4677 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
4678 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
4679 * remote PortName is compared against the FC PortName stored in the @vport
4680 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
4681 * compared against the FC NodeName stored in the @vport data structure.
4682 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
4683 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
4684 * invoked to send out FARP Response to the remote node. Before sending the
4685 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
4686 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
4687 * routine is invoked to log into the remote port first.
4688 *
4689 * Return code
4690 * 0 - Either the FARP Match Mode not supported or successfully processed
4691 **/
3688static int 4692static int
3689lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4693lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3690 struct lpfc_nodelist *ndlp) 4694 struct lpfc_nodelist *ndlp)
@@ -3744,6 +4748,20 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3744 return 0; 4748 return 0;
3745} 4749}
3746 4750
4751/**
4752 * lpfc_els_rcv_farpr: Process an unsolicited farp response iocb.
4753 * @vport: pointer to a host virtual N_Port data structure.
4754 * @cmdiocb: pointer to lpfc command iocb data structure.
4755 * @ndlp: pointer to a node-list data structure.
4756 *
4757 * This routine processes Fibre Channel Address Resolution Protocol
4758 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
4759 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
4760 * the FARP response request.
4761 *
4762 * Return code
4763 * 0 - Successfully processed FARPR IOCB (currently always return 0)
4764 **/
3747static int 4765static int
3748lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4766lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3749 struct lpfc_nodelist *ndlp) 4767 struct lpfc_nodelist *ndlp)
@@ -3768,6 +4786,25 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3768 return 0; 4786 return 0;
3769} 4787}
3770 4788
4789/**
4790 * lpfc_els_rcv_fan: Process an unsolicited fan iocb command.
4791 * @vport: pointer to a host virtual N_Port data structure.
4792 * @cmdiocb: pointer to lpfc command iocb data structure.
4793 * @fan_ndlp: pointer to a node-list data structure.
4794 *
4795 * This routine processes a Fabric Address Notification (FAN) IOCB
4796 * command received as an ELS unsolicited event. The FAN ELS command will
4797 * only be processed on a physical port (i.e., the @vport represents the
4798 * physical port). The fabric NodeName and PortName from the FAN IOCB are
4799 * compared against those in the phba data structure. If any of those is
4800 * different, the lpfc_initial_flogi() routine is invoked to initialize
4801 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
4802 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
4803 * is invoked to register login to the fabric.
4804 *
4805 * Return code
4806 * 0 - Successfully processed fan iocb (currently always return 0).
4807 **/
3771static int 4808static int
3772lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4809lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3773 struct lpfc_nodelist *fan_ndlp) 4810 struct lpfc_nodelist *fan_ndlp)
@@ -3797,6 +4834,16 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3797 return 0; 4834 return 0;
3798} 4835}
3799 4836
4837/**
4838 * lpfc_els_timeout: Handler funciton to the els timer.
4839 * @ptr: holder for the timer function associated data.
4840 *
4841 * This routine is invoked by the ELS timer after timeout. It posts the ELS
4842 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
4843 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
4844 * up the worker thread. It is for the worker thread to invoke the routine
4845 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
4846 **/
3800void 4847void
3801lpfc_els_timeout(unsigned long ptr) 4848lpfc_els_timeout(unsigned long ptr)
3802{ 4849{
@@ -3816,6 +4863,15 @@ lpfc_els_timeout(unsigned long ptr)
3816 return; 4863 return;
3817} 4864}
3818 4865
4866/**
4867 * lpfc_els_timeout_handler: Process an els timeout event.
4868 * @vport: pointer to a virtual N_Port data structure.
4869 *
4870 * This routine is the actual handler function that processes an ELS timeout
4871 * event. It walks the ELS ring to get and abort all the IOCBs (except the
4872 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
4873 * invoking the lpfc_sli_issue_abort_iotag() routine.
4874 **/
3819void 4875void
3820lpfc_els_timeout_handler(struct lpfc_vport *vport) 4876lpfc_els_timeout_handler(struct lpfc_vport *vport)
3821{ 4877{
@@ -3886,6 +4942,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
3886 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 4942 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
3887} 4943}
3888 4944
4945/**
4946 * lpfc_els_flush_cmd: Clean up the outstanding els commands to a vport.
4947 * @vport: pointer to a host virtual N_Port data structure.
4948 *
4949 * This routine is used to clean up all the outstanding ELS commands on a
4950 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
4951 * routine. After that, it walks the ELS transmit queue to remove all the
4952 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
4953 * the IOCBs with a non-NULL completion callback function, the callback
4954 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
4955 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
4956 * callback function, the IOCB will simply be released. Finally, it walks
4957 * the ELS transmit completion queue to issue an abort IOCB to any transmit
4958 * completion queue IOCB that is associated with the @vport and is not
4959 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
4960 * part of the discovery state machine) out to HBA by invoking the
4961 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
4962 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
4963 * the IOCBs are aborted when this function returns.
4964 **/
3889void 4965void
3890lpfc_els_flush_cmd(struct lpfc_vport *vport) 4966lpfc_els_flush_cmd(struct lpfc_vport *vport)
3891{ 4967{
@@ -3948,6 +5024,23 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
3948 return; 5024 return;
3949} 5025}
3950 5026
5027/**
5028 * lpfc_els_flush_all_cmd: Clean up all the outstanding els commands to a HBA.
5029 * @phba: pointer to lpfc hba data structure.
5030 *
5031 * This routine is used to clean up all the outstanding ELS commands on a
5032 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
5033 * routine. After that, it walks the ELS transmit queue to remove all the
5034 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
5035 * the IOCBs with the completion callback function associated, the callback
5036 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5037 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
5038 * callback function associated, the IOCB will simply be released. Finally,
5039 * it walks the ELS transmit completion queue to issue an abort IOCB to any
5040 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
5041 * management plane IOCBs that are not part of the discovery state machine)
5042 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
5043 **/
3951void 5044void
3952lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 5045lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
3953{ 5046{
@@ -3992,6 +5085,130 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
3992 return; 5085 return;
3993} 5086}
3994 5087
5088/**
5089 * lpfc_send_els_failure_event: Posts an ELS command failure event.
5090 * @phba: Pointer to hba context object.
5091 * @cmdiocbp: Pointer to command iocb which reported error.
5092 * @rspiocbp: Pointer to response iocb which reported error.
5093 *
5094 * This function sends an event when there is an ELS command
5095 * failure.
5096 **/
5097void
5098lpfc_send_els_failure_event(struct lpfc_hba *phba,
5099 struct lpfc_iocbq *cmdiocbp,
5100 struct lpfc_iocbq *rspiocbp)
5101{
5102 struct lpfc_vport *vport = cmdiocbp->vport;
5103 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5104 struct lpfc_lsrjt_event lsrjt_event;
5105 struct lpfc_fabric_event_header fabric_event;
5106 struct ls_rjt stat;
5107 struct lpfc_nodelist *ndlp;
5108 uint32_t *pcmd;
5109
5110 ndlp = cmdiocbp->context1;
5111 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5112 return;
5113
5114 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
5115 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
5116 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
5117 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
5118 sizeof(struct lpfc_name));
5119 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
5120 sizeof(struct lpfc_name));
5121 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5122 cmdiocbp->context2)->virt);
5123 lsrjt_event.command = *pcmd;
5124 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5125 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5126 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
5127 fc_host_post_vendor_event(shost,
5128 fc_get_event_number(),
5129 sizeof(lsrjt_event),
5130 (char *)&lsrjt_event,
5131 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
5132 return;
5133 }
5134 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
5135 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
5136 fabric_event.event_type = FC_REG_FABRIC_EVENT;
5137 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
5138 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
5139 else
5140 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
5141 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
5142 sizeof(struct lpfc_name));
5143 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
5144 sizeof(struct lpfc_name));
5145 fc_host_post_vendor_event(shost,
5146 fc_get_event_number(),
5147 sizeof(fabric_event),
5148 (char *)&fabric_event,
5149 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
5150 return;
5151 }
5152
5153}
5154
5155/**
5156 * lpfc_send_els_event: Posts unsolicited els event.
5157 * @vport: Pointer to vport object.
5158 * @ndlp: Pointer FC node object.
5159 * @cmd: ELS command code.
5160 *
5161 * This function posts an event when there is an incoming
5162 * unsolicited ELS command.
5163 **/
5164static void
5165lpfc_send_els_event(struct lpfc_vport *vport,
5166 struct lpfc_nodelist *ndlp,
5167 uint32_t cmd)
5168{
5169 struct lpfc_els_event_header els_data;
5170 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5171
5172 els_data.event_type = FC_REG_ELS_EVENT;
5173 switch (cmd) {
5174 case ELS_CMD_PLOGI:
5175 els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
5176 break;
5177 case ELS_CMD_PRLO:
5178 els_data.subcategory = LPFC_EVENT_PRLO_RCV;
5179 break;
5180 case ELS_CMD_ADISC:
5181 els_data.subcategory = LPFC_EVENT_ADISC_RCV;
5182 break;
5183 default:
5184 return;
5185 }
5186 memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5187 memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5188 fc_host_post_vendor_event(shost,
5189 fc_get_event_number(),
5190 sizeof(els_data),
5191 (char *)&els_data,
5192 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
5193
5194 return;
5195}
5196
5197
5198/**
5199 * lpfc_els_unsol_buffer: Process an unsolicited event data buffer.
5200 * @phba: pointer to lpfc hba data structure.
5201 * @pring: pointer to a SLI ring.
5202 * @vport: pointer to a host virtual N_Port data structure.
5203 * @elsiocb: pointer to lpfc els command iocb data structure.
5204 *
5205 * This routine is used for processing the IOCB associated with a unsolicited
5206 * event. It first determines whether there is an existing ndlp that matches
5207 * the DID from the unsolicited IOCB. If not, it will create a new one with
5208 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
5209 * IOCB is then used to invoke the proper routine and to set up proper state
5210 * of the discovery state machine.
5211 **/
3995static void 5212static void
3996lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5213lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3997 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 5214 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
@@ -4059,8 +5276,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4059 } 5276 }
4060 5277
4061 phba->fc_stat.elsRcvFrame++; 5278 phba->fc_stat.elsRcvFrame++;
4062 if (elsiocb->context1)
4063 lpfc_nlp_put(elsiocb->context1);
4064 5279
4065 elsiocb->context1 = lpfc_nlp_get(ndlp); 5280 elsiocb->context1 = lpfc_nlp_get(ndlp);
4066 elsiocb->vport = vport; 5281 elsiocb->vport = vport;
@@ -4081,6 +5296,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4081 phba->fc_stat.elsRcvPLOGI++; 5296 phba->fc_stat.elsRcvPLOGI++;
4082 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 5297 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4083 5298
5299 lpfc_send_els_event(vport, ndlp, cmd);
4084 if (vport->port_state < LPFC_DISC_AUTH) { 5300 if (vport->port_state < LPFC_DISC_AUTH) {
4085 if (!(phba->pport->fc_flag & FC_PT2PT) || 5301 if (!(phba->pport->fc_flag & FC_PT2PT) ||
4086 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 5302 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -4130,6 +5346,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4130 did, vport->port_state, ndlp->nlp_flag); 5346 did, vport->port_state, ndlp->nlp_flag);
4131 5347
4132 phba->fc_stat.elsRcvPRLO++; 5348 phba->fc_stat.elsRcvPRLO++;
5349 lpfc_send_els_event(vport, ndlp, cmd);
4133 if (vport->port_state < LPFC_DISC_AUTH) { 5350 if (vport->port_state < LPFC_DISC_AUTH) {
4134 rjt_err = LSRJT_UNABLE_TPC; 5351 rjt_err = LSRJT_UNABLE_TPC;
4135 break; 5352 break;
@@ -4147,6 +5364,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4147 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 5364 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
4148 did, vport->port_state, ndlp->nlp_flag); 5365 did, vport->port_state, ndlp->nlp_flag);
4149 5366
5367 lpfc_send_els_event(vport, ndlp, cmd);
4150 phba->fc_stat.elsRcvADISC++; 5368 phba->fc_stat.elsRcvADISC++;
4151 if (vport->port_state < LPFC_DISC_AUTH) { 5369 if (vport->port_state < LPFC_DISC_AUTH) {
4152 rjt_err = LSRJT_UNABLE_TPC; 5370 rjt_err = LSRJT_UNABLE_TPC;
@@ -4270,6 +5488,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4270 NULL); 5488 NULL);
4271 } 5489 }
4272 5490
5491 lpfc_nlp_put(elsiocb->context1);
5492 elsiocb->context1 = NULL;
4273 return; 5493 return;
4274 5494
4275dropit: 5495dropit:
@@ -4282,6 +5502,19 @@ dropit:
4282 phba->fc_stat.elsRcvDrop++; 5502 phba->fc_stat.elsRcvDrop++;
4283} 5503}
4284 5504
5505/**
5506 * lpfc_find_vport_by_vpid: Find a vport on a HBA through vport identifier.
5507 * @phba: pointer to lpfc hba data structure.
5508 * @vpi: host virtual N_Port identifier.
5509 *
5510 * This routine finds a vport on a HBA (referred by @phba) through a
5511 * @vpi. The function walks the HBA's vport list and returns the address
5512 * of the vport with the matching @vpi.
5513 *
5514 * Return code
5515 * NULL - No vport with the matching @vpi found
5516 * Otherwise - Address to the vport with the matching @vpi.
5517 **/
4285static struct lpfc_vport * 5518static struct lpfc_vport *
4286lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 5519lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4287{ 5520{
@@ -4299,6 +5532,18 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4299 return NULL; 5532 return NULL;
4300} 5533}
4301 5534
5535/**
5536 * lpfc_els_unsol_event: Process an unsolicited event from an els sli ring.
5537 * @phba: pointer to lpfc hba data structure.
5538 * @pring: pointer to a SLI ring.
5539 * @elsiocb: pointer to lpfc els iocb data structure.
5540 *
5541 * This routine is used to process an unsolicited event received from a SLI
5542 * (Service Level Interface) ring. The actual processing of the data buffer
5543 * associated with the unsolicited event is done by invoking the routine
5544 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
5545 * SLI ring on which the unsolicited event was received.
5546 **/
4302void 5547void
4303lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5548lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4304 struct lpfc_iocbq *elsiocb) 5549 struct lpfc_iocbq *elsiocb)
@@ -4309,6 +5554,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4309 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 5554 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4310 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 5555 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4311 5556
5557 elsiocb->context1 = NULL;
4312 elsiocb->context2 = NULL; 5558 elsiocb->context2 = NULL;
4313 elsiocb->context3 = NULL; 5559 elsiocb->context3 = NULL;
4314 5560
@@ -4356,8 +5602,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4356 * The different unsolicited event handlers would tell us 5602 * The different unsolicited event handlers would tell us
4357 * if they are done with "mp" by setting context2 to NULL. 5603 * if they are done with "mp" by setting context2 to NULL.
4358 */ 5604 */
4359 lpfc_nlp_put(elsiocb->context1);
4360 elsiocb->context1 = NULL;
4361 if (elsiocb->context2) { 5605 if (elsiocb->context2) {
4362 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 5606 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
4363 elsiocb->context2 = NULL; 5607 elsiocb->context2 = NULL;
@@ -4376,6 +5620,19 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4376 } 5620 }
4377} 5621}
4378 5622
5623/**
5624 * lpfc_do_scr_ns_plogi: Issue a plogi to the name server for scr.
5625 * @phba: pointer to lpfc hba data structure.
5626 * @vport: pointer to a virtual N_Port data structure.
5627 *
5628 * This routine issues a Port Login (PLOGI) to the Name Server with
5629 * State Change Request (SCR) for a @vport. This routine will create an
5630 * ndlp for the Name Server associated to the @vport if such node does
5631 * not already exist. The PLOGI to Name Server is issued by invoking the
5632 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
5633 * (FDMI) is configured to the @vport, a FDMI node will be created and
5634 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
5635 **/
4379void 5636void
4380lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 5637lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4381{ 5638{
@@ -4434,6 +5691,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4434 return; 5691 return;
4435} 5692}
4436 5693
5694/**
5695 * lpfc_cmpl_reg_new_vport: Completion callback function to register new vport.
5696 * @phba: pointer to lpfc hba data structure.
5697 * @pmb: pointer to the driver internal queue element for mailbox command.
5698 *
5699 * This routine is the completion callback function to register new vport
5700 * mailbox command. If the new vport mailbox command completes successfully,
5701 * the fabric registration login shall be performed on physical port (the
5702 * new vport created is actually a physical port, with VPI 0) or the port
5703 * login to Name Server for State Change Request (SCR) will be performed
5704 * on virtual port (real virtual port, with VPI greater than 0).
5705 **/
4437static void 5706static void
4438lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5707lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4439{ 5708{
@@ -4491,6 +5760,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4491 return; 5760 return;
4492} 5761}
4493 5762
5763/**
5764 * lpfc_register_new_vport: Register a new vport with a HBA.
5765 * @phba: pointer to lpfc hba data structure.
5766 * @vport: pointer to a host virtual N_Port data structure.
5767 * @ndlp: pointer to a node-list data structure.
5768 *
5769 * This routine registers the @vport as a new virtual port with a HBA.
5770 * It is done through a registering vpi mailbox command.
5771 **/
4494static void 5772static void
4495lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 5773lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4496 struct lpfc_nodelist *ndlp) 5774 struct lpfc_nodelist *ndlp)
@@ -4531,6 +5809,26 @@ mbox_err_exit:
4531 return; 5809 return;
4532} 5810}
4533 5811
5812/**
5813 * lpfc_cmpl_els_fdisc: Completion function for fdisc iocb command.
5814 * @phba: pointer to lpfc hba data structure.
5815 * @cmdiocb: pointer to lpfc command iocb data structure.
5816 * @rspiocb: pointer to lpfc response iocb data structure.
5817 *
5818 * This routine is the completion callback function to a Fabric Discover
5819 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
5820 * single threaded, each FDISC completion callback function will reset
5821 * the discovery timer for all vports such that the timers will not get
5822 * unnecessary timeout. The function checks the FDISC IOCB status. If error
5823 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
5824 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
5825 * assigned to the vport has been changed with the completion of the FDISC
5826 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
5827 * are unregistered from the HBA, and then the lpfc_register_new_vport()
5828 * routine is invoked to register new vport with the HBA. Otherwise, the
5829 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
5830 * Server for State Change Request (SCR).
5831 **/
4534static void 5832static void
4535lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5833lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4536 struct lpfc_iocbq *rspiocb) 5834 struct lpfc_iocbq *rspiocb)
@@ -4565,58 +5863,80 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4565 goto out; 5863 goto out;
4566 /* FDISC failed */ 5864 /* FDISC failed */
4567 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 5865 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4568 "0124 FDISC failed. (%d/%d)\n", 5866 "0126 FDISC failed. (%d/%d)\n",
4569 irsp->ulpStatus, irsp->un.ulpWord[4]); 5867 irsp->ulpStatus, irsp->un.ulpWord[4]);
5868 goto fdisc_failed;
5869 }
4570 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) 5870 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4571 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 5871 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4572 lpfc_nlp_put(ndlp); 5872 lpfc_nlp_put(ndlp);
4573 /* giving up on FDISC. Cancel discovery timer */ 5873 /* giving up on FDISC. Cancel discovery timer */
4574 lpfc_can_disctmo(vport); 5874 lpfc_can_disctmo(vport);
4575 } else { 5875 spin_lock_irq(shost->host_lock);
4576 spin_lock_irq(shost->host_lock); 5876 vport->fc_flag |= FC_FABRIC;
4577 vport->fc_flag |= FC_FABRIC; 5877 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4578 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 5878 vport->fc_flag |= FC_PUBLIC_LOOP;
4579 vport->fc_flag |= FC_PUBLIC_LOOP; 5879 spin_unlock_irq(shost->host_lock);
4580 spin_unlock_irq(shost->host_lock);
4581 5880
4582 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 5881 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4583 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 5882 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4584 if ((vport->fc_prevDID != vport->fc_myDID) && 5883 if ((vport->fc_prevDID != vport->fc_myDID) &&
4585 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 5884 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4586 /* If our NportID changed, we need to ensure all 5885 /* If our NportID changed, we need to ensure all
4587 * remaining NPORTs get unreg_login'ed so we can 5886 * remaining NPORTs get unreg_login'ed so we can
4588 * issue unreg_vpi. 5887 * issue unreg_vpi.
4589 */ 5888 */
4590 list_for_each_entry_safe(np, next_np, 5889 list_for_each_entry_safe(np, next_np,
4591 &vport->fc_nodes, nlp_listp) { 5890 &vport->fc_nodes, nlp_listp) {
4592 if (!NLP_CHK_NODE_ACT(ndlp) || 5891 if (!NLP_CHK_NODE_ACT(ndlp) ||
4593 (np->nlp_state != NLP_STE_NPR_NODE) || 5892 (np->nlp_state != NLP_STE_NPR_NODE) ||
4594 !(np->nlp_flag & NLP_NPR_ADISC)) 5893 !(np->nlp_flag & NLP_NPR_ADISC))
4595 continue; 5894 continue;
4596 spin_lock_irq(shost->host_lock);
4597 np->nlp_flag &= ~NLP_NPR_ADISC;
4598 spin_unlock_irq(shost->host_lock);
4599 lpfc_unreg_rpi(vport, np);
4600 }
4601 lpfc_mbx_unreg_vpi(vport);
4602 spin_lock_irq(shost->host_lock); 5895 spin_lock_irq(shost->host_lock);
4603 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 5896 np->nlp_flag &= ~NLP_NPR_ADISC;
4604 spin_unlock_irq(shost->host_lock); 5897 spin_unlock_irq(shost->host_lock);
5898 lpfc_unreg_rpi(vport, np);
4605 } 5899 }
4606 5900 lpfc_mbx_unreg_vpi(vport);
4607 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 5901 spin_lock_irq(shost->host_lock);
4608 lpfc_register_new_vport(phba, vport, ndlp); 5902 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4609 else 5903 spin_unlock_irq(shost->host_lock);
4610 lpfc_do_scr_ns_plogi(phba, vport);
4611
4612 /* Unconditionaly kick off releasing fabric node for vports */
4613 lpfc_nlp_put(ndlp);
4614 } 5904 }
4615 5905
5906 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
5907 lpfc_register_new_vport(phba, vport, ndlp);
5908 else
5909 lpfc_do_scr_ns_plogi(phba, vport);
5910 goto out;
5911fdisc_failed:
5912 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5913 /* Cancel discovery timer */
5914 lpfc_can_disctmo(vport);
5915 lpfc_nlp_put(ndlp);
4616out: 5916out:
4617 lpfc_els_free_iocb(phba, cmdiocb); 5917 lpfc_els_free_iocb(phba, cmdiocb);
4618} 5918}
4619 5919
5920/**
5921 * lpfc_issue_els_fdisc: Issue a fdisc iocb command.
5922 * @vport: pointer to a virtual N_Port data structure.
5923 * @ndlp: pointer to a node-list data structure.
5924 * @retry: number of retries to the command IOCB.
5925 *
5926 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
5927 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
5928 * routine to issue the IOCB, which makes sure only one outstanding fabric
5929 * IOCB will be sent off HBA at any given time.
5930 *
5931 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5932 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5933 * will be stored into the context1 field of the IOCB for the completion
5934 * callback function to the FDISC ELS command.
5935 *
5936 * Return code
5937 * 0 - Successfully issued fdisc iocb command
5938 * 1 - Failed to issue fdisc iocb command
5939 **/
4620static int 5940static int
4621lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5941lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4622 uint8_t retry) 5942 uint8_t retry)
@@ -4691,6 +6011,20 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4691 return 0; 6011 return 0;
4692} 6012}
4693 6013
6014/**
6015 * lpfc_cmpl_els_npiv_logo: Completion function with vport logo.
6016 * @phba: pointer to lpfc hba data structure.
6017 * @cmdiocb: pointer to lpfc command iocb data structure.
6018 * @rspiocb: pointer to lpfc response iocb data structure.
6019 *
6020 * This routine is the completion callback function to the issuing of a LOGO
6021 * ELS command off a vport. It frees the command IOCB and then decrement the
6022 * reference count held on ndlp for this completion function, indicating that
6023 * the reference to the ndlp is no long needed. Note that the
6024 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
6025 * callback function and an additional explicit ndlp reference decrementation
6026 * will trigger the actual release of the ndlp.
6027 **/
4694static void 6028static void
4695lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 6029lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4696 struct lpfc_iocbq *rspiocb) 6030 struct lpfc_iocbq *rspiocb)
@@ -4712,6 +6046,22 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4712 lpfc_nlp_put(ndlp); 6046 lpfc_nlp_put(ndlp);
4713} 6047}
4714 6048
6049/**
6050 * lpfc_issue_els_npiv_logo: Issue a logo off a vport.
6051 * @vport: pointer to a virtual N_Port data structure.
6052 * @ndlp: pointer to a node-list data structure.
6053 *
6054 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
6055 *
6056 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6057 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6058 * will be stored into the context1 field of the IOCB for the completion
6059 * callback function to the LOGO ELS command.
6060 *
6061 * Return codes
6062 * 0 - Successfully issued logo off the @vport
6063 * 1 - Failed to issue logo off the @vport
6064 **/
4715int 6065int
4716lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6066lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4717{ 6067{
@@ -4757,6 +6107,17 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4757 return 0; 6107 return 0;
4758} 6108}
4759 6109
6110/**
6111 * lpfc_fabric_block_timeout: Handler function to the fabric block timer.
6112 * @ptr: holder for the timer function associated data.
6113 *
6114 * This routine is invoked by the fabric iocb block timer after
6115 * timeout. It posts the fabric iocb block timeout event by setting the
6116 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
6117 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
6118 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
6119 * posted event WORKER_FABRIC_BLOCK_TMO.
6120 **/
4760void 6121void
4761lpfc_fabric_block_timeout(unsigned long ptr) 6122lpfc_fabric_block_timeout(unsigned long ptr)
4762{ 6123{
@@ -4775,6 +6136,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
4775 return; 6136 return;
4776} 6137}
4777 6138
6139/**
6140 * lpfc_resume_fabric_iocbs: Issue a fabric iocb from driver internal list.
6141 * @phba: pointer to lpfc hba data structure.
6142 *
6143 * This routine issues one fabric iocb from the driver internal list to
6144 * the HBA. It first checks whether it's ready to issue one fabric iocb to
6145 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
6146 * remove one pending fabric iocb from the driver internal list and invokes
6147 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
6148 **/
4778static void 6149static void
4779lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 6150lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4780{ 6151{
@@ -4824,6 +6195,15 @@ repeat:
4824 return; 6195 return;
4825} 6196}
4826 6197
6198/**
6199 * lpfc_unblock_fabric_iocbs: Unblock issuing fabric iocb command.
6200 * @phba: pointer to lpfc hba data structure.
6201 *
6202 * This routine unblocks the issuing fabric iocb command. The function
6203 * will clear the fabric iocb block bit and then invoke the routine
6204 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
6205 * from the driver internal fabric iocb list.
6206 **/
4827void 6207void
4828lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 6208lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4829{ 6209{
@@ -4833,6 +6213,15 @@ lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4833 return; 6213 return;
4834} 6214}
4835 6215
6216/**
6217 * lpfc_block_fabric_iocbs: Block issuing fabric iocb command.
6218 * @phba: pointer to lpfc hba data structure.
6219 *
6220 * This routine blocks the issuing fabric iocb for a specified amount of
6221 * time (currently 100 ms). This is done by set the fabric iocb block bit
6222 * and set up a timeout timer for 100ms. When the block bit is set, no more
6223 * fabric iocb will be issued out of the HBA.
6224 **/
4836static void 6225static void
4837lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 6226lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4838{ 6227{
@@ -4846,6 +6235,19 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4846 return; 6235 return;
4847} 6236}
4848 6237
6238/**
6239 * lpfc_cmpl_fabric_iocb: Completion callback function for fabric iocb.
6240 * @phba: pointer to lpfc hba data structure.
6241 * @cmdiocb: pointer to lpfc command iocb data structure.
6242 * @rspiocb: pointer to lpfc response iocb data structure.
6243 *
6244 * This routine is the callback function that is put to the fabric iocb's
6245 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
6246 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
6247 * function first restores and invokes the original iocb's callback function
6248 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
6249 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
6250 **/
4849static void 6251static void
4850lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 6252lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4851 struct lpfc_iocbq *rspiocb) 6253 struct lpfc_iocbq *rspiocb)
@@ -4892,6 +6294,30 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4892 } 6294 }
4893} 6295}
4894 6296
6297/**
6298 * lpfc_issue_fabric_iocb: Issue a fabric iocb command.
6299 * @phba: pointer to lpfc hba data structure.
6300 * @iocb: pointer to lpfc command iocb data structure.
6301 *
6302 * This routine is used as the top-level API for issuing a fabric iocb command
6303 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
6304 * function makes sure that only one fabric bound iocb will be outstanding at
6305 * any given time. As such, this function will first check to see whether there
6306 * is already an outstanding fabric iocb on the wire. If so, it will put the
6307 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
6308 * issued later. Otherwise, it will issue the iocb on the wire and update the
6309 * fabric iocb count it indicate that there is one fabric iocb on the wire.
6310 *
6311 * Note, this implementation has a potential sending out fabric IOCBs out of
6312 * order. The problem is caused by the construction of the "ready" boolen does
6313 * not include the condition that the internal fabric IOCB list is empty. As
6314 * such, it is possible a fabric IOCB issued by this routine might be "jump"
6315 * ahead of the fabric IOCBs in the internal list.
6316 *
6317 * Return code
6318 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
6319 * IOCB_ERROR - failed to issue fabric iocb
6320 **/
4895static int 6321static int
4896lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6322lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4897{ 6323{
@@ -4937,7 +6363,17 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4937 return ret; 6363 return ret;
4938} 6364}
4939 6365
4940 6366/**
6367 * lpfc_fabric_abort_vport: Abort a vport's iocbs from driver fabric iocb list.
6368 * @vport: pointer to a virtual N_Port data structure.
6369 *
6370 * This routine aborts all the IOCBs associated with a @vport from the
6371 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6372 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6373 * list, removes each IOCB associated with the @vport off the list, set the
6374 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
6375 * associated with the IOCB.
6376 **/
4941static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 6377static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4942{ 6378{
4943 LIST_HEAD(completions); 6379 LIST_HEAD(completions);
@@ -4967,6 +6403,17 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4967 } 6403 }
4968} 6404}
4969 6405
6406/**
6407 * lpfc_fabric_abort_nport: Abort a ndlp's iocbs from driver fabric iocb list.
6408 * @ndlp: pointer to a node-list data structure.
6409 *
6410 * This routine aborts all the IOCBs associated with an @ndlp from the
6411 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6412 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6413 * list, removes each IOCB associated with the @ndlp off the list, set the
6414 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
6415 * associated with the IOCB.
6416 **/
4970void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 6417void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4971{ 6418{
4972 LIST_HEAD(completions); 6419 LIST_HEAD(completions);
@@ -4996,6 +6443,17 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4996 } 6443 }
4997} 6444}
4998 6445
6446/**
6447 * lpfc_fabric_abort_hba: Abort all iocbs on driver fabric iocb list.
6448 * @phba: pointer to lpfc hba data structure.
6449 *
6450 * This routine aborts all the IOCBs currently on the driver internal
6451 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
6452 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
6453 * list, removes IOCBs off the list, set the status feild to
6454 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
6455 * the IOCB.
6456 **/
4999void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 6457void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
5000{ 6458{
5001 LIST_HEAD(completions); 6459 LIST_HEAD(completions);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a98d11bf3576..a1a70d9ffc2a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -30,6 +30,7 @@
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw.h" 32#include "lpfc_hw.h"
33#include "lpfc_nl.h"
33#include "lpfc_disc.h" 34#include "lpfc_disc.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
35#include "lpfc_scsi.h" 36#include "lpfc_scsi.h"
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
88 &phba->sli.ring[phba->sli.fcp_ring], 89 &phba->sli.ring[phba->sli.fcp_ring],
89 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 90 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
90 } 91 }
91
92 /*
93 * A device is normally blocked for rediscovery and unblocked when
94 * devloss timeout happens. In case a vport is removed or driver
95 * unloaded before devloss timeout happens, we need to unblock here.
96 */
97 scsi_target_unblock(&rport->dev);
98 return;
99} 92}
100 93
101/* 94/*
@@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
215 return; 208 return;
216 } 209 }
217 210
218 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 211 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
212 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
213 "0284 Devloss timeout Ignored on "
214 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
215 "NPort x%x\n",
216 *name, *(name+1), *(name+2), *(name+3),
217 *(name+4), *(name+5), *(name+6), *(name+7),
218 ndlp->nlp_DID);
219 return; 219 return;
220 }
220 221
221 if (ndlp->nlp_type & NLP_FABRIC) { 222 if (ndlp->nlp_type & NLP_FABRIC) {
222 /* We will clean up these Nodes in linkup */ 223 /* We will clean up these Nodes in linkup */
@@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
237 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 238 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
238 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 239 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
239 } 240 }
240 if (vport->load_flag & FC_UNLOADING)
241 warn_on = 0;
242 241
243 if (warn_on) { 242 if (warn_on) {
244 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 243 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
277} 276}
278 277
278/**
279 * lpfc_alloc_fast_evt: Allocates data structure for posting event.
280 * @phba: Pointer to hba context object.
281 *
282 * This function is called from the functions which need to post
283 * events from interrupt context. This function allocates data
284 * structure required for posting event. It also keeps track of
285 * number of events pending and prevent event storm when there are
286 * too many events.
287 **/
288struct lpfc_fast_path_event *
289lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
290 struct lpfc_fast_path_event *ret;
291
292 /* If there are lot of fast event do not exhaust memory due to this */
293 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
294 return NULL;
295
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC);
298 if (ret)
299 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
302 return ret;
303}
304
305/**
306 * lpfc_free_fast_evt: Frees event data structure.
307 * @phba: Pointer to hba context object.
308 * @evt: Event object which need to be freed.
309 *
310 * This function frees the data structure required for posting
311 * events.
312 **/
313void
314lpfc_free_fast_evt(struct lpfc_hba *phba,
315 struct lpfc_fast_path_event *evt) {
316
317 atomic_dec(&phba->fast_event_count);
318 kfree(evt);
319}
320
321/**
322 * lpfc_send_fastpath_evt: Posts events generated from fast path.
323 * @phba: Pointer to hba context object.
324 * @evtp: Event data structure.
325 *
326 * This function is called from worker thread, when the interrupt
327 * context need to post an event. This function posts the event
328 * to fc transport netlink interface.
329 **/
330static void
331lpfc_send_fastpath_evt(struct lpfc_hba *phba,
332 struct lpfc_work_evt *evtp)
333{
334 unsigned long evt_category, evt_sub_category;
335 struct lpfc_fast_path_event *fast_evt_data;
336 char *evt_data;
337 uint32_t evt_data_size;
338 struct Scsi_Host *shost;
339
340 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
341 work_evt);
342
343 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
344 evt_sub_category = (unsigned long) fast_evt_data->un.
345 fabric_evt.subcategory;
346 shost = lpfc_shost_from_vport(fast_evt_data->vport);
347 if (evt_category == FC_REG_FABRIC_EVENT) {
348 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
349 evt_data = (char *) &fast_evt_data->un.read_check_error;
350 evt_data_size = sizeof(fast_evt_data->un.
351 read_check_error);
352 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
353 (evt_sub_category == IOSTAT_NPORT_BSY)) {
354 evt_data = (char *) &fast_evt_data->un.fabric_evt;
355 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
356 } else {
357 lpfc_free_fast_evt(phba, fast_evt_data);
358 return;
359 }
360 } else if (evt_category == FC_REG_SCSI_EVENT) {
361 switch (evt_sub_category) {
362 case LPFC_EVENT_QFULL:
363 case LPFC_EVENT_DEVBSY:
364 evt_data = (char *) &fast_evt_data->un.scsi_evt;
365 evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
366 break;
367 case LPFC_EVENT_CHECK_COND:
368 evt_data = (char *) &fast_evt_data->un.check_cond_evt;
369 evt_data_size = sizeof(fast_evt_data->un.
370 check_cond_evt);
371 break;
372 case LPFC_EVENT_VARQUEDEPTH:
373 evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
374 evt_data_size = sizeof(fast_evt_data->un.
375 queue_depth_evt);
376 break;
377 default:
378 lpfc_free_fast_evt(phba, fast_evt_data);
379 return;
380 }
381 } else {
382 lpfc_free_fast_evt(phba, fast_evt_data);
383 return;
384 }
385
386 fc_host_post_vendor_event(shost,
387 fc_get_event_number(),
388 evt_data_size,
389 evt_data,
390 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
391
392 lpfc_free_fast_evt(phba, fast_evt_data);
393 return;
394}
395
279static void 396static void
280lpfc_work_list_done(struct lpfc_hba *phba) 397lpfc_work_list_done(struct lpfc_hba *phba)
281{ 398{
@@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
347 lpfc_unblock_mgmt_io(phba); 464 lpfc_unblock_mgmt_io(phba);
348 complete((struct completion *)(evtp->evt_arg2)); 465 complete((struct completion *)(evtp->evt_arg2));
349 break; 466 break;
467 case LPFC_EVT_FASTPATH_MGMT_EVT:
468 lpfc_send_fastpath_evt(phba, evtp);
469 free_evt = 0;
470 break;
350 } 471 }
351 if (free_evt) 472 if (free_evt)
352 kfree(evtp); 473 kfree(evtp);
@@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
371 spin_unlock_irq(&phba->hbalock); 492 spin_unlock_irq(&phba->hbalock);
372 493
373 if (ha_copy & HA_ERATT) 494 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */
374 lpfc_handle_eratt(phba); 496 lpfc_handle_eratt(phba);
375 497
376 if (ha_copy & HA_MBATT) 498 if (ha_copy & HA_MBATT)
@@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba)
378 500
379 if (ha_copy & HA_LATT) 501 if (ha_copy & HA_LATT)
380 lpfc_handle_latt(phba); 502 lpfc_handle_latt(phba);
503
381 vports = lpfc_create_vport_work_array(phba); 504 vports = lpfc_create_vport_work_array(phba);
382 if (vports != NULL) 505 if (vports != NULL)
383 for(i = 0; i <= phba->max_vpi; i++) { 506 for(i = 0; i <= phba->max_vpi; i++) {
@@ -1013,14 +1136,10 @@ out:
1013} 1136}
1014 1137
1015static void 1138static void
1016lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 1139lpfc_enable_la(struct lpfc_hba *phba)
1017{ 1140{
1018 uint32_t control; 1141 uint32_t control;
1019 struct lpfc_sli *psli = &phba->sli; 1142 struct lpfc_sli *psli = &phba->sli;
1020
1021 lpfc_linkdown(phba);
1022
1023 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1024 spin_lock_irq(&phba->hbalock); 1143 spin_lock_irq(&phba->hbalock);
1025 psli->sli_flag |= LPFC_PROCESS_LA; 1144 psli->sli_flag |= LPFC_PROCESS_LA;
1026 control = readl(phba->HCregaddr); 1145 control = readl(phba->HCregaddr);
@@ -1030,6 +1149,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1030 spin_unlock_irq(&phba->hbalock); 1149 spin_unlock_irq(&phba->hbalock);
1031} 1150}
1032 1151
1152static void
1153lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1154{
1155 lpfc_linkdown(phba);
1156 lpfc_enable_la(phba);
1157 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1158}
1159
1160
1033/* 1161/*
1034 * This routine handles processing a READ_LA mailbox 1162 * This routine handles processing a READ_LA mailbox
1035 * command upon completion. It is setup in the LPFC_MBOXQ 1163 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1077,8 +1205,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1077 } 1205 }
1078 1206
1079 phba->fc_eventTag = la->eventTag; 1207 phba->fc_eventTag = la->eventTag;
1208 if (la->mm)
1209 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
1210 else
1211 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
1080 1212
1081 if (la->attType == AT_LINK_UP) { 1213 if (la->attType == AT_LINK_UP && (!la->mm)) {
1082 phba->fc_stat.LinkUp++; 1214 phba->fc_stat.LinkUp++;
1083 if (phba->link_flag & LS_LOOPBACK_MODE) { 1215 if (phba->link_flag & LS_LOOPBACK_MODE) {
1084 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1216 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1090,13 +1222,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1090 } else { 1222 } else {
1091 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1223 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1092 "1303 Link Up Event x%x received " 1224 "1303 Link Up Event x%x received "
1093 "Data: x%x x%x x%x x%x\n", 1225 "Data: x%x x%x x%x x%x x%x x%x %d\n",
1094 la->eventTag, phba->fc_eventTag, 1226 la->eventTag, phba->fc_eventTag,
1095 la->granted_AL_PA, la->UlnkSpeed, 1227 la->granted_AL_PA, la->UlnkSpeed,
1096 phba->alpa_map[0]); 1228 phba->alpa_map[0],
1229 la->mm, la->fa,
1230 phba->wait_4_mlo_maint_flg);
1097 } 1231 }
1098 lpfc_mbx_process_link_up(phba, la); 1232 lpfc_mbx_process_link_up(phba, la);
1099 } else { 1233 } else if (la->attType == AT_LINK_DOWN) {
1100 phba->fc_stat.LinkDown++; 1234 phba->fc_stat.LinkDown++;
1101 if (phba->link_flag & LS_LOOPBACK_MODE) { 1235 if (phba->link_flag & LS_LOOPBACK_MODE) {
1102 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1236 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1109,11 +1243,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1109 else { 1243 else {
1110 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1244 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1111 "1305 Link Down Event x%x received " 1245 "1305 Link Down Event x%x received "
1246 "Data: x%x x%x x%x x%x x%x\n",
1247 la->eventTag, phba->fc_eventTag,
1248 phba->pport->port_state, vport->fc_flag,
1249 la->mm, la->fa);
1250 }
1251 lpfc_mbx_issue_link_down(phba);
1252 }
1253 if (la->mm && la->attType == AT_LINK_UP) {
1254 if (phba->link_state != LPFC_LINK_DOWN) {
1255 phba->fc_stat.LinkDown++;
1256 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1257 "1312 Link Down Event x%x received "
1258 "Data: x%x x%x x%x\n",
1259 la->eventTag, phba->fc_eventTag,
1260 phba->pport->port_state, vport->fc_flag);
1261 lpfc_mbx_issue_link_down(phba);
1262 } else
1263 lpfc_enable_la(phba);
1264
1265 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1266 "1310 Menlo Maint Mode Link up Event x%x rcvd "
1112 "Data: x%x x%x x%x\n", 1267 "Data: x%x x%x x%x\n",
1113 la->eventTag, phba->fc_eventTag, 1268 la->eventTag, phba->fc_eventTag,
1114 phba->pport->port_state, vport->fc_flag); 1269 phba->pport->port_state, vport->fc_flag);
1270 /*
1271 * The cmnd that triggered this will be waiting for this
1272 * signal.
1273 */
1274 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
1275 if (phba->wait_4_mlo_maint_flg) {
1276 phba->wait_4_mlo_maint_flg = 0;
1277 wake_up_interruptible(&phba->wait_4_mlo_m_q);
1115 } 1278 }
1116 lpfc_mbx_issue_link_down(phba); 1279 }
1280
1281 if (la->fa) {
1282 if (la->mm)
1283 lpfc_issue_clear_la(phba, vport);
1284 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1285 "1311 fa %d\n", la->fa);
1117 } 1286 }
1118 1287
1119lpfc_mbx_cmpl_read_la_free_mbuf: 1288lpfc_mbx_cmpl_read_la_free_mbuf:
@@ -1177,7 +1346,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1177 scsi_host_put(shost); 1346 scsi_host_put(shost);
1178} 1347}
1179 1348
1180void 1349int
1181lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 1350lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1182{ 1351{
1183 struct lpfc_hba *phba = vport->phba; 1352 struct lpfc_hba *phba = vport->phba;
@@ -1186,7 +1355,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1186 1355
1187 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1356 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1188 if (!mbox) 1357 if (!mbox)
1189 return; 1358 return 1;
1190 1359
1191 lpfc_unreg_vpi(phba, vport->vpi, mbox); 1360 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1192 mbox->vport = vport; 1361 mbox->vport = vport;
@@ -1197,7 +1366,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1197 "1800 Could not issue unreg_vpi\n"); 1366 "1800 Could not issue unreg_vpi\n");
1198 mempool_free(mbox, phba->mbox_mem_pool); 1367 mempool_free(mbox, phba->mbox_mem_pool);
1199 vport->unreg_vpi_cmpl = VPORT_ERROR; 1368 vport->unreg_vpi_cmpl = VPORT_ERROR;
1369 return rc;
1200 } 1370 }
1371 return 0;
1201} 1372}
1202 1373
1203static void 1374static void
@@ -1553,6 +1724,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1553 */ 1724 */
1554 lpfc_register_remote_port(vport, ndlp); 1725 lpfc_register_remote_port(vport, ndlp);
1555 } 1726 }
1727 if ((new_state == NLP_STE_MAPPED_NODE) &&
1728 (vport->stat_data_enabled)) {
1729 /*
1730 * A new target is discovered, if there is no buffer for
1731 * statistical data collection allocate buffer.
1732 */
1733 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
1734 sizeof(struct lpfc_scsicmd_bkt),
1735 GFP_KERNEL);
1736
1737 if (!ndlp->lat_data)
1738 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
1739 "0286 lpfc_nlp_state_cleanup failed to "
1740 "allocate statistical data buffer DID "
1741 "0x%x\n", ndlp->nlp_DID);
1742 }
1556 /* 1743 /*
1557 * if we added to Mapped list, but the remote port 1744 * if we added to Mapped list, but the remote port
1558 * registration failed or assigned a target id outside 1745 * registration failed or assigned a target id outside
@@ -2786,7 +2973,7 @@ restart_disc:
2786 2973
2787 default: 2974 default:
2788 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2975 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2789 "0229 Unexpected discovery timeout, " 2976 "0273 Unexpected discovery timeout, "
2790 "vport State x%x\n", vport->port_state); 2977 "vport State x%x\n", vport->port_state);
2791 break; 2978 break;
2792 } 2979 }
@@ -2940,6 +3127,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2940 INIT_LIST_HEAD(&ndlp->nlp_listp); 3127 INIT_LIST_HEAD(&ndlp->nlp_listp);
2941 kref_init(&ndlp->kref); 3128 kref_init(&ndlp->kref);
2942 NLP_INT_NODE_ACT(ndlp); 3129 NLP_INT_NODE_ACT(ndlp);
3130 atomic_set(&ndlp->cmd_pending, 0);
3131 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2943 3132
2944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2945 "node init: did:x%x", 3134 "node init: did:x%x",
@@ -2979,8 +3168,10 @@ lpfc_nlp_release(struct kref *kref)
2979 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3168 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2980 3169
2981 /* free ndlp memory for final ndlp release */ 3170 /* free ndlp memory for final ndlp release */
2982 if (NLP_CHK_FREE_REQ(ndlp)) 3171 if (NLP_CHK_FREE_REQ(ndlp)) {
3172 kfree(ndlp->lat_data);
2983 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 3173 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
3174 }
2984} 3175}
2985 3176
2986/* This routine bumps the reference count for a ndlp structure to ensure 3177/* This routine bumps the reference count for a ndlp structure to ensure
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7773b949aa7c..5de5dabbbee6 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1107,6 +1107,8 @@ typedef struct {
1107/* Start FireFly Register definitions */ 1107/* Start FireFly Register definitions */
1108#define PCI_VENDOR_ID_EMULEX 0x10df 1108#define PCI_VENDOR_ID_EMULEX 0x10df
1109#define PCI_DEVICE_ID_FIREFLY 0x1ae5 1109#define PCI_DEVICE_ID_FIREFLY 0x1ae5
1110#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
1111#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
1110#define PCI_DEVICE_ID_SAT_SMB 0xf011 1112#define PCI_DEVICE_ID_SAT_SMB 0xf011
1111#define PCI_DEVICE_ID_SAT_MID 0xf015 1113#define PCI_DEVICE_ID_SAT_MID 0xf015
1112#define PCI_DEVICE_ID_RFLY 0xf095 1114#define PCI_DEVICE_ID_RFLY 0xf095
@@ -1133,10 +1135,12 @@ typedef struct {
1133#define PCI_DEVICE_ID_LP11000S 0xfc10 1135#define PCI_DEVICE_ID_LP11000S 0xfc10
1134#define PCI_DEVICE_ID_LPE11000S 0xfc20 1136#define PCI_DEVICE_ID_LPE11000S 0xfc20
1135#define PCI_DEVICE_ID_SAT_S 0xfc40 1137#define PCI_DEVICE_ID_SAT_S 0xfc40
1138#define PCI_DEVICE_ID_PROTEUS_S 0xfc50
1136#define PCI_DEVICE_ID_HELIOS 0xfd00 1139#define PCI_DEVICE_ID_HELIOS 0xfd00
1137#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 1140#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
1138#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 1141#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
1139#define PCI_DEVICE_ID_ZEPHYR 0xfe00 1142#define PCI_DEVICE_ID_ZEPHYR 0xfe00
1143#define PCI_DEVICE_ID_HORNET 0xfe05
1140#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1144#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1141#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1145#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1142 1146
@@ -1154,6 +1158,7 @@ typedef struct {
1154#define ZEPHYR_JEDEC_ID 0x0577 1158#define ZEPHYR_JEDEC_ID 0x0577
1155#define VIPER_JEDEC_ID 0x4838 1159#define VIPER_JEDEC_ID 0x4838
1156#define SATURN_JEDEC_ID 0x1004 1160#define SATURN_JEDEC_ID 0x1004
1161#define HORNET_JDEC_ID 0x2057706D
1157 1162
1158#define JEDEC_ID_MASK 0x0FFFF000 1163#define JEDEC_ID_MASK 0x0FFFF000
1159#define JEDEC_ID_SHIFT 12 1164#define JEDEC_ID_SHIFT 12
@@ -1198,6 +1203,18 @@ typedef struct { /* FireFly BIU registers */
1198#define HA_RXATT 0x00000008 /* Bit 3 */ 1203#define HA_RXATT 0x00000008 /* Bit 3 */
1199#define HA_RXMASK 0x0000000f 1204#define HA_RXMASK 0x0000000f
1200 1205
1206#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
1207#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
1208#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
1209#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
1210
1211#define HA_R0_POS 3
1212#define HA_R1_POS 7
1213#define HA_R2_POS 11
1214#define HA_R3_POS 15
1215#define HA_LE_POS 29
1216#define HA_MB_POS 30
1217#define HA_ER_POS 31
1201/* Chip Attention Register */ 1218/* Chip Attention Register */
1202 1219
1203#define CA_REG_OFFSET 4 /* Byte offset from register base address */ 1220#define CA_REG_OFFSET 4 /* Byte offset from register base address */
@@ -1235,7 +1252,7 @@ typedef struct { /* FireFly BIU registers */
1235 1252
1236/* Host Control Register */ 1253/* Host Control Register */
1237 1254
1238#define HC_REG_OFFSET 12 /* Word offset from register base address */ 1255#define HC_REG_OFFSET 12 /* Byte offset from register base address */
1239 1256
1240#define HC_MBINT_ENA 0x00000001 /* Bit 0 */ 1257#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
1241#define HC_R0INT_ENA 0x00000002 /* Bit 1 */ 1258#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
@@ -1248,6 +1265,19 @@ typedef struct { /* FireFly BIU registers */
1248#define HC_LAINT_ENA 0x20000000 /* Bit 29 */ 1265#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
1249#define HC_ERINT_ENA 0x80000000 /* Bit 31 */ 1266#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
1250 1267
1268/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
1269#define MSIX_DFLT_ID 0
1270#define MSIX_RNG0_ID 0
1271#define MSIX_RNG1_ID 1
1272#define MSIX_RNG2_ID 2
1273#define MSIX_RNG3_ID 3
1274
1275#define MSIX_LINK_ID 4
1276#define MSIX_MBOX_ID 5
1277
1278#define MSIX_SPARE0_ID 6
1279#define MSIX_SPARE1_ID 7
1280
1251/* Mailbox Commands */ 1281/* Mailbox Commands */
1252#define MBX_SHUTDOWN 0x00 /* terminate testing */ 1282#define MBX_SHUTDOWN 0x00 /* terminate testing */
1253#define MBX_LOAD_SM 0x01 1283#define MBX_LOAD_SM 0x01
@@ -1285,10 +1315,14 @@ typedef struct { /* FireFly BIU registers */
1285#define MBX_KILL_BOARD 0x24 1315#define MBX_KILL_BOARD 0x24
1286#define MBX_CONFIG_FARP 0x25 1316#define MBX_CONFIG_FARP 0x25
1287#define MBX_BEACON 0x2A 1317#define MBX_BEACON 0x2A
1318#define MBX_CONFIG_MSI 0x30
1288#define MBX_HEARTBEAT 0x31 1319#define MBX_HEARTBEAT 0x31
1289#define MBX_WRITE_VPARMS 0x32 1320#define MBX_WRITE_VPARMS 0x32
1290#define MBX_ASYNCEVT_ENABLE 0x33 1321#define MBX_ASYNCEVT_ENABLE 0x33
1291 1322
1323#define MBX_PORT_CAPABILITIES 0x3B
1324#define MBX_PORT_IOV_CONTROL 0x3C
1325
1292#define MBX_CONFIG_HBQ 0x7C 1326#define MBX_CONFIG_HBQ 0x7C
1293#define MBX_LOAD_AREA 0x81 1327#define MBX_LOAD_AREA 0x81
1294#define MBX_RUN_BIU_DIAG64 0x84 1328#define MBX_RUN_BIU_DIAG64 0x84
@@ -1474,24 +1508,18 @@ struct ulp_bde64 { /* SLI-2 */
1474 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED 1508 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1475 VALUE !! */ 1509 VALUE !! */
1476#endif 1510#endif
1477 1511#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1478#define BUFF_USE_RSVD 0x01 /* bdeFlags */ 1512#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1479#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */ 1513#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1480#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */ 1514#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1481#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit 1515#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1482 buffer */ 1516#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1483#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit 1517#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1484 addr */
1485#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
1486#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
1487#define BUFF_TYPE_INVALID 0x80 /* "" "" */
1488 } f; 1518 } f;
1489 } tus; 1519 } tus;
1490 uint32_t addrLow; 1520 uint32_t addrLow;
1491 uint32_t addrHigh; 1521 uint32_t addrHigh;
1492}; 1522};
1493#define BDE64_SIZE_WORD 0
1494#define BPL64_SIZE_WORD 0x40
1495 1523
1496typedef struct ULP_BDL { /* SLI-2 */ 1524typedef struct ULP_BDL { /* SLI-2 */
1497#ifdef __BIG_ENDIAN_BITFIELD 1525#ifdef __BIG_ENDIAN_BITFIELD
@@ -2201,7 +2229,10 @@ typedef struct {
2201typedef struct { 2229typedef struct {
2202 uint32_t eventTag; /* Event tag */ 2230 uint32_t eventTag; /* Event tag */
2203#ifdef __BIG_ENDIAN_BITFIELD 2231#ifdef __BIG_ENDIAN_BITFIELD
2204 uint32_t rsvd1:22; 2232 uint32_t rsvd1:19;
2233 uint32_t fa:1;
2234 uint32_t mm:1; /* Menlo Maintenance mode enabled */
2235 uint32_t rx:1;
2205 uint32_t pb:1; 2236 uint32_t pb:1;
2206 uint32_t il:1; 2237 uint32_t il:1;
2207 uint32_t attType:8; 2238 uint32_t attType:8;
@@ -2209,7 +2240,10 @@ typedef struct {
2209 uint32_t attType:8; 2240 uint32_t attType:8;
2210 uint32_t il:1; 2241 uint32_t il:1;
2211 uint32_t pb:1; 2242 uint32_t pb:1;
2212 uint32_t rsvd1:22; 2243 uint32_t rx:1;
2244 uint32_t mm:1;
2245 uint32_t fa:1;
2246 uint32_t rsvd1:19;
2213#endif 2247#endif
2214 2248
2215#define AT_RESERVED 0x00 /* Reserved - attType */ 2249#define AT_RESERVED 0x00 /* Reserved - attType */
@@ -2230,6 +2264,7 @@ typedef struct {
2230 2264
2231#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ 2265#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
2232#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ 2266#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
2267#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */
2233 2268
2234 union { 2269 union {
2235 struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer 2270 struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
@@ -2324,6 +2359,36 @@ typedef struct {
2324#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2359#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2325#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2360#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2326 2361
2362/* Structure for MB Command UPDATE_CFG (0x1B) */
2363
2364struct update_cfg_var {
2365#ifdef __BIG_ENDIAN_BITFIELD
2366 uint32_t rsvd2:16;
2367 uint32_t type:8;
2368 uint32_t rsvd:1;
2369 uint32_t ra:1;
2370 uint32_t co:1;
2371 uint32_t cv:1;
2372 uint32_t req:4;
2373 uint32_t entry_length:16;
2374 uint32_t region_id:16;
2375#else /* __LITTLE_ENDIAN_BITFIELD */
2376 uint32_t req:4;
2377 uint32_t cv:1;
2378 uint32_t co:1;
2379 uint32_t ra:1;
2380 uint32_t rsvd:1;
2381 uint32_t type:8;
2382 uint32_t rsvd2:16;
2383 uint32_t region_id:16;
2384 uint32_t entry_length:16;
2385#endif
2386
2387 uint32_t resp_info;
2388 uint32_t byte_cnt;
2389 uint32_t data_offset;
2390};
2391
2327struct hbq_mask { 2392struct hbq_mask {
2328#ifdef __BIG_ENDIAN_BITFIELD 2393#ifdef __BIG_ENDIAN_BITFIELD
2329 uint8_t tmatch; 2394 uint8_t tmatch;
@@ -2560,6 +2625,40 @@ typedef struct {
2560 2625
2561} CONFIG_PORT_VAR; 2626} CONFIG_PORT_VAR;
2562 2627
2628/* Structure for MB Command CONFIG_MSI (0x30) */
2629struct config_msi_var {
2630#ifdef __BIG_ENDIAN_BITFIELD
2631 uint32_t dfltMsgNum:8; /* Default message number */
2632 uint32_t rsvd1:11; /* Reserved */
2633 uint32_t NID:5; /* Number of secondary attention IDs */
2634 uint32_t rsvd2:5; /* Reserved */
2635 uint32_t dfltPresent:1; /* Default message number present */
2636 uint32_t addFlag:1; /* Add association flag */
2637 uint32_t reportFlag:1; /* Report association flag */
2638#else /* __LITTLE_ENDIAN_BITFIELD */
2639 uint32_t reportFlag:1; /* Report association flag */
2640 uint32_t addFlag:1; /* Add association flag */
2641 uint32_t dfltPresent:1; /* Default message number present */
2642 uint32_t rsvd2:5; /* Reserved */
2643 uint32_t NID:5; /* Number of secondary attention IDs */
2644 uint32_t rsvd1:11; /* Reserved */
2645 uint32_t dfltMsgNum:8; /* Default message number */
2646#endif
2647 uint32_t attentionConditions[2];
2648 uint8_t attentionId[16];
2649 uint8_t messageNumberByHA[64];
2650 uint8_t messageNumberByID[16];
2651 uint32_t autoClearHA[2];
2652#ifdef __BIG_ENDIAN_BITFIELD
2653 uint32_t rsvd3:16;
2654 uint32_t autoClearID:16;
2655#else /* __LITTLE_ENDIAN_BITFIELD */
2656 uint32_t autoClearID:16;
2657 uint32_t rsvd3:16;
2658#endif
2659 uint32_t rsvd4;
2660};
2661
2563/* SLI-2 Port Control Block */ 2662/* SLI-2 Port Control Block */
2564 2663
2565/* SLIM POINTER */ 2664/* SLIM POINTER */
@@ -2678,10 +2777,12 @@ typedef union {
2678 * NEW_FEATURE 2777 * NEW_FEATURE
2679 */ 2778 */
2680 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ 2779 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
2780 struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
2681 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 2781 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2682 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ 2782 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2683 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ 2783 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
2684 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ 2784 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
2785 struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
2685} MAILVARIANTS; 2786} MAILVARIANTS;
2686 2787
2687/* 2788/*
@@ -2715,11 +2816,19 @@ struct sli3_pgp {
2715 uint32_t hbq_get[16]; 2816 uint32_t hbq_get[16];
2716}; 2817};
2717 2818
2718typedef union { 2819struct sli3_inb_pgp {
2719 struct sli2_desc s2; 2820 uint32_t ha_copy;
2720 struct sli3_desc s3; 2821 uint32_t counter;
2721 struct sli3_pgp s3_pgp; 2822 struct lpfc_pgp port[MAX_RINGS];
2722} SLI_VAR; 2823 uint32_t hbq_get[16];
2824};
2825
2826union sli_var {
2827 struct sli2_desc s2;
2828 struct sli3_desc s3;
2829 struct sli3_pgp s3_pgp;
2830 struct sli3_inb_pgp s3_inb_pgp;
2831};
2723 2832
2724typedef struct { 2833typedef struct {
2725#ifdef __BIG_ENDIAN_BITFIELD 2834#ifdef __BIG_ENDIAN_BITFIELD
@@ -2737,7 +2846,7 @@ typedef struct {
2737#endif 2846#endif
2738 2847
2739 MAILVARIANTS un; 2848 MAILVARIANTS un;
2740 SLI_VAR us; 2849 union sli_var us;
2741} MAILBOX_t; 2850} MAILBOX_t;
2742 2851
2743/* 2852/*
@@ -3105,6 +3214,27 @@ struct que_xri64cx_ext_fields {
3105 struct lpfc_hbq_entry buff[5]; 3214 struct lpfc_hbq_entry buff[5];
3106}; 3215};
3107 3216
3217#define LPFC_EXT_DATA_BDE_COUNT 3
3218struct fcp_irw_ext {
3219 uint32_t io_tag64_low;
3220 uint32_t io_tag64_high;
3221#ifdef __BIG_ENDIAN_BITFIELD
3222 uint8_t reserved1;
3223 uint8_t reserved2;
3224 uint8_t reserved3;
3225 uint8_t ebde_count;
3226#else /* __LITTLE_ENDIAN */
3227 uint8_t ebde_count;
3228 uint8_t reserved3;
3229 uint8_t reserved2;
3230 uint8_t reserved1;
3231#endif
3232 uint32_t reserved4;
3233 struct ulp_bde64 rbde; /* response bde */
3234 struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */
3235 uint8_t icd[32]; /* immediate command data (32 bytes) */
3236};
3237
3108typedef struct _IOCB { /* IOCB structure */ 3238typedef struct _IOCB { /* IOCB structure */
3109 union { 3239 union {
3110 GENERIC_RSP grsp; /* Generic response */ 3240 GENERIC_RSP grsp; /* Generic response */
@@ -3190,7 +3320,7 @@ typedef struct _IOCB { /* IOCB structure */
3190 3320
3191 /* words 8-31 used for que_xri_cx iocb */ 3321 /* words 8-31 used for que_xri_cx iocb */
3192 struct que_xri64cx_ext_fields que_xri64cx_ext_words; 3322 struct que_xri64cx_ext_fields que_xri64cx_ext_words;
3193 3323 struct fcp_irw_ext fcp_ext;
3194 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ 3324 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3195 } unsli3; 3325 } unsli3;
3196 3326
@@ -3292,3 +3422,10 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3292 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || 3422 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
3293 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); 3423 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
3294} 3424}
3425
3426#define MENLO_TRANSPORT_TYPE 0xfe
3427#define MENLO_CONTEXT 0
3428#define MENLO_PU 3
3429#define MENLO_TIMEOUT 30
3430#define SETVAR_MLOMNT 0x103107
3431#define SETVAR_MLORST 0x103007
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d51a2a4b43eb..909be3301bba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -36,6 +36,7 @@
36 36
37#include "lpfc_hw.h" 37#include "lpfc_hw.h"
38#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_nl.h"
39#include "lpfc_disc.h" 40#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
41#include "lpfc.h" 42#include "lpfc.h"
@@ -52,17 +53,20 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
52static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 53static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
53static DEFINE_IDR(lpfc_hba_index); 54static DEFINE_IDR(lpfc_hba_index);
54 55
55/************************************************************************/ 56/**
56/* */ 57 * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
57/* lpfc_config_port_prep */ 58 * @phba: pointer to lpfc hba data structure.
58/* This routine will do LPFC initialization prior to the */ 59 *
59/* CONFIG_PORT mailbox command. This will be initialized */ 60 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
60/* as a SLI layer callback routine. */ 61 * mailbox command. It retrieves the revision information from the HBA and
61/* This routine returns 0 on success or -ERESTART if it wants */ 62 * collects the Vital Product Data (VPD) about the HBA for preparing the
62/* the SLI layer to reset the HBA and try again. Any */ 63 * configuration of the HBA.
63/* other return value indicates an error. */ 64 *
64/* */ 65 * Return codes:
65/************************************************************************/ 66 * 0 - success.
67 * -ERESTART - requests the SLI layer to reset the HBA and try again.
68 * Any other value - indicates an error.
69 **/
66int 70int
67lpfc_config_port_prep(struct lpfc_hba *phba) 71lpfc_config_port_prep(struct lpfc_hba *phba)
68{ 72{
@@ -180,12 +184,9 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
180 sizeof (phba->RandomData)); 184 sizeof (phba->RandomData));
181 185
182 /* Get adapter VPD information */ 186 /* Get adapter VPD information */
183 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
184 if (!pmb->context2)
185 goto out_free_mbox;
186 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 187 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
187 if (!lpfc_vpd_data) 188 if (!lpfc_vpd_data)
188 goto out_free_context2; 189 goto out_free_mbox;
189 190
190 do { 191 do {
191 lpfc_dump_mem(phba, pmb, offset); 192 lpfc_dump_mem(phba, pmb, offset);
@@ -200,21 +201,29 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
200 } 201 }
201 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 202 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
202 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 203 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
203 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 204 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
205 lpfc_vpd_data + offset,
204 mb->un.varDmp.word_cnt); 206 mb->un.varDmp.word_cnt);
205 offset += mb->un.varDmp.word_cnt; 207 offset += mb->un.varDmp.word_cnt;
206 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 208 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
207 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 209 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
208 210
209 kfree(lpfc_vpd_data); 211 kfree(lpfc_vpd_data);
210out_free_context2:
211 kfree(pmb->context2);
212out_free_mbox: 212out_free_mbox:
213 mempool_free(pmb, phba->mbox_mem_pool); 213 mempool_free(pmb, phba->mbox_mem_pool);
214 return 0; 214 return 0;
215} 215}
216 216
217/* Completion handler for config async event mailbox command. */ 217/**
218 * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd.
219 * @phba: pointer to lpfc hba data structure.
220 * @pmboxq: pointer to the driver internal queue element for mailbox command.
221 *
222 * This is the completion handler for driver's configuring asynchronous event
223 * mailbox command to the device. If the mailbox command returns successfully,
224 * it will set internal async event support flag to 1; otherwise, it will
225 * set internal async event support flag to 0.
226 **/
218static void 227static void
219lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 228lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
220{ 229{
@@ -226,16 +235,19 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
226 return; 235 return;
227} 236}
228 237
229/************************************************************************/ 238/**
230/* */ 239 * lpfc_config_port_post: Perform lpfc initialization after config port.
231/* lpfc_config_port_post */ 240 * @phba: pointer to lpfc hba data structure.
232/* This routine will do LPFC initialization after the */ 241 *
233/* CONFIG_PORT mailbox command. This will be initialized */ 242 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
234/* as a SLI layer callback routine. */ 243 * command call. It performs all internal resource and state setups on the
235/* This routine returns 0 on success. Any other return value */ 244 * port: post IOCB buffers, enable appropriate host interrupt attentions,
236/* indicates an error. */ 245 * ELS ring timers, etc.
237/* */ 246 *
238/************************************************************************/ 247 * Return codes
248 * 0 - success.
249 * Any other value - error.
250 **/
239int 251int
240lpfc_config_port_post(struct lpfc_hba *phba) 252lpfc_config_port_post(struct lpfc_hba *phba)
241{ 253{
@@ -378,6 +390,29 @@ lpfc_config_port_post(struct lpfc_hba *phba)
378 if (phba->sli_rev != 3) 390 if (phba->sli_rev != 3)
379 lpfc_post_rcv_buf(phba); 391 lpfc_post_rcv_buf(phba);
380 392
393 /*
394 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
395 */
396 if (phba->intr_type == MSIX) {
397 rc = lpfc_config_msi(phba, pmb);
398 if (rc) {
399 mempool_free(pmb, phba->mbox_mem_pool);
400 return -EIO;
401 }
402 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
403 if (rc != MBX_SUCCESS) {
404 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
405 "0352 Config MSI mailbox command "
406 "failed, mbxCmd x%x, mbxStatus x%x\n",
407 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
408 mempool_free(pmb, phba->mbox_mem_pool);
409 return -EIO;
410 }
411 }
412
413 /* Initialize ERATT handling flag */
414 phba->hba_flag &= ~HBA_ERATT_HANDLED;
415
381 /* Enable appropriate host interrupts */ 416 /* Enable appropriate host interrupts */
382 spin_lock_irq(&phba->hbalock); 417 spin_lock_irq(&phba->hbalock);
383 status = readl(phba->HCregaddr); 418 status = readl(phba->HCregaddr);
@@ -393,26 +428,26 @@ lpfc_config_port_post(struct lpfc_hba *phba)
393 428
394 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 429 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
395 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 430 (phba->cfg_poll & DISABLE_FCP_RING_INT))
396 status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 431 status &= ~(HC_R0INT_ENA);
397 432
398 writel(status, phba->HCregaddr); 433 writel(status, phba->HCregaddr);
399 readl(phba->HCregaddr); /* flush */ 434 readl(phba->HCregaddr); /* flush */
400 spin_unlock_irq(&phba->hbalock); 435 spin_unlock_irq(&phba->hbalock);
401 436
402 /* 437 /* Set up ring-0 (ELS) timer */
403 * Setup the ring 0 (els) timeout handler 438 timeout = phba->fc_ratov * 2;
404 */
405 timeout = phba->fc_ratov << 1;
406 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 439 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
440 /* Set up heart beat (HB) timer */
407 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 441 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
408 phba->hb_outstanding = 0; 442 phba->hb_outstanding = 0;
409 phba->last_completion_time = jiffies; 443 phba->last_completion_time = jiffies;
444 /* Set up error attention (ERATT) polling timer */
445 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
410 446
411 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 447 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
412 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 448 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
413 pmb->vport = vport;
414 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
415 lpfc_set_loopback_flag(phba); 449 lpfc_set_loopback_flag(phba);
450 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
416 if (rc != MBX_SUCCESS) { 451 if (rc != MBX_SUCCESS) {
417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 452 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
418 "0454 Adapter failed to init, mbxCmd x%x " 453 "0454 Adapter failed to init, mbxCmd x%x "
@@ -447,19 +482,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
447 rc); 482 rc);
448 mempool_free(pmb, phba->mbox_mem_pool); 483 mempool_free(pmb, phba->mbox_mem_pool);
449 } 484 }
450 return (0); 485 return 0;
451} 486}
452 487
453/************************************************************************/ 488/**
454/* */ 489 * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset.
455/* lpfc_hba_down_prep */ 490 * @phba: pointer to lpfc HBA data structure.
456/* This routine will do LPFC uninitialization before the */ 491 *
457/* HBA is reset when bringing down the SLI Layer. This will be */ 492 * This routine will do LPFC uninitialization before the HBA is reset when
458/* initialized as a SLI layer callback routine. */ 493 * bringing down the SLI Layer.
459/* This routine returns 0 on success. Any other return value */ 494 *
460/* indicates an error. */ 495 * Return codes
461/* */ 496 * 0 - success.
462/************************************************************************/ 497 * Any other value - error.
498 **/
463int 499int
464lpfc_hba_down_prep(struct lpfc_hba *phba) 500lpfc_hba_down_prep(struct lpfc_hba *phba)
465{ 501{
@@ -481,15 +517,17 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
481 return 0; 517 return 0;
482} 518}
483 519
484/************************************************************************/ 520/**
485/* */ 521 * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset.
486/* lpfc_hba_down_post */ 522 * @phba: pointer to lpfc HBA data structure.
487/* This routine will do uninitialization after the HBA is reset */ 523 *
488/* when bringing down the SLI Layer. */ 524 * This routine will do uninitialization after the HBA is reset when bring
489/* This routine returns 0 on success. Any other return value */ 525 * down the SLI Layer.
490/* indicates an error. */ 526 *
491/* */ 527 * Return codes
492/************************************************************************/ 528 * 0 - sucess.
529 * Any other value - error.
530 **/
493int 531int
494lpfc_hba_down_post(struct lpfc_hba *phba) 532lpfc_hba_down_post(struct lpfc_hba *phba)
495{ 533{
@@ -548,7 +586,18 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
548 return 0; 586 return 0;
549} 587}
550 588
551/* HBA heart beat timeout handler */ 589/**
590 * lpfc_hb_timeout: The HBA-timer timeout handler.
591 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
592 *
593 * This is the HBA-timer timeout handler registered to the lpfc driver. When
594 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
595 * work-port-events bitmap and the worker thread is notified. This timeout
596 * event will be used by the worker thread to invoke the actual timeout
597 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
598 * be performed in the timeout handler and the HBA timeout event bit shall
599 * be cleared by the worker thread after it has taken the event bitmap out.
600 **/
552static void 601static void
553lpfc_hb_timeout(unsigned long ptr) 602lpfc_hb_timeout(unsigned long ptr)
554{ 603{
@@ -557,17 +606,36 @@ lpfc_hb_timeout(unsigned long ptr)
557 unsigned long iflag; 606 unsigned long iflag;
558 607
559 phba = (struct lpfc_hba *)ptr; 608 phba = (struct lpfc_hba *)ptr;
609
610 /* Check for heart beat timeout conditions */
560 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 611 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
561 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 612 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
562 if (!tmo_posted) 613 if (!tmo_posted)
563 phba->pport->work_port_events |= WORKER_HB_TMO; 614 phba->pport->work_port_events |= WORKER_HB_TMO;
564 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 615 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
565 616
617 /* Tell the worker thread there is work to do */
566 if (!tmo_posted) 618 if (!tmo_posted)
567 lpfc_worker_wake_up(phba); 619 lpfc_worker_wake_up(phba);
568 return; 620 return;
569} 621}
570 622
623/**
624 * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function.
625 * @phba: pointer to lpfc hba data structure.
626 * @pmboxq: pointer to the driver internal queue element for mailbox command.
627 *
628 * This is the callback function to the lpfc heart-beat mailbox command.
629 * If configured, the lpfc driver issues the heart-beat mailbox command to
630 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
631 * heart-beat mailbox command is issued, the driver shall set up heart-beat
632 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
633 * heart-beat outstanding state. Once the mailbox command comes back and
634 * no error conditions detected, the heart-beat mailbox command timer is
635 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
636 * state is cleared for the next heart-beat. If the timer expired with the
637 * heart-beat outstanding state set, the driver will put the HBA offline.
638 **/
571static void 639static void
572lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 640lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
573{ 641{
@@ -577,6 +645,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
577 phba->hb_outstanding = 0; 645 phba->hb_outstanding = 0;
578 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 646 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
579 647
648 /* Check and reset heart-beat timer is necessary */
580 mempool_free(pmboxq, phba->mbox_mem_pool); 649 mempool_free(pmboxq, phba->mbox_mem_pool);
581 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 650 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
582 !(phba->link_state == LPFC_HBA_ERROR) && 651 !(phba->link_state == LPFC_HBA_ERROR) &&
@@ -586,6 +655,22 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
586 return; 655 return;
587} 656}
588 657
658/**
659 * lpfc_hb_timeout_handler: The HBA-timer timeout handler.
660 * @phba: pointer to lpfc hba data structure.
661 *
662 * This is the actual HBA-timer timeout handler to be invoked by the worker
663 * thread whenever the HBA timer fired and HBA-timeout event posted. This
664 * handler performs any periodic operations needed for the device. If such
665 * periodic event has already been attended to either in the interrupt handler
666 * or by processing slow-ring or fast-ring events within the HBA-timer
667 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
668 * the timer for the next timeout period. If lpfc heart-beat mailbox command
669 * is configured and there is no heart-beat mailbox command outstanding, a
670 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
671 * has been a heart-beat mailbox command outstanding, the HBA shall be put
672 * to offline.
673 **/
589void 674void
590lpfc_hb_timeout_handler(struct lpfc_hba *phba) 675lpfc_hb_timeout_handler(struct lpfc_hba *phba)
591{ 676{
@@ -684,6 +769,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
684 } 769 }
685} 770}
686 771
772/**
773 * lpfc_offline_eratt: Bring lpfc offline on hardware error attention.
774 * @phba: pointer to lpfc hba data structure.
775 *
776 * This routine is called to bring the HBA offline when HBA hardware error
777 * other than Port Error 6 has been detected.
778 **/
687static void 779static void
688lpfc_offline_eratt(struct lpfc_hba *phba) 780lpfc_offline_eratt(struct lpfc_hba *phba)
689{ 781{
@@ -704,14 +796,16 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
704 return; 796 return;
705} 797}
706 798
707/************************************************************************/ 799/**
708/* */ 800 * lpfc_handle_eratt: The HBA hardware error handler.
709/* lpfc_handle_eratt */ 801 * @phba: pointer to lpfc hba data structure.
710/* This routine will handle processing a Host Attention */ 802 *
711/* Error Status event. This will be initialized */ 803 * This routine is invoked to handle the following HBA hardware error
712/* as a SLI layer callback routine. */ 804 * conditions:
713/* */ 805 * 1 - HBA error attention interrupt
714/************************************************************************/ 806 * 2 - DMA ring index out of range
807 * 3 - Mailbox command came back as unknown
808 **/
715void 809void
716lpfc_handle_eratt(struct lpfc_hba *phba) 810lpfc_handle_eratt(struct lpfc_hba *phba)
717{ 811{
@@ -722,6 +816,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
722 unsigned long temperature; 816 unsigned long temperature;
723 struct temp_event temp_event_data; 817 struct temp_event temp_event_data;
724 struct Scsi_Host *shost; 818 struct Scsi_Host *shost;
819 struct lpfc_board_event_header board_event;
725 820
726 /* If the pci channel is offline, ignore possible errors, 821 /* If the pci channel is offline, ignore possible errors,
727 * since we cannot communicate with the pci card anyway. */ 822 * since we cannot communicate with the pci card anyway. */
@@ -731,6 +826,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
731 if (!phba->cfg_enable_hba_reset) 826 if (!phba->cfg_enable_hba_reset)
732 return; 827 return;
733 828
829 /* Send an internal error event to mgmt application */
830 board_event.event_type = FC_REG_BOARD_EVENT;
831 board_event.subcategory = LPFC_EVENT_PORTINTERR;
832 shost = lpfc_shost_from_vport(phba->pport);
833 fc_host_post_vendor_event(shost, fc_get_event_number(),
834 sizeof(board_event),
835 (char *) &board_event,
836 SCSI_NL_VID_TYPE_PCI
837 | PCI_VENDOR_ID_EMULEX);
838
734 if (phba->work_hs & HS_FFER6) { 839 if (phba->work_hs & HS_FFER6) {
735 /* Re-establishing Link */ 840 /* Re-establishing Link */
736 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 841 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -771,7 +876,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
771 temp_event_data.data = (uint32_t)temperature; 876 temp_event_data.data = (uint32_t)temperature;
772 877
773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
774 "0459 Adapter maximum temperature exceeded " 879 "0406 Adapter maximum temperature exceeded "
775 "(%ld), taking this port offline " 880 "(%ld), taking this port offline "
776 "Data: x%x x%x x%x\n", 881 "Data: x%x x%x x%x\n",
777 temperature, phba->work_hs, 882 temperature, phba->work_hs,
@@ -791,8 +896,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
791 896
792 } else { 897 } else {
793 /* The if clause above forces this code path when the status 898 /* The if clause above forces this code path when the status
794 * failure is a value other than FFER6. Do not call the offline 899 * failure is a value other than FFER6. Do not call the offline
795 * twice. This is the adapter hardware error path. 900 * twice. This is the adapter hardware error path.
796 */ 901 */
797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
798 "0457 Adapter Hardware Error " 903 "0457 Adapter Hardware Error "
@@ -808,16 +913,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
808 913
809 lpfc_offline_eratt(phba); 914 lpfc_offline_eratt(phba);
810 } 915 }
916 return;
811} 917}
812 918
813/************************************************************************/ 919/**
814/* */ 920 * lpfc_handle_latt: The HBA link event handler.
815/* lpfc_handle_latt */ 921 * @phba: pointer to lpfc hba data structure.
816/* This routine will handle processing a Host Attention */ 922 *
817/* Link Status event. This will be initialized */ 923 * This routine is invoked from the worker thread to handle a HBA host
818/* as a SLI layer callback routine. */ 924 * attention link event.
819/* */ 925 **/
820/************************************************************************/
821void 926void
822lpfc_handle_latt(struct lpfc_hba *phba) 927lpfc_handle_latt(struct lpfc_hba *phba)
823{ 928{
@@ -898,12 +1003,20 @@ lpfc_handle_latt_err_exit:
898 return; 1003 return;
899} 1004}
900 1005
901/************************************************************************/ 1006/**
902/* */ 1007 * lpfc_parse_vpd: Parse VPD (Vital Product Data).
903/* lpfc_parse_vpd */ 1008 * @phba: pointer to lpfc hba data structure.
904/* This routine will parse the VPD data */ 1009 * @vpd: pointer to the vital product data.
905/* */ 1010 * @len: length of the vital product data in bytes.
906/************************************************************************/ 1011 *
1012 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1013 * an array of characters. In this routine, the ModelName, ProgramType, and
1014 * ModelDesc, etc. fields of the phba data structure will be populated.
1015 *
1016 * Return codes
1017 * 0 - pointer to the VPD passed in is NULL
1018 * 1 - success
1019 **/
907static int 1020static int
908lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1021lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
909{ 1022{
@@ -1040,12 +1153,25 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1040 return(1); 1153 return(1);
1041} 1154}
1042 1155
1156/**
1157 * lpfc_get_hba_model_desc: Retrieve HBA device model name and description.
1158 * @phba: pointer to lpfc hba data structure.
1159 * @mdp: pointer to the data structure to hold the derived model name.
1160 * @descp: pointer to the data structure to hold the derived description.
1161 *
1162 * This routine retrieves HBA's description based on its registered PCI device
1163 * ID. The @descp passed into this function points to an array of 256 chars. It
1164 * shall be returned with the model name, maximum speed, and the host bus type.
1165 * The @mdp passed into this function points to an array of 80 chars. When the
1166 * function returns, the @mdp will be filled with the model name.
1167 **/
1043static void 1168static void
1044lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1169lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1045{ 1170{
1046 lpfc_vpd_t *vp; 1171 lpfc_vpd_t *vp;
1047 uint16_t dev_id = phba->pcidev->device; 1172 uint16_t dev_id = phba->pcidev->device;
1048 int max_speed; 1173 int max_speed;
1174 int GE = 0;
1049 struct { 1175 struct {
1050 char * name; 1176 char * name;
1051 int max_speed; 1177 int max_speed;
@@ -1177,6 +1303,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1177 case PCI_DEVICE_ID_SAT_S: 1303 case PCI_DEVICE_ID_SAT_S:
1178 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1304 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1179 break; 1305 break;
1306 case PCI_DEVICE_ID_HORNET:
1307 m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1308 GE = 1;
1309 break;
1310 case PCI_DEVICE_ID_PROTEUS_VF:
1311 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1312 break;
1313 case PCI_DEVICE_ID_PROTEUS_PF:
1314 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1315 break;
1316 case PCI_DEVICE_ID_PROTEUS_S:
1317 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1318 break;
1180 default: 1319 default:
1181 m = (typeof(m)){ NULL }; 1320 m = (typeof(m)){ NULL };
1182 break; 1321 break;
@@ -1186,18 +1325,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1186 snprintf(mdp, 79,"%s", m.name); 1325 snprintf(mdp, 79,"%s", m.name);
1187 if (descp && descp[0] == '\0') 1326 if (descp && descp[0] == '\0')
1188 snprintf(descp, 255, 1327 snprintf(descp, 255,
1189 "Emulex %s %dGb %s Fibre Channel Adapter", 1328 "Emulex %s %d%s %s %s",
1190 m.name, m.max_speed, m.bus); 1329 m.name, m.max_speed,
1330 (GE) ? "GE" : "Gb",
1331 m.bus,
1332 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
1191} 1333}
1192 1334
1193/**************************************************/ 1335/**
1194/* lpfc_post_buffer */ 1336 * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring.
1195/* */ 1337 * @phba: pointer to lpfc hba data structure.
1196/* This routine will post count buffers to the */ 1338 * @pring: pointer to a IOCB ring.
1197/* ring with the QUE_RING_BUF_CN command. This */ 1339 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1198/* allows 3 buffers / command to be posted. */ 1340 *
1199/* Returns the number of buffers NOT posted. */ 1341 * This routine posts a given number of IOCBs with the associated DMA buffer
1200/**************************************************/ 1342 * descriptors specified by the cnt argument to the given IOCB ring.
1343 *
1344 * Return codes
1345 * The number of IOCBs NOT able to be posted to the IOCB ring.
1346 **/
1201int 1347int
1202lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1348lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1203{ 1349{
@@ -1287,12 +1433,17 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1287 return 0; 1433 return 0;
1288} 1434}
1289 1435
1290/************************************************************************/ 1436/**
1291/* */ 1437 * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to ELS ring.
1292/* lpfc_post_rcv_buf */ 1438 * @phba: pointer to lpfc hba data structure.
1293/* This routine post initial rcv buffers to the configured rings */ 1439 *
1294/* */ 1440 * This routine posts initial receive IOCB buffers to the ELS ring. The
1295/************************************************************************/ 1441 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1442 * set to 64 IOCBs.
1443 *
1444 * Return codes
1445 * 0 - success (currently always success)
1446 **/
1296static int 1447static int
1297lpfc_post_rcv_buf(struct lpfc_hba *phba) 1448lpfc_post_rcv_buf(struct lpfc_hba *phba)
1298{ 1449{
@@ -1307,11 +1458,13 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
1307 1458
1308#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1459#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1309 1460
1310/************************************************************************/ 1461/**
1311/* */ 1462 * lpfc_sha_init: Set up initial array of hash table entries.
1312/* lpfc_sha_init */ 1463 * @HashResultPointer: pointer to an array as hash table.
1313/* */ 1464 *
1314/************************************************************************/ 1465 * This routine sets up the initial values to the array of hash table entries
1466 * for the LC HBAs.
1467 **/
1315static void 1468static void
1316lpfc_sha_init(uint32_t * HashResultPointer) 1469lpfc_sha_init(uint32_t * HashResultPointer)
1317{ 1470{
@@ -1322,11 +1475,16 @@ lpfc_sha_init(uint32_t * HashResultPointer)
1322 HashResultPointer[4] = 0xC3D2E1F0; 1475 HashResultPointer[4] = 0xC3D2E1F0;
1323} 1476}
1324 1477
1325/************************************************************************/ 1478/**
1326/* */ 1479 * lpfc_sha_iterate: Iterate initial hash table with the working hash table.
1327/* lpfc_sha_iterate */ 1480 * @HashResultPointer: pointer to an initial/result hash table.
1328/* */ 1481 * @HashWorkingPointer: pointer to an working hash table.
1329/************************************************************************/ 1482 *
1483 * This routine iterates an initial hash table pointed by @HashResultPointer
1484 * with the values from the working hash table pointeed by @HashWorkingPointer.
1485 * The results are putting back to the initial hash table, returned through
1486 * the @HashResultPointer as the result hash table.
1487 **/
1330static void 1488static void
1331lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1489lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1332{ 1490{
@@ -1374,22 +1532,29 @@ lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1374 1532
1375} 1533}
1376 1534
1377/************************************************************************/ 1535/**
1378/* */ 1536 * lpfc_challenge_key: Create challenge key based on WWPN of the HBA.
1379/* lpfc_challenge_key */ 1537 * @RandomChallenge: pointer to the entry of host challenge random number array.
1380/* */ 1538 * @HashWorking: pointer to the entry of the working hash array.
1381/************************************************************************/ 1539 *
1540 * This routine calculates the working hash array referred by @HashWorking
1541 * from the challenge random numbers associated with the host, referred by
1542 * @RandomChallenge. The result is put into the entry of the working hash
1543 * array and returned by reference through @HashWorking.
1544 **/
1382static void 1545static void
1383lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1546lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1384{ 1547{
1385 *HashWorking = (*RandomChallenge ^ *HashWorking); 1548 *HashWorking = (*RandomChallenge ^ *HashWorking);
1386} 1549}
1387 1550
1388/************************************************************************/ 1551/**
1389/* */ 1552 * lpfc_hba_init: Perform special handling for LC HBA initialization.
1390/* lpfc_hba_init */ 1553 * @phba: pointer to lpfc hba data structure.
1391/* */ 1554 * @hbainit: pointer to an array of unsigned 32-bit integers.
1392/************************************************************************/ 1555 *
1556 * This routine performs the special handling for LC HBA initialization.
1557 **/
1393void 1558void
1394lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1559lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1395{ 1560{
@@ -1412,6 +1577,15 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1412 kfree(HashWorking); 1577 kfree(HashWorking);
1413} 1578}
1414 1579
1580/**
1581 * lpfc_cleanup: Performs vport cleanups before deleting a vport.
1582 * @vport: pointer to a virtual N_Port data structure.
1583 *
1584 * This routine performs the necessary cleanups before deleting the @vport.
1585 * It invokes the discovery state machine to perform necessary state
1586 * transitions and to release the ndlps associated with the @vport. Note,
1587 * the physical port is treated as @vport 0.
1588 **/
1415void 1589void
1416lpfc_cleanup(struct lpfc_vport *vport) 1590lpfc_cleanup(struct lpfc_vport *vport)
1417{ 1591{
@@ -1459,14 +1633,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1459 lpfc_disc_state_machine(vport, ndlp, NULL, 1633 lpfc_disc_state_machine(vport, ndlp, NULL,
1460 NLP_EVT_DEVICE_RM); 1634 NLP_EVT_DEVICE_RM);
1461 1635
1462 /* nlp_type zero is not defined, nlp_flag zero also not defined,
1463 * nlp_state is unused, this happens when
1464 * an initiator has logged
1465 * into us so cleanup this ndlp.
1466 */
1467 if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
1468 (ndlp->nlp_state == 0))
1469 lpfc_nlp_put(ndlp);
1470 } 1636 }
1471 1637
1472 /* At this point, ALL ndlp's should be gone 1638 /* At this point, ALL ndlp's should be gone
@@ -1482,7 +1648,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
1482 &vport->fc_nodes, nlp_listp) { 1648 &vport->fc_nodes, nlp_listp) {
1483 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1649 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1484 LOG_NODE, 1650 LOG_NODE,
1485 "0282: did:x%x ndlp:x%p " 1651 "0282 did:x%x ndlp:x%p "
1486 "usgmap:x%x refcnt:%d\n", 1652 "usgmap:x%x refcnt:%d\n",
1487 ndlp->nlp_DID, (void *)ndlp, 1653 ndlp->nlp_DID, (void *)ndlp,
1488 ndlp->nlp_usg_map, 1654 ndlp->nlp_usg_map,
@@ -1498,6 +1664,14 @@ lpfc_cleanup(struct lpfc_vport *vport)
1498 return; 1664 return;
1499} 1665}
1500 1666
1667/**
1668 * lpfc_stop_vport_timers: Stop all the timers associated with a vport.
1669 * @vport: pointer to a virtual N_Port data structure.
1670 *
1671 * This routine stops all the timers associated with a @vport. This function
1672 * is invoked before disabling or deleting a @vport. Note that the physical
1673 * port is treated as @vport 0.
1674 **/
1501void 1675void
1502lpfc_stop_vport_timers(struct lpfc_vport *vport) 1676lpfc_stop_vport_timers(struct lpfc_vport *vport)
1503{ 1677{
@@ -1507,6 +1681,13 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1507 return; 1681 return;
1508} 1682}
1509 1683
1684/**
1685 * lpfc_stop_phba_timers: Stop all the timers associated with an HBA.
1686 * @phba: pointer to lpfc hba data structure.
1687 *
1688 * This routine stops all the timers associated with a HBA. This function is
1689 * invoked before either putting a HBA offline or unloading the driver.
1690 **/
1510static void 1691static void
1511lpfc_stop_phba_timers(struct lpfc_hba *phba) 1692lpfc_stop_phba_timers(struct lpfc_hba *phba)
1512{ 1693{
@@ -1516,9 +1697,20 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
1516 del_timer_sync(&phba->fabric_block_timer); 1697 del_timer_sync(&phba->fabric_block_timer);
1517 phba->hb_outstanding = 0; 1698 phba->hb_outstanding = 0;
1518 del_timer_sync(&phba->hb_tmofunc); 1699 del_timer_sync(&phba->hb_tmofunc);
1700 del_timer_sync(&phba->eratt_poll);
1519 return; 1701 return;
1520} 1702}
1521 1703
1704/**
1705 * lpfc_block_mgmt_io: Mark a HBA's management interface as blocked.
1706 * @phba: pointer to lpfc hba data structure.
1707 *
1708 * This routine marks a HBA's management interface as blocked. Once the HBA's
1709 * management interface is marked as blocked, all the user space access to
1710 * the HBA, whether they are from sysfs interface or libdfc interface will
1711 * all be blocked. The HBA is set to block the management interface when the
1712 * driver prepares the HBA interface for online or offline.
1713 **/
1522static void 1714static void
1523lpfc_block_mgmt_io(struct lpfc_hba * phba) 1715lpfc_block_mgmt_io(struct lpfc_hba * phba)
1524{ 1716{
@@ -1529,6 +1721,18 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
1529 spin_unlock_irqrestore(&phba->hbalock, iflag); 1721 spin_unlock_irqrestore(&phba->hbalock, iflag);
1530} 1722}
1531 1723
1724/**
1725 * lpfc_online: Initialize and bring a HBA online.
1726 * @phba: pointer to lpfc hba data structure.
1727 *
1728 * This routine initializes the HBA and brings a HBA online. During this
1729 * process, the management interface is blocked to prevent user space access
1730 * to the HBA interfering with the driver initialization.
1731 *
1732 * Return codes
1733 * 0 - successful
1734 * 1 - failed
1735 **/
1532int 1736int
1533lpfc_online(struct lpfc_hba *phba) 1737lpfc_online(struct lpfc_hba *phba)
1534{ 1738{
@@ -1574,6 +1778,17 @@ lpfc_online(struct lpfc_hba *phba)
1574 return 0; 1778 return 0;
1575} 1779}
1576 1780
1781/**
1782 * lpfc_unblock_mgmt_io: Mark a HBA's management interface to be not blocked.
1783 * @phba: pointer to lpfc hba data structure.
1784 *
1785 * This routine marks a HBA's management interface as not blocked. Once the
1786 * HBA's management interface is marked as not blocked, all the user space
1787 * access to the HBA, whether they are from sysfs interface or libdfc
1788 * interface will be allowed. The HBA is set to block the management interface
1789 * when the driver prepares the HBA interface for online or offline and then
1790 * set to unblock the management interface afterwards.
1791 **/
1577void 1792void
1578lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1793lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1579{ 1794{
@@ -1584,6 +1799,14 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1584 spin_unlock_irqrestore(&phba->hbalock, iflag); 1799 spin_unlock_irqrestore(&phba->hbalock, iflag);
1585} 1800}
1586 1801
1802/**
1803 * lpfc_offline_prep: Prepare a HBA to be brought offline.
1804 * @phba: pointer to lpfc hba data structure.
1805 *
1806 * This routine is invoked to prepare a HBA to be brought offline. It performs
1807 * unregistration login to all the nodes on all vports and flushes the mailbox
1808 * queue to make it ready to be brought offline.
1809 **/
1587void 1810void
1588lpfc_offline_prep(struct lpfc_hba * phba) 1811lpfc_offline_prep(struct lpfc_hba * phba)
1589{ 1812{
@@ -1633,6 +1856,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1633 lpfc_sli_flush_mbox_queue(phba); 1856 lpfc_sli_flush_mbox_queue(phba);
1634} 1857}
1635 1858
1859/**
1860 * lpfc_offline: Bring a HBA offline.
1861 * @phba: pointer to lpfc hba data structure.
1862 *
1863 * This routine actually brings a HBA offline. It stops all the timers
1864 * associated with the HBA, brings down the SLI layer, and eventually
1865 * marks the HBA as in offline state for the upper layer protocol.
1866 **/
1636void 1867void
1637lpfc_offline(struct lpfc_hba *phba) 1868lpfc_offline(struct lpfc_hba *phba)
1638{ 1869{
@@ -1670,12 +1901,17 @@ lpfc_offline(struct lpfc_hba *phba)
1670 lpfc_destroy_vport_work_array(phba, vports); 1901 lpfc_destroy_vport_work_array(phba, vports);
1671} 1902}
1672 1903
1673/****************************************************************************** 1904/**
1674* Function name: lpfc_scsi_free 1905 * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists.
1675* 1906 * @phba: pointer to lpfc hba data structure.
1676* Description: Called from lpfc_pci_remove_one free internal driver resources 1907 *
1677* 1908 * This routine is to free all the SCSI buffers and IOCBs from the driver
1678******************************************************************************/ 1909 * list back to kernel. It is called from lpfc_pci_remove_one to free
1910 * the internal resources before the device is removed from the system.
1911 *
1912 * Return codes
1913 * 0 - successful (for now, it always returns 0)
1914 **/
1679static int 1915static int
1680lpfc_scsi_free(struct lpfc_hba *phba) 1916lpfc_scsi_free(struct lpfc_hba *phba)
1681{ 1917{
@@ -1704,6 +1940,22 @@ lpfc_scsi_free(struct lpfc_hba *phba)
1704 return 0; 1940 return 0;
1705} 1941}
1706 1942
1943/**
1944 * lpfc_create_port: Create an FC port.
1945 * @phba: pointer to lpfc hba data structure.
1946 * @instance: a unique integer ID to this FC port.
1947 * @dev: pointer to the device data structure.
1948 *
1949 * This routine creates a FC port for the upper layer protocol. The FC port
1950 * can be created on top of either a physical port or a virtual port provided
1951 * by the HBA. This routine also allocates a SCSI host data structure (shost)
1952 * and associates the FC port created before adding the shost into the SCSI
1953 * layer.
1954 *
1955 * Return codes
1956 * @vport - pointer to the virtual N_Port data structure.
1957 * NULL - port create failed.
1958 **/
1707struct lpfc_vport * 1959struct lpfc_vport *
1708lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 1960lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
1709{ 1961{
@@ -1777,6 +2029,13 @@ out:
1777 return NULL; 2029 return NULL;
1778} 2030}
1779 2031
2032/**
2033 * destroy_port: Destroy an FC port.
2034 * @vport: pointer to an lpfc virtual N_Port data structure.
2035 *
2036 * This routine destroys a FC port from the upper layer protocol. All the
2037 * resources associated with the port are released.
2038 **/
1780void 2039void
1781destroy_port(struct lpfc_vport *vport) 2040destroy_port(struct lpfc_vport *vport)
1782{ 2041{
@@ -1797,6 +2056,16 @@ destroy_port(struct lpfc_vport *vport)
1797 return; 2056 return;
1798} 2057}
1799 2058
2059/**
2060 * lpfc_get_instance: Get a unique integer ID.
2061 *
2062 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2063 * uses the kernel idr facility to perform the task.
2064 *
2065 * Return codes:
2066 * instance - a unique integer ID allocated as the new instance.
2067 * -1 - lpfc get instance failed.
2068 **/
1800int 2069int
1801lpfc_get_instance(void) 2070lpfc_get_instance(void)
1802{ 2071{
@@ -1810,11 +2079,21 @@ lpfc_get_instance(void)
1810 return instance; 2079 return instance;
1811} 2080}
1812 2081
1813/* 2082/**
1814 * Note: there is no scan_start function as adapter initialization 2083 * lpfc_scan_finished: method for SCSI layer to detect whether scan is done.
1815 * will have asynchronously kicked off the link initialization. 2084 * @shost: pointer to SCSI host data structure.
1816 */ 2085 * @time: elapsed time of the scan in jiffies.
1817 2086 *
2087 * This routine is called by the SCSI layer with a SCSI host to determine
2088 * whether the scan host is finished.
2089 *
2090 * Note: there is no scan_start function as adapter initialization will have
2091 * asynchronously kicked off the link initialization.
2092 *
2093 * Return codes
2094 * 0 - SCSI host scan is not over yet.
2095 * 1 - SCSI host scan is over.
2096 **/
1818int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2097int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1819{ 2098{
1820 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2099 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1858,6 +2137,13 @@ finished:
1858 return stat; 2137 return stat;
1859} 2138}
1860 2139
2140/**
2141 * lpfc_host_attrib_init: Initialize SCSI host attributes on a FC port.
2142 * @shost: pointer to SCSI host data structure.
2143 *
2144 * This routine initializes a given SCSI host attributes on a FC port. The
2145 * SCSI host can be either on top of a physical port or a virtual port.
2146 **/
1861void lpfc_host_attrib_init(struct Scsi_Host *shost) 2147void lpfc_host_attrib_init(struct Scsi_Host *shost)
1862{ 2148{
1863 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2149 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1906,42 +2192,157 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
1906 spin_unlock_irq(shost->host_lock); 2192 spin_unlock_irq(shost->host_lock);
1907} 2193}
1908 2194
2195/**
2196 * lpfc_enable_msix: Enable MSI-X interrupt mode.
2197 * @phba: pointer to lpfc hba data structure.
2198 *
2199 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
2200 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
2201 * pci_enable_msix(), once invoked, enables either all or nothing, depending
2202 * on the current availability of PCI vector resources. The device driver is
2203 * responsible for calling the individual request_irq() to register each MSI-X
2204 * vector with a interrupt handler, which is done in this function. Note that
2205 * later when device is unloading, the driver should always call free_irq()
2206 * on all MSI-X vectors it has done request_irq() on before calling
2207 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
2208 * will be left with MSI-X enabled and leaks its vectors.
2209 *
2210 * Return codes
2211 * 0 - sucessful
2212 * other values - error
2213 **/
1909static int 2214static int
1910lpfc_enable_msix(struct lpfc_hba *phba) 2215lpfc_enable_msix(struct lpfc_hba *phba)
1911{ 2216{
1912 int error; 2217 int rc, i;
2218 LPFC_MBOXQ_t *pmb;
1913 2219
1914 phba->msix_entries[0].entry = 0; 2220 /* Set up MSI-X multi-message vectors */
1915 phba->msix_entries[0].vector = 0; 2221 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2222 phba->msix_entries[i].entry = i;
1916 2223
1917 error = pci_enable_msix(phba->pcidev, phba->msix_entries, 2224 /* Configure MSI-X capability structure */
2225 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
1918 ARRAY_SIZE(phba->msix_entries)); 2226 ARRAY_SIZE(phba->msix_entries));
1919 if (error) { 2227 if (rc) {
1920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1921 "0420 Enable MSI-X failed (%d), continuing " 2229 "0420 Enable MSI-X failed (%d), continuing "
1922 "with MSI\n", error); 2230 "with MSI\n", rc);
1923 pci_disable_msix(phba->pcidev); 2231 goto msi_fail_out;
1924 return error; 2232 } else
2233 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2234 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2235 "0477 MSI-X entry[%d]: vector=x%x "
2236 "message=%d\n", i,
2237 phba->msix_entries[i].vector,
2238 phba->msix_entries[i].entry);
2239 /*
2240 * Assign MSI-X vectors to interrupt handlers
2241 */
2242
2243 /* vector-0 is associated to slow-path handler */
2244 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2245 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
2246 if (rc) {
2247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2248 "0421 MSI-X slow-path request_irq failed "
2249 "(%d), continuing with MSI\n", rc);
2250 goto msi_fail_out;
1925 } 2251 }
1926 2252
1927 error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, 2253 /* vector-1 is associated to fast-path handler */
1928 LPFC_DRIVER_NAME, phba); 2254 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
1929 if (error) { 2255 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
2256
2257 if (rc) {
1930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1931 "0421 MSI-X request_irq failed (%d), " 2259 "0429 MSI-X fast-path request_irq failed "
1932 "continuing with MSI\n", error); 2260 "(%d), continuing with MSI\n", rc);
1933 pci_disable_msix(phba->pcidev); 2261 goto irq_fail_out;
1934 } 2262 }
1935 return error; 2263
2264 /*
2265 * Configure HBA MSI-X attention conditions to messages
2266 */
2267 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2268
2269 if (!pmb) {
2270 rc = -ENOMEM;
2271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2272 "0474 Unable to allocate memory for issuing "
2273 "MBOX_CONFIG_MSI command\n");
2274 goto mem_fail_out;
2275 }
2276 rc = lpfc_config_msi(phba, pmb);
2277 if (rc)
2278 goto mbx_fail_out;
2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2280 if (rc != MBX_SUCCESS) {
2281 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2282 "0351 Config MSI mailbox command failed, "
2283 "mbxCmd x%x, mbxStatus x%x\n",
2284 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
2285 goto mbx_fail_out;
2286 }
2287
2288 /* Free memory allocated for mailbox command */
2289 mempool_free(pmb, phba->mbox_mem_pool);
2290 return rc;
2291
2292mbx_fail_out:
2293 /* Free memory allocated for mailbox command */
2294 mempool_free(pmb, phba->mbox_mem_pool);
2295
2296mem_fail_out:
2297 /* free the irq already requested */
2298 free_irq(phba->msix_entries[1].vector, phba);
2299
2300irq_fail_out:
2301 /* free the irq already requested */
2302 free_irq(phba->msix_entries[0].vector, phba);
2303
2304msi_fail_out:
2305 /* Unconfigure MSI-X capability structure */
2306 pci_disable_msix(phba->pcidev);
2307 return rc;
1936} 2308}
1937 2309
2310/**
2311 * lpfc_disable_msix: Disable MSI-X interrupt mode.
2312 * @phba: pointer to lpfc hba data structure.
2313 *
2314 * This routine is invoked to release the MSI-X vectors and then disable the
2315 * MSI-X interrupt mode.
2316 **/
1938static void 2317static void
1939lpfc_disable_msix(struct lpfc_hba *phba) 2318lpfc_disable_msix(struct lpfc_hba *phba)
1940{ 2319{
1941 free_irq(phba->msix_entries[0].vector, phba); 2320 int i;
2321
2322 /* Free up MSI-X multi-message vectors */
2323 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2324 free_irq(phba->msix_entries[i].vector, phba);
2325 /* Disable MSI-X */
1942 pci_disable_msix(phba->pcidev); 2326 pci_disable_msix(phba->pcidev);
1943} 2327}
1944 2328
2329/**
2330 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
2331 * @pdev: pointer to PCI device
2332 * @pid: pointer to PCI device identifier
2333 *
2334 * This routine is to be registered to the kernel's PCI subsystem. When an
2335 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2336 * PCI device-specific information of the device and driver to see if the
2337 * driver state that it can support this kind of device. If the match is
2338 * successful, the driver core invokes this routine. If this routine
2339 * determines it can claim the HBA, it does all the initialization that it
2340 * needs to do to handle the HBA properly.
2341 *
2342 * Return code
2343 * 0 - driver can claim the device
2344 * negative value - driver can not claim the device
2345 **/
1945static int __devinit 2346static int __devinit
1946lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 2347lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1947{ 2348{
@@ -1956,6 +2357,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1956 int i, hbq_count; 2357 int i, hbq_count;
1957 uint16_t iotag; 2358 uint16_t iotag;
1958 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2359 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2360 struct lpfc_adapter_event_header adapter_event;
1959 2361
1960 if (pci_enable_device_mem(pdev)) 2362 if (pci_enable_device_mem(pdev))
1961 goto out; 2363 goto out;
@@ -1966,6 +2368,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1966 if (!phba) 2368 if (!phba)
1967 goto out_release_regions; 2369 goto out_release_regions;
1968 2370
2371 atomic_set(&phba->fast_event_count, 0);
1969 spin_lock_init(&phba->hbalock); 2372 spin_lock_init(&phba->hbalock);
1970 2373
1971 /* Initialize ndlp management spinlock */ 2374 /* Initialize ndlp management spinlock */
@@ -1978,6 +2381,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1978 goto out_free_phba; 2381 goto out_free_phba;
1979 2382
1980 INIT_LIST_HEAD(&phba->port_list); 2383 INIT_LIST_HEAD(&phba->port_list);
2384 init_waitqueue_head(&phba->wait_4_mlo_m_q);
1981 /* 2385 /*
1982 * Get all the module params for configuring this host and then 2386 * Get all the module params for configuring this host and then
1983 * establish the host. 2387 * establish the host.
@@ -2000,6 +2404,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2000 init_timer(&phba->fabric_block_timer); 2404 init_timer(&phba->fabric_block_timer);
2001 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 2405 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
2002 phba->fabric_block_timer.data = (unsigned long) phba; 2406 phba->fabric_block_timer.data = (unsigned long) phba;
2407 init_timer(&phba->eratt_poll);
2408 phba->eratt_poll.function = lpfc_poll_eratt;
2409 phba->eratt_poll.data = (unsigned long) phba;
2003 2410
2004 pci_set_master(pdev); 2411 pci_set_master(pdev);
2005 pci_try_set_mwi(pdev); 2412 pci_try_set_mwi(pdev);
@@ -2019,7 +2426,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2019 bar2map_len = pci_resource_len(phba->pcidev, 2); 2426 bar2map_len = pci_resource_len(phba->pcidev, 2);
2020 2427
2021 /* Map HBA SLIM to a kernel virtual address. */ 2428 /* Map HBA SLIM to a kernel virtual address. */
2022 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 2429 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
2023 if (!phba->slim_memmap_p) { 2430 if (!phba->slim_memmap_p) {
2024 error = -ENODEV; 2431 error = -ENODEV;
2025 dev_printk(KERN_ERR, &pdev->dev, 2432 dev_printk(KERN_ERR, &pdev->dev,
@@ -2037,12 +2444,18 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2037 } 2444 }
2038 2445
2039 /* Allocate memory for SLI-2 structures */ 2446 /* Allocate memory for SLI-2 structures */
2040 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 2447 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
2041 &phba->slim2p_mapping, GFP_KERNEL); 2448 SLI2_SLIM_SIZE,
2042 if (!phba->slim2p) 2449 &phba->slim2p.phys,
2450 GFP_KERNEL);
2451 if (!phba->slim2p.virt)
2043 goto out_iounmap; 2452 goto out_iounmap;
2044 2453
2045 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 2454 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
2455 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
2456 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2457 phba->IOCBs = (phba->slim2p.virt +
2458 offsetof(struct lpfc_sli2_slim, IOCBs));
2046 2459
2047 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 2460 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
2048 lpfc_sli_hbq_size(), 2461 lpfc_sli_hbq_size(),
@@ -2111,7 +2524,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2111 phba->fc_arbtov = FF_DEF_ARBTOV; 2524 phba->fc_arbtov = FF_DEF_ARBTOV;
2112 2525
2113 INIT_LIST_HEAD(&phba->work_list); 2526 INIT_LIST_HEAD(&phba->work_list);
2114 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2527 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
2115 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2528 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
2116 2529
2117 /* Initialize the wait queue head for the kernel thread */ 2530 /* Initialize the wait queue head for the kernel thread */
@@ -2146,21 +2559,42 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2146 pci_set_drvdata(pdev, shost); 2559 pci_set_drvdata(pdev, shost);
2147 phba->intr_type = NONE; 2560 phba->intr_type = NONE;
2148 2561
2562 phba->MBslimaddr = phba->slim_memmap_p;
2563 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
2564 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
2565 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2566 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2567
2568 /* Configure and enable interrupt */
2149 if (phba->cfg_use_msi == 2) { 2569 if (phba->cfg_use_msi == 2) {
2150 error = lpfc_enable_msix(phba); 2570 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2151 if (!error) 2571 error = lpfc_sli_config_port(phba, 3);
2152 phba->intr_type = MSIX; 2572 if (error)
2573 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2574 "0427 Firmware not capable of SLI 3 mode.\n");
2575 else {
2576 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2577 "0426 Firmware capable of SLI 3 mode.\n");
2578 /* Now, try to enable MSI-X interrupt mode */
2579 error = lpfc_enable_msix(phba);
2580 if (!error) {
2581 phba->intr_type = MSIX;
2582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2583 "0430 enable MSI-X mode.\n");
2584 }
2585 }
2153 } 2586 }
2154 2587
2155 /* Fallback to MSI if MSI-X initialization failed */ 2588 /* Fallback to MSI if MSI-X initialization failed */
2156 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2589 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2157 retval = pci_enable_msi(phba->pcidev); 2590 retval = pci_enable_msi(phba->pcidev);
2158 if (!retval) 2591 if (!retval) {
2159 phba->intr_type = MSI; 2592 phba->intr_type = MSI;
2160 else
2161 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2162 "0452 Enable MSI failed, continuing " 2594 "0473 enable MSI mode.\n");
2163 "with IRQ\n"); 2595 } else
2596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2597 "0452 enable IRQ mode.\n");
2164 } 2598 }
2165 2599
2166 /* MSI-X is the only case the doesn't need to call request_irq */ 2600 /* MSI-X is the only case the doesn't need to call request_irq */
@@ -2176,18 +2610,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2176 phba->intr_type = INTx; 2610 phba->intr_type = INTx;
2177 } 2611 }
2178 2612
2179 phba->MBslimaddr = phba->slim_memmap_p;
2180 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
2181 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
2182 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2183 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2184
2185 if (lpfc_alloc_sysfs_attr(vport)) { 2613 if (lpfc_alloc_sysfs_attr(vport)) {
2614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2615 "1476 Failed to allocate sysfs attr\n");
2186 error = -ENOMEM; 2616 error = -ENOMEM;
2187 goto out_free_irq; 2617 goto out_free_irq;
2188 } 2618 }
2189 2619
2190 if (lpfc_sli_hba_setup(phba)) { 2620 if (lpfc_sli_hba_setup(phba)) {
2621 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2622 "1477 Failed to set up hba\n");
2191 error = -ENODEV; 2623 error = -ENODEV;
2192 goto out_remove_device; 2624 goto out_remove_device;
2193 } 2625 }
@@ -2206,6 +2638,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2206 spin_unlock_irq(shost->host_lock); 2638 spin_unlock_irq(shost->host_lock);
2207 } 2639 }
2208 2640
2641 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2642 "0428 Perform SCSI scan\n");
2643 /* Send board arrival event to upper layer */
2644 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
2645 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
2646 fc_host_post_vendor_event(shost, fc_get_event_number(),
2647 sizeof(adapter_event),
2648 (char *) &adapter_event,
2649 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2650
2209 scsi_scan_host(shost); 2651 scsi_scan_host(shost);
2210 2652
2211 return 0; 2653 return 0;
@@ -2238,11 +2680,11 @@ out_free_iocbq:
2238 } 2680 }
2239 lpfc_mem_free(phba); 2681 lpfc_mem_free(phba);
2240out_free_hbqslimp: 2682out_free_hbqslimp:
2241 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2683 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
2242 phba->hbqslimp.phys); 2684 phba->hbqslimp.virt, phba->hbqslimp.phys);
2243out_free_slim: 2685out_free_slim:
2244 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 2686 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2245 phba->slim2p_mapping); 2687 phba->slim2p.virt, phba->slim2p.phys);
2246out_iounmap: 2688out_iounmap:
2247 iounmap(phba->ctrl_regs_memmap_p); 2689 iounmap(phba->ctrl_regs_memmap_p);
2248out_iounmap_slim: 2690out_iounmap_slim:
@@ -2262,6 +2704,14 @@ out:
2262 return error; 2704 return error;
2263} 2705}
2264 2706
2707/**
2708 * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem.
2709 * @pdev: pointer to PCI device
2710 *
2711 * This routine is to be registered to the kernel's PCI subsystem. When an
2712 * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup
2713 * for the HBA device to be removed from the PCI subsystem properly.
2714 **/
2265static void __devexit 2715static void __devexit
2266lpfc_pci_remove_one(struct pci_dev *pdev) 2716lpfc_pci_remove_one(struct pci_dev *pdev)
2267{ 2717{
@@ -2316,12 +2766,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2316 lpfc_scsi_free(phba); 2766 lpfc_scsi_free(phba);
2317 lpfc_mem_free(phba); 2767 lpfc_mem_free(phba);
2318 2768
2319 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2769 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
2320 phba->hbqslimp.phys); 2770 phba->hbqslimp.virt, phba->hbqslimp.phys);
2321 2771
2322 /* Free resources associated with SLI2 interface */ 2772 /* Free resources associated with SLI2 interface */
2323 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 2773 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2324 phba->slim2p, phba->slim2p_mapping); 2774 phba->slim2p.virt, phba->slim2p.phys);
2325 2775
2326 /* unmap adapter SLIM and Control Registers */ 2776 /* unmap adapter SLIM and Control Registers */
2327 iounmap(phba->ctrl_regs_memmap_p); 2777 iounmap(phba->ctrl_regs_memmap_p);
@@ -2336,13 +2786,21 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2336} 2786}
2337 2787
2338/** 2788/**
2339 * lpfc_io_error_detected - called when PCI error is detected 2789 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
2340 * @pdev: Pointer to PCI device 2790 * @pdev: pointer to PCI device.
2341 * @state: The current pci conneection state 2791 * @state: the current PCI connection state.
2342 * 2792 *
2343 * This function is called after a PCI bus error affecting 2793 * This routine is registered to the PCI subsystem for error handling. This
2344 * this device has been detected. 2794 * function is called by the PCI subsystem after a PCI bus error affecting
2345 */ 2795 * this device has been detected. When this function is invoked, it will
2796 * need to stop all the I/Os and interrupt(s) to the device. Once that is
2797 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
2798 * perform proper recovery as desired.
2799 *
2800 * Return codes
2801 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
2802 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
2803 **/
2346static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 2804static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2347 pci_channel_state_t state) 2805 pci_channel_state_t state)
2348{ 2806{
@@ -2351,8 +2809,15 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2351 struct lpfc_sli *psli = &phba->sli; 2809 struct lpfc_sli *psli = &phba->sli;
2352 struct lpfc_sli_ring *pring; 2810 struct lpfc_sli_ring *pring;
2353 2811
2354 if (state == pci_channel_io_perm_failure) 2812 if (state == pci_channel_io_perm_failure) {
2813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2814 "0472 PCI channel I/O permanent failure\n");
2815 /* Block all SCSI devices' I/Os on the host */
2816 lpfc_scsi_dev_block(phba);
2817 /* Clean up all driver's outstanding SCSI I/Os */
2818 lpfc_sli_flush_fcp_rings(phba);
2355 return PCI_ERS_RESULT_DISCONNECT; 2819 return PCI_ERS_RESULT_DISCONNECT;
2820 }
2356 2821
2357 pci_disable_device(pdev); 2822 pci_disable_device(pdev);
2358 /* 2823 /*
@@ -2376,10 +2841,21 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2376} 2841}
2377 2842
2378/** 2843/**
2379 * lpfc_io_slot_reset - called after the pci bus has been reset. 2844 * lpfc_io_slot_reset: Restart a PCI device from scratch.
2380 * @pdev: Pointer to PCI device 2845 * @pdev: pointer to PCI device.
2846 *
2847 * This routine is registered to the PCI subsystem for error handling. This is
2848 * called after PCI bus has been reset to restart the PCI card from scratch,
2849 * as if from a cold-boot. During the PCI subsystem error recovery, after the
2850 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
2851 * proper error recovery and then call this routine before calling the .resume
2852 * method to recover the device. This function will initialize the HBA device,
2853 * enable the interrupt, but it will just put the HBA to offline state without
2854 * passing any I/O traffic.
2381 * 2855 *
2382 * Restart the card from scratch, as if from a cold-boot. 2856 * Return codes
2857 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
2858 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
2383 */ 2859 */
2384static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 2860static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2385{ 2861{
@@ -2404,20 +2880,34 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2404 /* Enable configured interrupt method */ 2880 /* Enable configured interrupt method */
2405 phba->intr_type = NONE; 2881 phba->intr_type = NONE;
2406 if (phba->cfg_use_msi == 2) { 2882 if (phba->cfg_use_msi == 2) {
2407 error = lpfc_enable_msix(phba); 2883 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2408 if (!error) 2884 error = lpfc_sli_config_port(phba, 3);
2409 phba->intr_type = MSIX; 2885 if (error)
2886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2887 "0478 Firmware not capable of SLI 3 mode.\n");
2888 else {
2889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2890 "0479 Firmware capable of SLI 3 mode.\n");
2891 /* Now, try to enable MSI-X interrupt mode */
2892 error = lpfc_enable_msix(phba);
2893 if (!error) {
2894 phba->intr_type = MSIX;
2895 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2896 "0480 enable MSI-X mode.\n");
2897 }
2898 }
2410 } 2899 }
2411 2900
2412 /* Fallback to MSI if MSI-X initialization failed */ 2901 /* Fallback to MSI if MSI-X initialization failed */
2413 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2902 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2414 retval = pci_enable_msi(phba->pcidev); 2903 retval = pci_enable_msi(phba->pcidev);
2415 if (!retval) 2904 if (!retval) {
2416 phba->intr_type = MSI; 2905 phba->intr_type = MSI;
2417 else
2418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2419 "0470 Enable MSI failed, continuing " 2907 "0481 enable MSI mode.\n");
2420 "with IRQ\n"); 2908 } else
2909 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2910 "0470 enable IRQ mode.\n");
2421 } 2911 }
2422 2912
2423 /* MSI-X is the only case the doesn't need to call request_irq */ 2913 /* MSI-X is the only case the doesn't need to call request_irq */
@@ -2440,11 +2930,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2440} 2930}
2441 2931
2442/** 2932/**
2443 * lpfc_io_resume - called when traffic can start flowing again. 2933 * lpfc_io_resume: Resume PCI I/O operation.
2444 * @pdev: Pointer to PCI device 2934 * @pdev: pointer to PCI device
2445 * 2935 *
2446 * This callback is called when the error recovery driver tells us that 2936 * This routine is registered to the PCI subsystem for error handling. It is
2447 * its OK to resume normal operation. 2937 * called when kernel error recovery tells the lpfc driver that it is ok to
2938 * resume normal PCI operation after PCI bus error recovery. After this call,
2939 * traffic can start to flow from this device again.
2448 */ 2940 */
2449static void lpfc_io_resume(struct pci_dev *pdev) 2941static void lpfc_io_resume(struct pci_dev *pdev)
2450{ 2942{
@@ -2491,6 +2983,8 @@ static struct pci_device_id lpfc_id_table[] = {
2491 PCI_ANY_ID, PCI_ANY_ID, }, 2983 PCI_ANY_ID, PCI_ANY_ID, },
2492 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 2984 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
2493 PCI_ANY_ID, PCI_ANY_ID, }, 2985 PCI_ANY_ID, PCI_ANY_ID, },
2986 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
2987 PCI_ANY_ID, PCI_ANY_ID, },
2494 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 2988 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
2495 PCI_ANY_ID, PCI_ANY_ID, }, 2989 PCI_ANY_ID, PCI_ANY_ID, },
2496 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 2990 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -2521,6 +3015,12 @@ static struct pci_device_id lpfc_id_table[] = {
2521 PCI_ANY_ID, PCI_ANY_ID, }, 3015 PCI_ANY_ID, PCI_ANY_ID, },
2522 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 3016 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
2523 PCI_ANY_ID, PCI_ANY_ID, }, 3017 PCI_ANY_ID, PCI_ANY_ID, },
3018 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
3019 PCI_ANY_ID, PCI_ANY_ID, },
3020 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
3021 PCI_ANY_ID, PCI_ANY_ID, },
3022 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3023 PCI_ANY_ID, PCI_ANY_ID, },
2524 { 0 } 3024 { 0 }
2525}; 3025};
2526 3026
@@ -2540,6 +3040,18 @@ static struct pci_driver lpfc_driver = {
2540 .err_handler = &lpfc_err_handler, 3040 .err_handler = &lpfc_err_handler,
2541}; 3041};
2542 3042
3043/**
3044 * lpfc_init: lpfc module initialization routine.
3045 *
3046 * This routine is to be invoked when the lpfc module is loaded into the
3047 * kernel. The special kernel macro module_init() is used to indicate the
3048 * role of this routine to the kernel as lpfc module entry point.
3049 *
3050 * Return codes
3051 * 0 - successful
3052 * -ENOMEM - FC attach transport failed
3053 * all others - failed
3054 */
2543static int __init 3055static int __init
2544lpfc_init(void) 3056lpfc_init(void)
2545{ 3057{
@@ -2567,12 +3079,20 @@ lpfc_init(void)
2567 error = pci_register_driver(&lpfc_driver); 3079 error = pci_register_driver(&lpfc_driver);
2568 if (error) { 3080 if (error) {
2569 fc_release_transport(lpfc_transport_template); 3081 fc_release_transport(lpfc_transport_template);
2570 fc_release_transport(lpfc_vport_transport_template); 3082 if (lpfc_enable_npiv)
3083 fc_release_transport(lpfc_vport_transport_template);
2571 } 3084 }
2572 3085
2573 return error; 3086 return error;
2574} 3087}
2575 3088
3089/**
3090 * lpfc_exit: lpfc module removal routine.
3091 *
3092 * This routine is invoked when the lpfc module is removed from the kernel.
3093 * The special kernel macro module_exit() is used to indicate the role of
3094 * this routine to the kernel as lpfc module exit point.
3095 */
2576static void __exit 3096static void __exit
2577lpfc_exit(void) 3097lpfc_exit(void)
2578{ 3098{
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7a9be4c5b7cb..7465fe746fe9 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
30 30
31#include "lpfc_hw.h" 31#include "lpfc_hw.h"
32#include "lpfc_sli.h" 32#include "lpfc_sli.h"
33#include "lpfc_nl.h"
33#include "lpfc_disc.h" 34#include "lpfc_disc.h"
34#include "lpfc_scsi.h" 35#include "lpfc_scsi.h"
35#include "lpfc.h" 36#include "lpfc.h"
@@ -37,10 +38,20 @@
37#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
38#include "lpfc_compat.h" 39#include "lpfc_compat.h"
39 40
40/**********************************************/ 41/**
41 42 * lpfc_dump_mem: Prepare a mailbox command for retrieving HBA's VPD memory.
42/* mailbox command */ 43 * @phba: pointer to lpfc hba data structure.
43/**********************************************/ 44 * @pmb: pointer to the driver internal queue element for mailbox command.
45 * @offset: offset for dumping VPD memory mailbox command.
46 *
47 * The dump mailbox command provides a method for the device driver to obtain
48 * various types of information from the HBA device.
49 *
50 * This routine prepares the mailbox command for dumping HBA Vital Product
51 * Data (VPD) memory. This mailbox command is to be used for retrieving a
52 * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
53 * offset specified by the offset parameter.
54 **/
44void 55void
45lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) 56lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
46{ 57{
@@ -65,10 +76,17 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
65 return; 76 return;
66} 77}
67 78
68/**********************************************/ 79/**
69/* lpfc_read_nv Issue a READ NVPARAM */ 80 * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
70/* mailbox command */ 81 * @phba: pointer to lpfc hba data structure.
71/**********************************************/ 82 * @pmb: pointer to the driver internal queue element for mailbox command.
83 *
84 * The read NVRAM mailbox command returns the HBA's non-volatile parameters
85 * that are used as defaults when the Fibre Channel link is brought on-line.
86 *
87 * This routine prepares the mailbox command for reading information stored
88 * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
89 **/
72void 90void
73lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 91lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
74{ 92{
@@ -81,10 +99,19 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
81 return; 99 return;
82} 100}
83 101
84/**********************************************/ 102/**
85/* lpfc_config_async Issue a */ 103 * lpfc_config_async: Prepare a mailbox command for enabling HBA async event.
86/* MBX_ASYNC_EVT_ENABLE mailbox command */ 104 * @phba: pointer to lpfc hba data structure.
87/**********************************************/ 105 * @pmb: pointer to the driver internal queue element for mailbox command.
106 * @ring: ring number for the asynchronous event to be configured.
107 *
108 * The asynchronous event enable mailbox command is used to enable the
109 * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
110 * specifies the default ring to which events are posted.
111 *
112 * This routine prepares the mailbox command for enabling HBA asynchronous
113 * event support on a IOCB ring.
114 **/
88void 115void
89lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 116lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
90 uint32_t ring) 117 uint32_t ring)
@@ -99,10 +126,19 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
99 return; 126 return;
100} 127}
101 128
102/**********************************************/ 129/**
103/* lpfc_heart_beat Issue a HEART_BEAT */ 130 * lpfc_heart_beat: Prepare a mailbox command for heart beat.
104/* mailbox command */ 131 * @phba: pointer to lpfc hba data structure.
105/**********************************************/ 132 * @pmb: pointer to the driver internal queue element for mailbox command.
133 *
134 * The heart beat mailbox command is used to detect an unresponsive HBA, which
135 * is defined as any device where no error attention is sent and both mailbox
136 * and rings are not processed.
137 *
138 * This routine prepares the mailbox command for issuing a heart beat in the
139 * form of mailbox command to the HBA. The timely completion of the heart
140 * beat mailbox command indicates the health of the HBA.
141 **/
106void 142void
107lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 143lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
108{ 144{
@@ -115,10 +151,26 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
115 return; 151 return;
116} 152}
117 153
118/**********************************************/ 154/**
119/* lpfc_read_la Issue a READ LA */ 155 * lpfc_read_la: Prepare a mailbox command for reading HBA link attention.
120/* mailbox command */ 156 * @phba: pointer to lpfc hba data structure.
121/**********************************************/ 157 * @pmb: pointer to the driver internal queue element for mailbox command.
158 * @mp: DMA buffer memory for reading the link attention information into.
159 *
160 * The read link attention mailbox command is issued to read the Link Event
161 * Attention information indicated by the HBA port when the Link Event bit
162 * of the Host Attention (HSTATT) register is set to 1. A Link Event
163 * Attention occurs based on an exception detected at the Fibre Channel link
164 * interface.
165 *
166 * This routine prepares the mailbox command for reading HBA link attention
167 * information. A DMA memory has been set aside and address passed to the
168 * HBA through @mp for the HBA to DMA link attention information into the
169 * memory as part of the execution of the mailbox command.
170 *
171 * Return codes
172 * 0 - Success (currently always return 0)
173 **/
122int 174int
123lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) 175lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
124{ 176{
@@ -143,10 +195,21 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
143 return (0); 195 return (0);
144} 196}
145 197
146/**********************************************/ 198/**
147/* lpfc_clear_la Issue a CLEAR LA */ 199 * lpfc_clear_la: Prepare a mailbox command for clearing HBA link attention.
148/* mailbox command */ 200 * @phba: pointer to lpfc hba data structure.
149/**********************************************/ 201 * @pmb: pointer to the driver internal queue element for mailbox command.
202 *
203 * The clear link attention mailbox command is issued to clear the link event
204 * attention condition indicated by the Link Event bit of the Host Attention
205 * (HSTATT) register. The link event attention condition is cleared only if
206 * the event tag specified matches that of the current link event counter.
207 * The current event tag is read using the read link attention event mailbox
208 * command.
209 *
210 * This routine prepares the mailbox command for clearing HBA link attention
211 * information.
212 **/
150void 213void
151lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 214lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
152{ 215{
@@ -161,10 +224,20 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
161 return; 224 return;
162} 225}
163 226
164/**************************************************/ 227/**
165/* lpfc_config_link Issue a CONFIG LINK */ 228 * lpfc_config_link: Prepare a mailbox command for configuring link on a HBA.
166/* mailbox command */ 229 * @phba: pointer to lpfc hba data structure.
167/**************************************************/ 230 * @pmb: pointer to the driver internal queue element for mailbox command.
231 *
232 * The configure link mailbox command is used before the initialize link
233 * mailbox command to override default value and to configure link-oriented
234 * parameters such as DID address and various timers. Typically, this
235 * command would be used after an F_Port login to set the returned DID address
236 * and the fabric timeout values. This command is not valid before a configure
237 * port command has configured the HBA port.
238 *
239 * This routine prepares the mailbox command for configuring link on a HBA.
240 **/
168void 241void
169lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 242lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
170{ 243{
@@ -199,10 +272,98 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
199 return; 272 return;
200} 273}
201 274
202/**********************************************/ 275/**
203/* lpfc_init_link Issue an INIT LINK */ 276 * lpfc_config_msi: Prepare a mailbox command for configuring msi-x.
204/* mailbox command */ 277 * @phba: pointer to lpfc hba data structure.
205/**********************************************/ 278 * @pmb: pointer to the driver internal queue element for mailbox command.
279 *
280 * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
281 * MSI-X multi-message interrupt vector association to interrupt attention
282 * conditions.
283 *
284 * Return codes
285 * 0 - Success
286 * -EINVAL - Failure
287 **/
288int
289lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
290{
291 MAILBOX_t *mb = &pmb->mb;
292 uint32_t attentionConditions[2];
293
294 /* Sanity check */
295 if (phba->cfg_use_msi != 2) {
296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
297 "0475 Not configured for supporting MSI-X "
298 "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
299 return -EINVAL;
300 }
301
302 if (phba->sli_rev < 3) {
303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
304 "0476 HBA not supporting SLI-3 or later "
305 "SLI Revision: 0x%x\n", phba->sli_rev);
306 return -EINVAL;
307 }
308
309 /* Clear mailbox command fields */
310 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
311
312 /*
313 * SLI-3, Message Signaled Interrupt Fearure.
314 */
315
316 /* Multi-message attention configuration */
317 attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
318 HA_LATT | HA_MBATT);
319 attentionConditions[1] = 0;
320
321 mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
322 mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
323
324 /*
325 * Set up message number to HA bit association
326 */
327#ifdef __BIG_ENDIAN_BITFIELD
328 /* RA0 (FCP Ring) */
329 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
330 /* RA1 (Other Protocol Extra Ring) */
331 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
332#else /* __LITTLE_ENDIAN_BITFIELD */
333 /* RA0 (FCP Ring) */
334 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
335 /* RA1 (Other Protocol Extra Ring) */
336 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
337#endif
338 /* Multi-message interrupt autoclear configuration*/
339 mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
340 mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
341
342 /* For now, HBA autoclear does not work reliably, disable it */
343 mb->un.varCfgMSI.autoClearHA[0] = 0;
344 mb->un.varCfgMSI.autoClearHA[1] = 0;
345
346 /* Set command and owner bit */
347 mb->mbxCommand = MBX_CONFIG_MSI;
348 mb->mbxOwner = OWN_HOST;
349
350 return 0;
351}
352
353/**
354 * lpfc_init_link: Prepare a mailbox command for initialize link on a HBA.
355 * @phba: pointer to lpfc hba data structure.
356 * @pmb: pointer to the driver internal queue element for mailbox command.
357 * @topology: the link topology for the link to be initialized to.
358 * @linkspeed: the link speed for the link to be initialized to.
359 *
360 * The initialize link mailbox command is used to initialize the Fibre
361 * Channel link. This command must follow a configure port command that
362 * establishes the mode of operation.
363 *
364 * This routine prepares the mailbox command for initializing link on a HBA
365 * with the specified link topology and speed.
366 **/
206void 367void
207lpfc_init_link(struct lpfc_hba * phba, 368lpfc_init_link(struct lpfc_hba * phba,
208 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) 369 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
@@ -269,10 +430,27 @@ lpfc_init_link(struct lpfc_hba * phba,
269 return; 430 return;
270} 431}
271 432
272/**********************************************/ 433/**
273/* lpfc_read_sparam Issue a READ SPARAM */ 434 * lpfc_read_sparam: Prepare a mailbox command for reading HBA parameters.
274/* mailbox command */ 435 * @phba: pointer to lpfc hba data structure.
275/**********************************************/ 436 * @pmb: pointer to the driver internal queue element for mailbox command.
437 * @vpi: virtual N_Port identifier.
438 *
439 * The read service parameter mailbox command is used to read the HBA port
440 * service parameters. The service parameters are read into the buffer
441 * specified directly by a BDE in the mailbox command. These service
442 * parameters may then be used to build the payload of an N_Port/F_POrt
443 * login request and reply (LOGI/ACC).
444 *
445 * This routine prepares the mailbox command for reading HBA port service
446 * parameters. The DMA memory is allocated in this function and the addresses
447 * are populated into the mailbox command for the HBA to DMA the service
448 * parameters into.
449 *
450 * Return codes
451 * 0 - Success
452 * 1 - DMA memory allocation failed
453 **/
276int 454int
277lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) 455lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
278{ 456{
@@ -312,10 +490,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
312 return (0); 490 return (0);
313} 491}
314 492
315/********************************************/ 493/**
316/* lpfc_unreg_did Issue a UNREG_DID */ 494 * lpfc_unreg_did: Prepare a mailbox command for unregistering DID.
317/* mailbox command */ 495 * @phba: pointer to lpfc hba data structure.
318/********************************************/ 496 * @vpi: virtual N_Port identifier.
497 * @did: remote port identifier.
498 * @pmb: pointer to the driver internal queue element for mailbox command.
499 *
500 * The unregister DID mailbox command is used to unregister an N_Port/F_Port
501 * login for an unknown RPI by specifying the DID of a remote port. This
502 * command frees an RPI context in the HBA port. This has the effect of
503 * performing an implicit N_Port/F_Port logout.
504 *
505 * This routine prepares the mailbox command for unregistering a remote
506 * N_Port/F_Port (DID) login.
507 **/
319void 508void
320lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, 509lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
321 LPFC_MBOXQ_t * pmb) 510 LPFC_MBOXQ_t * pmb)
@@ -333,10 +522,19 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
333 return; 522 return;
334} 523}
335 524
336/**********************************************/ 525/**
337/* lpfc_read_nv Issue a READ CONFIG */ 526 * lpfc_read_config: Prepare a mailbox command for reading HBA configuration.
338/* mailbox command */ 527 * @phba: pointer to lpfc hba data structure.
339/**********************************************/ 528 * @pmb: pointer to the driver internal queue element for mailbox command.
529 *
530 * The read configuration mailbox command is used to read the HBA port
531 * configuration parameters. This mailbox command provides a method for
532 * seeing any parameters that may have changed via various configuration
533 * mailbox commands.
534 *
535 * This routine prepares the mailbox command for reading out HBA configuration
536 * parameters.
537 **/
340void 538void
341lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 539lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
342{ 540{
@@ -350,10 +548,18 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
350 return; 548 return;
351} 549}
352 550
353/*************************************************/ 551/**
354/* lpfc_read_lnk_stat Issue a READ LINK STATUS */ 552 * lpfc_read_lnk_stat: Prepare a mailbox command for reading HBA link stats.
355/* mailbox command */ 553 * @phba: pointer to lpfc hba data structure.
356/*************************************************/ 554 * @pmb: pointer to the driver internal queue element for mailbox command.
555 *
556 * The read link status mailbox command is used to read the link status from
557 * the HBA. Link status includes all link-related error counters. These
558 * counters are maintained by the HBA and originated in the link hardware
559 * unit. Note that all of these counters wrap.
560 *
561 * This routine prepares the mailbox command for reading out HBA link status.
562 **/
357void 563void
358lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 564lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
359{ 565{
@@ -367,10 +573,30 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
367 return; 573 return;
368} 574}
369 575
370/********************************************/ 576/**
371/* lpfc_reg_login Issue a REG_LOGIN */ 577 * lpfc_reg_login: Prepare a mailbox command for registering remote login.
372/* mailbox command */ 578 * @phba: pointer to lpfc hba data structure.
373/********************************************/ 579 * @vpi: virtual N_Port identifier.
580 * @did: remote port identifier.
581 * @param: pointer to memory holding the server parameters.
582 * @pmb: pointer to the driver internal queue element for mailbox command.
583 * @flag: action flag to be passed back for the complete function.
584 *
585 * The registration login mailbox command is used to register an N_Port or
586 * F_Port login. This registration allows the HBA to cache the remote N_Port
587 * service parameters internally and thereby make the appropriate FC-2
588 * decisions. The remote port service parameters are handed off by the driver
589 * to the HBA using a descriptor entry that directly identifies a buffer in
590 * host memory. In exchange, the HBA returns an RPI identifier.
591 *
592 * This routine prepares the mailbox command for registering remote port login.
593 * The function allocates DMA buffer for passing the service parameters to the
594 * HBA with the mailbox command.
595 *
596 * Return codes
597 * 0 - Success
598 * 1 - DMA memory allocation failed
599 **/
374int 600int
375lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 601lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
376 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 602 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
@@ -418,10 +644,20 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
418 return (0); 644 return (0);
419} 645}
420 646
421/**********************************************/ 647/**
422/* lpfc_unreg_login Issue a UNREG_LOGIN */ 648 * lpfc_unreg_login: Prepare a mailbox command for unregistering remote login.
423/* mailbox command */ 649 * @phba: pointer to lpfc hba data structure.
424/**********************************************/ 650 * @vpi: virtual N_Port identifier.
651 * @rpi: remote port identifier
652 * @pmb: pointer to the driver internal queue element for mailbox command.
653 *
654 * The unregistration login mailbox command is used to unregister an N_Port
655 * or F_Port login. This command frees an RPI context in the HBA. It has the
656 * effect of performing an implicit N_Port/F_Port logout.
657 *
658 * This routine prepares the mailbox command for unregistering remote port
659 * login.
660 **/
425void 661void
426lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 662lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
427 LPFC_MBOXQ_t * pmb) 663 LPFC_MBOXQ_t * pmb)
@@ -440,10 +676,21 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
440 return; 676 return;
441} 677}
442 678
443/**************************************************/ 679/**
444/* lpfc_reg_vpi Issue a REG_VPI */ 680 * lpfc_reg_vpi: Prepare a mailbox command for registering vport identifier.
445/* mailbox command */ 681 * @phba: pointer to lpfc hba data structure.
446/**************************************************/ 682 * @vpi: virtual N_Port identifier.
683 * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
684 * @pmb: pointer to the driver internal queue element for mailbox command.
685 *
686 * The registration vport identifier mailbox command is used to activate a
687 * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
688 * N_Port_ID against the information in the selected virtual N_Port context
689 * block and marks it active to allow normal processing of IOCB commands and
690 * received unsolicited exchanges.
691 *
692 * This routine prepares the mailbox command for registering a virtual N_Port.
693 **/
447void 694void
448lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 695lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
449 LPFC_MBOXQ_t *pmb) 696 LPFC_MBOXQ_t *pmb)
@@ -461,10 +708,22 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
461 708
462} 709}
463 710
464/**************************************************/ 711/**
465/* lpfc_unreg_vpi Issue a UNREG_VNPI */ 712 * lpfc_unreg_vpi: Prepare a mailbox command for unregistering vport id.
466/* mailbox command */ 713 * @phba: pointer to lpfc hba data structure.
467/**************************************************/ 714 * @vpi: virtual N_Port identifier.
715 * @pmb: pointer to the driver internal queue element for mailbox command.
716 *
717 * The unregistration vport identifier mailbox command is used to inactivate
718 * a virtual N_Port. The driver must have logged out and unregistered all
719 * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
720 * unregisters any default RPIs associated with the specified vpi, aborting
721 * any active exchanges. The HBA will post the mailbox response after making
722 * the virtual N_Port inactive.
723 *
724 * This routine prepares the mailbox command for unregistering a virtual
725 * N_Port.
726 **/
468void 727void
469lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 728lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
470{ 729{
@@ -479,12 +738,19 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
479 738
480} 739}
481 740
741/**
742 * lpfc_config_pcb_setup: Set up IOCB rings in the Port Control Block (PCB)
743 * @phba: pointer to lpfc hba data structure.
744 *
745 * This routine sets up and initializes the IOCB rings in the Port Control
746 * Block (PCB).
747 **/
482static void 748static void
483lpfc_config_pcb_setup(struct lpfc_hba * phba) 749lpfc_config_pcb_setup(struct lpfc_hba * phba)
484{ 750{
485 struct lpfc_sli *psli = &phba->sli; 751 struct lpfc_sli *psli = &phba->sli;
486 struct lpfc_sli_ring *pring; 752 struct lpfc_sli_ring *pring;
487 PCB_t *pcbp = &phba->slim2p->pcb; 753 PCB_t *pcbp = phba->pcb;
488 dma_addr_t pdma_addr; 754 dma_addr_t pdma_addr;
489 uint32_t offset; 755 uint32_t offset;
490 uint32_t iocbCnt = 0; 756 uint32_t iocbCnt = 0;
@@ -513,29 +779,43 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
513 continue; 779 continue;
514 } 780 }
515 /* Command ring setup for ring */ 781 /* Command ring setup for ring */
516 pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; 782 pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
517 pcbp->rdsc[i].cmdEntries = pring->numCiocb; 783 pcbp->rdsc[i].cmdEntries = pring->numCiocb;
518 784
519 offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] - 785 offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
520 (uint8_t *) phba->slim2p; 786 (uint8_t *) phba->slim2p.virt;
521 pdma_addr = phba->slim2p_mapping + offset; 787 pdma_addr = phba->slim2p.phys + offset;
522 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 788 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
523 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 789 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
524 iocbCnt += pring->numCiocb; 790 iocbCnt += pring->numCiocb;
525 791
526 /* Response ring setup for ring */ 792 /* Response ring setup for ring */
527 pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; 793 pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
528 794
529 pcbp->rdsc[i].rspEntries = pring->numRiocb; 795 pcbp->rdsc[i].rspEntries = pring->numRiocb;
530 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 796 offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
531 (uint8_t *)phba->slim2p; 797 (uint8_t *)phba->slim2p.virt;
532 pdma_addr = phba->slim2p_mapping + offset; 798 pdma_addr = phba->slim2p.phys + offset;
533 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); 799 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
534 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); 800 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
535 iocbCnt += pring->numRiocb; 801 iocbCnt += pring->numRiocb;
536 } 802 }
537} 803}
538 804
805/**
806 * lpfc_read_rev: Prepare a mailbox command for reading HBA revision.
807 * @phba: pointer to lpfc hba data structure.
808 * @pmb: pointer to the driver internal queue element for mailbox command.
809 *
810 * The read revision mailbox command is used to read the revision levels of
811 * the HBA components. These components include hardware units, resident
812 * firmware, and available firmware. HBAs that supports SLI-3 mode of
813 * operation provide different response information depending on the version
814 * requested by the driver.
815 *
816 * This routine prepares the mailbox command for reading HBA revision
817 * information.
818 **/
539void 819void
540lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 820lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
541{ 821{
@@ -548,6 +828,16 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
548 return; 828 return;
549} 829}
550 830
831/**
832 * lpfc_build_hbq_profile2: Set up the HBQ Selection Profile 2.
833 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
834 * @hbq_desc: pointer to the HBQ selection profile descriptor.
835 *
836 * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
837 * tests the incoming frames' R_CTL/TYPE fields with works 10:15 and performs
838 * the Sequence Length Test using the fields in the Selection Profile 2
839 * extension in words 20:31.
840 **/
551static void 841static void
552lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, 842lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
553 struct lpfc_hbq_init *hbq_desc) 843 struct lpfc_hbq_init *hbq_desc)
@@ -557,6 +847,16 @@ lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
557 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; 847 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
558} 848}
559 849
850/**
851 * lpfc_build_hbq_profile3: Set up the HBQ Selection Profile 3.
852 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
853 * @hbq_desc: pointer to the HBQ selection profile descriptor.
854 *
855 * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
856 * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
857 * the Sequence Length Test and Byte Field Test using the fields in the
858 * Selection Profile 3 extension in words 20:31.
859 **/
560static void 860static void
561lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, 861lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
562 struct lpfc_hbq_init *hbq_desc) 862 struct lpfc_hbq_init *hbq_desc)
@@ -569,6 +869,17 @@ lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
569 sizeof(hbqmb->profiles.profile3.cmdmatch)); 869 sizeof(hbqmb->profiles.profile3.cmdmatch));
570} 870}
571 871
872/**
873 * lpfc_build_hbq_profile5: Set up the HBQ Selection Profile 5.
874 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
875 * @hbq_desc: pointer to the HBQ selection profile descriptor.
876 *
877 * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
878 * HBA tests the initial frame of an incoming sequence using the frame's
879 * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
880 * and Byte Field Test using the fields in the Selection Profile 5 extension
881 * words 20:31.
882 **/
572static void 883static void
573lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, 884lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
574 struct lpfc_hbq_init *hbq_desc) 885 struct lpfc_hbq_init *hbq_desc)
@@ -581,6 +892,20 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
581 sizeof(hbqmb->profiles.profile5.cmdmatch)); 892 sizeof(hbqmb->profiles.profile5.cmdmatch));
582} 893}
583 894
895/**
896 * lpfc_config_hbq: Prepare a mailbox command for configuring an HBQ.
897 * @phba: pointer to lpfc hba data structure.
898 * @id: HBQ identifier.
899 * @hbq_desc: pointer to the HBA descriptor data structure.
900 * @hbq_entry_index: index of the HBQ entry data structures.
901 * @pmb: pointer to the driver internal queue element for mailbox command.
902 *
903 * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
904 * an HBQ. The configuration binds events that require buffers to a particular
905 * ring and HBQ based on a selection profile.
906 *
907 * This routine prepares the mailbox command for configuring an HBQ.
908 **/
584void 909void
585lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, 910lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
586 struct lpfc_hbq_init *hbq_desc, 911 struct lpfc_hbq_init *hbq_desc,
@@ -641,8 +966,23 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
641 return; 966 return;
642} 967}
643 968
644 969/**
645 970 * lpfc_config_ring: Prepare a mailbox command for configuring an IOCB ring.
971 * @phba: pointer to lpfc hba data structure.
972 * @ring:
973 * @pmb: pointer to the driver internal queue element for mailbox command.
974 *
975 * The configure ring mailbox command is used to configure an IOCB ring. This
976 * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the
977 * ring. This is used to map incoming sequences to a particular ring whose
978 * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not
979 * attempt to configure a ring whose number is greater than the number
980 * specified in the Port Control Block (PCB). It is an error to issue the
981 * configure ring command more than once with the same ring number. The HBA
982 * returns an error if the driver attempts this.
983 *
984 * This routine prepares the mailbox command for configuring IOCB ring.
985 **/
646void 986void
647lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 987lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
648{ 988{
@@ -684,6 +1024,20 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
684 return; 1024 return;
685} 1025}
686 1026
1027/**
1028 * lpfc_config_port: Prepare a mailbox command for configuring port.
1029 * @phba: pointer to lpfc hba data structure.
1030 * @pmb: pointer to the driver internal queue element for mailbox command.
1031 *
1032 * The configure port mailbox command is used to identify the Port Control
1033 * Block (PCB) in the driver memory. After this command is issued, the
1034 * driver must not access the mailbox in the HBA without first resetting
1035 * the HBA. The HBA may copy the PCB information to internal storage for
1036 * subsequent use; the driver can not change the PCB information unless it
1037 * resets the HBA.
1038 *
1039 * This routine prepares the mailbox command for configuring port.
1040 **/
687void 1041void
688lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1042lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
689{ 1043{
@@ -702,8 +1056,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
702 1056
703 mb->un.varCfgPort.pcbLen = sizeof(PCB_t); 1057 mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
704 1058
705 offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p; 1059 offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
706 pdma_addr = phba->slim2p_mapping + offset; 1060 pdma_addr = phba->slim2p.phys + offset;
707 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 1061 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
708 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 1062 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
709 1063
@@ -711,12 +1065,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
711 1065
712 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1066 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
713 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1067 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1068 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1069 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
714 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1070 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
715 if (phba->max_vpi && phba->cfg_enable_npiv && 1071 if (phba->max_vpi && phba->cfg_enable_npiv &&
716 phba->vpd.sli3Feat.cmv) { 1072 phba->vpd.sli3Feat.cmv) {
717 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1073 mb->un.varCfgPort.max_vpi = phba->max_vpi;
718 mb->un.varCfgPort.cmv = 1; 1074 mb->un.varCfgPort.cmv = 1;
719 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
720 } else 1075 } else
721 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1076 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
722 } else 1077 } else
@@ -724,16 +1079,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
724 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1079 mb->un.varCfgPort.sli_mode = phba->sli_rev;
725 1080
726 /* Now setup pcb */ 1081 /* Now setup pcb */
727 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; 1082 phba->pcb->type = TYPE_NATIVE_SLI2;
728 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2; 1083 phba->pcb->feature = FEATURE_INITIAL_SLI2;
729 1084
730 /* Setup Mailbox pointers */ 1085 /* Setup Mailbox pointers */
731 phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) + 1086 phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
732 sizeof(struct sli2_desc); 1087 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
733 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; 1088 pdma_addr = phba->slim2p.phys + offset;
734 pdma_addr = phba->slim2p_mapping + offset; 1089 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
735 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); 1090 phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
736 phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
737 1091
738 /* 1092 /*
739 * Setup Host Group ring pointer. 1093 * Setup Host Group ring pointer.
@@ -794,13 +1148,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
794 } 1148 }
795 1149
796 /* mask off BAR0's flag bits 0 - 3 */ 1150 /* mask off BAR0's flag bits 0 - 3 */
797 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + 1151 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
798 (void __iomem *) phba->host_gp - 1152 (void __iomem *)phba->host_gp -
799 (void __iomem *)phba->MBslimaddr; 1153 (void __iomem *)phba->MBslimaddr;
800 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) 1154 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
801 phba->slim2p->pcb.hgpAddrHigh = bar_high; 1155 phba->pcb->hgpAddrHigh = bar_high;
802 else 1156 else
803 phba->slim2p->pcb.hgpAddrHigh = 0; 1157 phba->pcb->hgpAddrHigh = 0;
804 /* write HGP data to SLIM at the required longword offset */ 1158 /* write HGP data to SLIM at the required longword offset */
805 memset(&hgp, 0, sizeof(struct lpfc_hgp)); 1159 memset(&hgp, 0, sizeof(struct lpfc_hgp));
806 1160
@@ -810,17 +1164,19 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
810 } 1164 }
811 1165
812 /* Setup Port Group ring pointer */ 1166 /* Setup Port Group ring pointer */
813 if (phba->sli_rev == 3) 1167 if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
814 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port - 1168 pgp_offset = offsetof(struct lpfc_sli2_slim,
815 (uint8_t *)phba->slim2p; 1169 mbx.us.s3_inb_pgp.port);
816 else 1170 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
817 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - 1171 } else if (phba->sli_rev == 3) {
818 (uint8_t *)phba->slim2p; 1172 pgp_offset = offsetof(struct lpfc_sli2_slim,
819 1173 mbx.us.s3_pgp.port);
820 pdma_addr = phba->slim2p_mapping + pgp_offset; 1174 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
821 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr); 1175 } else
822 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr); 1176 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
823 phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0]; 1177 pdma_addr = phba->slim2p.phys + pgp_offset;
1178 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
1179 phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
824 1180
825 /* Use callback routine to setp rings in the pcb */ 1181 /* Use callback routine to setp rings in the pcb */
826 lpfc_config_pcb_setup(phba); 1182 lpfc_config_pcb_setup(phba);
@@ -835,10 +1191,24 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
835 } 1191 }
836 1192
837 /* Swap PCB if needed */ 1193 /* Swap PCB if needed */
838 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, 1194 lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
839 sizeof(PCB_t));
840} 1195}
841 1196
1197/**
1198 * lpfc_kill_board: Prepare a mailbox command for killing board.
1199 * @phba: pointer to lpfc hba data structure.
1200 * @pmb: pointer to the driver internal queue element for mailbox command.
1201 *
1202 * The kill board mailbox command is used to tell firmware to perform a
1203 * graceful shutdown of a channel on a specified board to prepare for reset.
1204 * When the kill board mailbox command is received, the ER3 bit is set to 1
1205 * in the Host Status register and the ER Attention bit is set to 1 in the
1206 * Host Attention register of the HBA function that received the kill board
1207 * command.
1208 *
1209 * This routine prepares the mailbox command for killing the board in
1210 * preparation for a graceful shutdown.
1211 **/
842void 1212void
843lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1213lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
844{ 1214{
@@ -850,6 +1220,16 @@ lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
850 return; 1220 return;
851} 1221}
852 1222
1223/**
1224 * lpfc_mbox_put: Put a mailbox cmd into the tail of driver's mailbox queue.
1225 * @phba: pointer to lpfc hba data structure.
1226 * @mbq: pointer to the driver internal queue element for mailbox command.
1227 *
1228 * Driver maintains a internal mailbox command queue implemented as a linked
1229 * list. When a mailbox command is issued, it shall be put into the mailbox
1230 * command queue such that they shall be processed orderly as HBA can process
1231 * one mailbox command at a time.
1232 **/
853void 1233void
854lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1234lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
855{ 1235{
@@ -864,6 +1244,20 @@ lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
864 return; 1244 return;
865} 1245}
866 1246
1247/**
1248 * lpfc_mbox_get: Remove a mailbox cmd from the head of driver's mailbox queue.
1249 * @phba: pointer to lpfc hba data structure.
1250 *
1251 * Driver maintains a internal mailbox command queue implemented as a linked
1252 * list. When a mailbox command is issued, it shall be put into the mailbox
1253 * command queue such that they shall be processed orderly as HBA can process
1254 * one mailbox command at a time. After HBA finished processing a mailbox
1255 * command, the driver will remove a pending mailbox command from the head of
1256 * the mailbox command queue and send to the HBA for processing.
1257 *
1258 * Return codes
1259 * pointer to the driver internal queue element for mailbox command.
1260 **/
867LPFC_MBOXQ_t * 1261LPFC_MBOXQ_t *
868lpfc_mbox_get(struct lpfc_hba * phba) 1262lpfc_mbox_get(struct lpfc_hba * phba)
869{ 1263{
@@ -877,6 +1271,17 @@ lpfc_mbox_get(struct lpfc_hba * phba)
877 return mbq; 1271 return mbq;
878} 1272}
879 1273
1274/**
1275 * lpfc_mbox_cmpl_put: Put mailbox command into mailbox command complete list.
1276 * @phba: pointer to lpfc hba data structure.
1277 * @mbq: pointer to the driver internal queue element for mailbox command.
1278 *
1279 * This routine put the completed mailbox command into the mailbox command
1280 * complete list. This routine is called from driver interrupt handler
1281 * context.The mailbox complete list is used by the driver worker thread
1282 * to process mailbox complete callback functions outside the driver interrupt
1283 * handler.
1284 **/
880void 1285void
881lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1286lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
882{ 1287{
@@ -887,6 +1292,17 @@ lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
887 return; 1292 return;
888} 1293}
889 1294
1295/**
1296 * lpfc_mbox_tmo_val: Retrieve mailbox command timeout value.
1297 * @phba: pointer to lpfc hba data structure.
1298 * @cmd: mailbox command code.
1299 *
1300 * This routine retrieves the proper timeout value according to the mailbox
1301 * command code.
1302 *
1303 * Return codes
1304 * Timeout value to be used for the given mailbox command
1305 **/
890int 1306int
891lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) 1307lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
892{ 1308{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 3c0cebc71800..a4bba2069248 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
30 30
31#include "lpfc_hw.h" 31#include "lpfc_hw.h"
32#include "lpfc_sli.h" 32#include "lpfc_sli.h"
33#include "lpfc_nl.h"
33#include "lpfc_disc.h" 34#include "lpfc_disc.h"
34#include "lpfc_scsi.h" 35#include "lpfc_scsi.h"
35#include "lpfc.h" 36#include "lpfc.h"
@@ -39,7 +40,21 @@
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 40#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 41
41 42
42 43/**
44 * lpfc_mem_alloc: create and allocate all PCI and memory pools
45 * @phba: HBA to allocate pools for
46 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any
52 * allocation fails, frees all successfully allocated memory before returning.
53 *
54 * Returns:
55 * 0 on success
56 * -ENOMEM on failure (if any memory allocations fail)
57 **/
43int 58int
44lpfc_mem_alloc(struct lpfc_hba * phba) 59lpfc_mem_alloc(struct lpfc_hba * phba)
45{ 60{
@@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
120 return -ENOMEM; 135 return -ENOMEM;
121} 136}
122 137
138/**
139 * lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for
141 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
144 * lpfc_nodelist. Also frees the VPI bitmask.
145 *
146 * Returns: None
147 **/
123void 148void
124lpfc_mem_free(struct lpfc_hba * phba) 149lpfc_mem_free(struct lpfc_hba * phba)
125{ 150{
@@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba)
181 phba->lpfc_scsi_dma_buf_pool = NULL; 206 phba->lpfc_scsi_dma_buf_pool = NULL;
182 phba->lpfc_mbuf_pool = NULL; 207 phba->lpfc_mbuf_pool = NULL;
183 208
184 /* Free the iocb lookup array */ 209 /* Free the iocb lookup array */
185 kfree(psli->iocbq_lookup); 210 kfree(psli->iocbq_lookup);
186 psli->iocbq_lookup = NULL; 211 psli->iocbq_lookup = NULL;
187
188} 212}
189 213
214/**
215 * lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
216 * @phba: HBA which owns the pool to allocate from
217 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
218 * @handle: used to return the DMA-mapped address of the mbuf
219 *
220 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
221 * Allocates from generic pci_pool_alloc function first and if that fails and
222 * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
223 * HBA's pool.
224 *
225 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
226 * phba->hbalock.
227 *
228 * Returns:
229 * pointer to the allocated mbuf on success
230 * NULL on failure
231 **/
190void * 232void *
191lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) 233lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
192{ 234{
@@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
206 return ret; 248 return ret;
207} 249}
208 250
251/**
252 * __lpfc_mem_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
253 * @phba: HBA which owns the pool to return to
254 * @virt: mbuf to free
255 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
256 *
257 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
258 * it is below its max_count, frees the mbuf otherwise.
259 *
260 * Notes: Must be called with phba->hbalock held to synchronize access to
261 * lpfc_mbuf_safety_pool.
262 *
263 * Returns: None
264 **/
209void 265void
210__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) 266__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
211{ 267{
@@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
221 return; 277 return;
222} 278}
223 279
280/**
281 * lpfc_mem_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
282 * @phba: HBA which owns the pool to return to
283 * @virt: mbuf to free
284 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
285 *
286 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
287 * it is below its max_count, frees the mbuf otherwise.
288 *
289 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
290 *
291 * Returns: None
292 **/
224void 293void
294
225lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) 295lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
226{ 296{
227 unsigned long iflags; 297 unsigned long iflags;
@@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
232 return; 302 return;
233} 303}
234 304
305/**
306 * lpfc_els_hbq_alloc: Allocate an HBQ buffer
307 * @phba: HBA to allocate HBQ buffer for
308 *
309 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
310 * pool along a non-DMA-mapped container for it.
311 *
312 * Notes: Not interrupt-safe. Must be called with no locks held.
313 *
314 * Returns:
315 * pointer to HBQ on success
316 * NULL on failure
317 **/
235struct hbq_dmabuf * 318struct hbq_dmabuf *
236lpfc_els_hbq_alloc(struct lpfc_hba *phba) 319lpfc_els_hbq_alloc(struct lpfc_hba *phba)
237{ 320{
@@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
251 return hbqbp; 334 return hbqbp;
252} 335}
253 336
337/**
338 * lpfc_mem_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
339 * @phba: HBA buffer was allocated for
340 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
341 *
342 * Description: Frees both the container and the DMA-mapped buffer returned by
343 * lpfc_els_hbq_alloc.
344 *
345 * Notes: Can be called with or without locks held.
346 *
347 * Returns: None
348 **/
254void 349void
255lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 350lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
256{ 351{
@@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
259 return; 354 return;
260} 355}
261 356
262/* This is ONLY called for the LPFC_ELS_HBQ */ 357/**
358 * lpfc_in_buf_free: Free a DMA buffer
359 * @phba: HBA buffer is associated with
360 * @mp: Buffer to free
361 *
362 * Description: Frees the given DMA buffer in the appropriate way given if the
363 * HBA is running in SLI3 mode with HBQs enabled.
364 *
365 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
366 *
367 * Returns: None
368 **/
263void 369void
264lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) 370lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
265{ 371{
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 000000000000..1accb5a9f4e6
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,163 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Event definitions for RegisterForEvent */
22#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
23#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
24#define FC_REG_CT_EVENT 0x0004 /* CT request events */
25#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */
26#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */
27#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */
28#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */
29#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */
30#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */
31#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */
32#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
33 FC_REG_RSCN_EVENT | \
34 FC_REG_CT_EVENT | \
35 FC_REG_DUMP_EVENT | \
36 FC_REG_TEMPERATURE_EVENT | \
37 FC_REG_ELS_EVENT | \
38 FC_REG_FABRIC_EVENT | \
39 FC_REG_SCSI_EVENT | \
40 FC_REG_BOARD_EVENT | \
41 FC_REG_ADAPTER_EVENT)
42/* Temperature events */
43#define LPFC_CRIT_TEMP 0x1
44#define LPFC_THRESHOLD_TEMP 0x2
45#define LPFC_NORMAL_TEMP 0x3
46/*
47 * All net link event payloads will begin with and event type
48 * and subcategory. The event type must come first.
49 * The subcategory further defines the data that follows in the rest
50 * of the payload. Each category will have its own unique header plus
51 * any addtional data unique to the subcategory.
52 * The payload sent via the fc transport is one-way driver->application.
53 */
54
55/* els event header */
56struct lpfc_els_event_header {
57 uint32_t event_type;
58 uint32_t subcategory;
59 uint8_t wwpn[8];
60 uint8_t wwnn[8];
61};
62
63/* subcategory codes for FC_REG_ELS_EVENT */
64#define LPFC_EVENT_PLOGI_RCV 0x01
65#define LPFC_EVENT_PRLO_RCV 0x02
66#define LPFC_EVENT_ADISC_RCV 0x04
67#define LPFC_EVENT_LSRJT_RCV 0x08
68
69/* special els lsrjt event */
70struct lpfc_lsrjt_event {
71 struct lpfc_els_event_header header;
72 uint32_t command;
73 uint32_t reason_code;
74 uint32_t explanation;
75};
76
77
78/* fabric event header */
79struct lpfc_fabric_event_header {
80 uint32_t event_type;
81 uint32_t subcategory;
82 uint8_t wwpn[8];
83 uint8_t wwnn[8];
84};
85
86/* subcategory codes for FC_REG_FABRIC_EVENT */
87#define LPFC_EVENT_FABRIC_BUSY 0x01
88#define LPFC_EVENT_PORT_BUSY 0x02
89#define LPFC_EVENT_FCPRDCHKERR 0x04
90
91/* special case fabric fcprdchkerr event */
92struct lpfc_fcprdchkerr_event {
93 struct lpfc_fabric_event_header header;
94 uint32_t lun;
95 uint32_t opcode;
96 uint32_t fcpiparam;
97};
98
99
100/* scsi event header */
101struct lpfc_scsi_event_header {
102 uint32_t event_type;
103 uint32_t subcategory;
104 uint32_t lun;
105 uint8_t wwpn[8];
106 uint8_t wwnn[8];
107};
108
109/* subcategory codes for FC_REG_SCSI_EVENT */
110#define LPFC_EVENT_QFULL 0x0001
111#define LPFC_EVENT_DEVBSY 0x0002
112#define LPFC_EVENT_CHECK_COND 0x0004
113#define LPFC_EVENT_LUNRESET 0x0008
114#define LPFC_EVENT_TGTRESET 0x0010
115#define LPFC_EVENT_BUSRESET 0x0020
116#define LPFC_EVENT_VARQUEDEPTH 0x0040
117
118/* special case scsi varqueuedepth event */
119struct lpfc_scsi_varqueuedepth_event {
120 struct lpfc_scsi_event_header scsi_event;
121 uint32_t oldval;
122 uint32_t newval;
123};
124
125/* special case scsi check condition event */
126struct lpfc_scsi_check_condition_event {
127 struct lpfc_scsi_event_header scsi_event;
128 uint8_t sense_key;
129 uint8_t asc;
130 uint8_t ascq;
131};
132
133/* event codes for FC_REG_BOARD_EVENT */
134#define LPFC_EVENT_PORTINTERR 0x01
135
136/* board event header */
137struct lpfc_board_event_header {
138 uint32_t event_type;
139 uint32_t subcategory;
140};
141
142
143/* event codes for FC_REG_ADAPTER_EVENT */
144#define LPFC_EVENT_ARRIVAL 0x01
145
146/* adapter event header */
147struct lpfc_adapter_event_header {
148 uint32_t event_type;
149 uint32_t subcategory;
150};
151
152
153/* event codes for temp_event */
154#define LPFC_CRIT_TEMP 0x1
155#define LPFC_THRESHOLD_TEMP 0x2
156#define LPFC_NORMAL_TEMP 0x3
157
158struct temp_event {
159 uint32_t event_type;
160 uint32_t event_code;
161 uint32_t data;
162};
163
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6688a8689b56..0c25d97acb42 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -30,6 +30,7 @@
30 30
31#include "lpfc_hw.h" 31#include "lpfc_hw.h"
32#include "lpfc_sli.h" 32#include "lpfc_sli.h"
33#include "lpfc_nl.h"
33#include "lpfc_disc.h" 34#include "lpfc_disc.h"
34#include "lpfc_scsi.h" 35#include "lpfc_scsi.h"
35#include "lpfc.h" 36#include "lpfc.h"
@@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1003 spin_lock_irq(shost->host_lock); 1004 spin_lock_irq(shost->host_lock);
1004 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1005 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1005 spin_unlock_irq(shost->host_lock); 1006 spin_unlock_irq(shost->host_lock);
1006 1007 if (vport->num_disc_nodes)
1007 if (vport->num_disc_nodes) {
1008 lpfc_more_adisc(vport); 1008 lpfc_more_adisc(vport);
1009 if ((vport->num_disc_nodes == 0) &&
1010 (vport->fc_npr_cnt))
1011 lpfc_els_disc_plogi(vport);
1012 if (vport->num_disc_nodes == 0) {
1013 spin_lock_irq(shost->host_lock);
1014 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1015 spin_unlock_irq(shost->host_lock);
1016 lpfc_can_disctmo(vport);
1017 lpfc_end_rscn(vport);
1018 }
1019 }
1020 } 1009 }
1021 return ndlp->nlp_state; 1010 return ndlp->nlp_state;
1022 } 1011 }
@@ -1865,8 +1854,13 @@ static uint32_t
1865lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1854lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1866 void *arg, uint32_t evt) 1855 void *arg, uint32_t evt)
1867{ 1856{
1857 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1858 if (ndlp->nlp_DID == Fabric_DID) {
1859 spin_lock_irq(shost->host_lock);
1860 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1861 spin_unlock_irq(shost->host_lock);
1862 }
1868 lpfc_unreg_rpi(vport, ndlp); 1863 lpfc_unreg_rpi(vport, ndlp);
1869 /* This routine does nothing, just return the current state */
1870 return ndlp->nlp_state; 1864 return ndlp->nlp_state;
1871} 1865}
1872 1866
@@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2155 lpfc_nlp_put(ndlp); 2149 lpfc_nlp_put(ndlp);
2156 } else { 2150 } else {
2157 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2151 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2158 "0212 DSM out state %d on NPort free\n", rc); 2152 "0213 DSM out state %d on NPort free\n", rc);
2159 2153
2160 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2154 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2161 "DSM out: ste:%d did:x%x flg:x%x", 2155 "DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1bcebbd3dfac..bd1867411821 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -32,6 +32,7 @@
32#include "lpfc_version.h" 32#include "lpfc_version.h"
33#include "lpfc_hw.h" 33#include "lpfc_hw.h"
34#include "lpfc_sli.h" 34#include "lpfc_sli.h"
35#include "lpfc_nl.h"
35#include "lpfc_disc.h" 36#include "lpfc_disc.h"
36#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
37#include "lpfc.h" 38#include "lpfc.h"
@@ -42,6 +43,111 @@
42#define LPFC_RESET_WAIT 2 43#define LPFC_RESET_WAIT 2
43#define LPFC_ABORT_WAIT 2 44#define LPFC_ABORT_WAIT 2
44 45
46/**
47 * lpfc_update_stats: Update statistical data for the command completion.
48 * @phba: Pointer to HBA object.
49 * @lpfc_cmd: lpfc scsi command object pointer.
50 *
51 * This function is called when there is a command completion and this
52 * function updates the statistical data for the command completion.
53 **/
54static void
55lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
56{
57 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
58 struct lpfc_nodelist *pnode = rdata->pnode;
59 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
60 unsigned long flags;
61 struct Scsi_Host *shost = cmd->device->host;
62 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
63 unsigned long latency;
64 int i;
65
66 if (cmd->result)
67 return;
68
69 spin_lock_irqsave(shost->host_lock, flags);
70 if (!vport->stat_data_enabled ||
71 vport->stat_data_blocked ||
72 !pnode->lat_data ||
73 (phba->bucket_type == LPFC_NO_BUCKET)) {
74 spin_unlock_irqrestore(shost->host_lock, flags);
75 return;
76 }
77 latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
78
79 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
80 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
81 phba->bucket_step;
82 if (i >= LPFC_MAX_BUCKET_COUNT)
83 i = LPFC_MAX_BUCKET_COUNT;
84 } else {
85 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
86 if (latency <= (phba->bucket_base +
87 ((1<<i)*phba->bucket_step)))
88 break;
89 }
90
91 pnode->lat_data[i].cmd_count++;
92 spin_unlock_irqrestore(shost->host_lock, flags);
93}
94
95
96/**
97 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
98 * event.
99 * @phba: Pointer to HBA context object.
100 * @vport: Pointer to vport object.
101 * @ndlp: Pointer to FC node associated with the target.
102 * @lun: Lun number of the scsi device.
103 * @old_val: Old value of the queue depth.
104 * @new_val: New value of the queue depth.
105 *
106 * This function sends an event to the mgmt application indicating
107 * there is a change in the scsi device queue depth.
108 **/
109static void
110lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
111 struct lpfc_vport *vport,
112 struct lpfc_nodelist *ndlp,
113 uint32_t lun,
114 uint32_t old_val,
115 uint32_t new_val)
116{
117 struct lpfc_fast_path_event *fast_path_evt;
118 unsigned long flags;
119
120 fast_path_evt = lpfc_alloc_fast_evt(phba);
121 if (!fast_path_evt)
122 return;
123
124 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
125 FC_REG_SCSI_EVENT;
126 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
127 LPFC_EVENT_VARQUEDEPTH;
128
129 /* Report all luns with change in queue depth */
130 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
131 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
132 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
133 &ndlp->nlp_portname, sizeof(struct lpfc_name));
134 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
135 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
136 }
137
138 fast_path_evt->un.queue_depth_evt.oldval = old_val;
139 fast_path_evt->un.queue_depth_evt.newval = new_val;
140 fast_path_evt->vport = vport;
141
142 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
143 spin_lock_irqsave(&phba->hbalock, flags);
144 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
145 spin_unlock_irqrestore(&phba->hbalock, flags);
146 lpfc_worker_wake_up(phba);
147
148 return;
149}
150
45/* 151/*
46 * This function is called with no lock held when there is a resource 152 * This function is called with no lock held when there is a resource
47 * error in driver or in firmware. 153 * error in driver or in firmware.
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
117 struct lpfc_vport **vports; 223 struct lpfc_vport **vports;
118 struct Scsi_Host *shost; 224 struct Scsi_Host *shost;
119 struct scsi_device *sdev; 225 struct scsi_device *sdev;
120 unsigned long new_queue_depth; 226 unsigned long new_queue_depth, old_queue_depth;
121 unsigned long num_rsrc_err, num_cmd_success; 227 unsigned long num_rsrc_err, num_cmd_success;
122 int i; 228 int i;
229 struct lpfc_rport_data *rdata;
123 230
124 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 231 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
125 num_cmd_success = atomic_read(&phba->num_cmd_success); 232 num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
137 else 244 else
138 new_queue_depth = sdev->queue_depth - 245 new_queue_depth = sdev->queue_depth -
139 new_queue_depth; 246 new_queue_depth;
247 old_queue_depth = sdev->queue_depth;
140 if (sdev->ordered_tags) 248 if (sdev->ordered_tags)
141 scsi_adjust_queue_depth(sdev, 249 scsi_adjust_queue_depth(sdev,
142 MSG_ORDERED_TAG, 250 MSG_ORDERED_TAG,
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
145 scsi_adjust_queue_depth(sdev, 253 scsi_adjust_queue_depth(sdev,
146 MSG_SIMPLE_TAG, 254 MSG_SIMPLE_TAG,
147 new_queue_depth); 255 new_queue_depth);
256 rdata = sdev->hostdata;
257 if (rdata)
258 lpfc_send_sdev_queuedepth_change_event(
259 phba, vports[i],
260 rdata->pnode,
261 sdev->lun, old_queue_depth,
262 new_queue_depth);
148 } 263 }
149 } 264 }
150 lpfc_destroy_vport_work_array(phba, vports); 265 lpfc_destroy_vport_work_array(phba, vports);
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
159 struct Scsi_Host *shost; 274 struct Scsi_Host *shost;
160 struct scsi_device *sdev; 275 struct scsi_device *sdev;
161 int i; 276 int i;
277 struct lpfc_rport_data *rdata;
162 278
163 vports = lpfc_create_vport_work_array(phba); 279 vports = lpfc_create_vport_work_array(phba);
164 if (vports != NULL) 280 if (vports != NULL)
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
176 scsi_adjust_queue_depth(sdev, 292 scsi_adjust_queue_depth(sdev,
177 MSG_SIMPLE_TAG, 293 MSG_SIMPLE_TAG,
178 sdev->queue_depth+1); 294 sdev->queue_depth+1);
295 rdata = sdev->hostdata;
296 if (rdata)
297 lpfc_send_sdev_queuedepth_change_event(
298 phba, vports[i],
299 rdata->pnode,
300 sdev->lun,
301 sdev->queue_depth - 1,
302 sdev->queue_depth);
179 } 303 }
180 } 304 }
181 lpfc_destroy_vport_work_array(phba, vports); 305 lpfc_destroy_vport_work_array(phba, vports);
@@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
183 atomic_set(&phba->num_cmd_success, 0); 307 atomic_set(&phba->num_cmd_success, 0);
184} 308}
185 309
310/**
311 * lpfc_scsi_dev_block: set all scsi hosts to block state.
312 * @phba: Pointer to HBA context object.
313 *
314 * This function walks vport list and set each SCSI host to block state
315 * by invoking fc_remote_port_delete() routine. This function is invoked
316 * with EEH when device's PCI slot has been permanently disabled.
317 **/
318void
319lpfc_scsi_dev_block(struct lpfc_hba *phba)
320{
321 struct lpfc_vport **vports;
322 struct Scsi_Host *shost;
323 struct scsi_device *sdev;
324 struct fc_rport *rport;
325 int i;
326
327 vports = lpfc_create_vport_work_array(phba);
328 if (vports != NULL)
329 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
330 shost = lpfc_shost_from_vport(vports[i]);
331 shost_for_each_device(sdev, shost) {
332 rport = starget_to_rport(scsi_target(sdev));
333 fc_remote_port_delete(rport);
334 }
335 }
336 lpfc_destroy_vport_work_array(phba, vports);
337}
338
186/* 339/*
187 * This routine allocates a scsi buffer, which contains all the necessary 340 * This routine allocates a scsi buffer, which contains all the necessary
188 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 341 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
198 struct lpfc_scsi_buf *psb; 351 struct lpfc_scsi_buf *psb;
199 struct ulp_bde64 *bpl; 352 struct ulp_bde64 *bpl;
200 IOCB_t *iocb; 353 IOCB_t *iocb;
201 dma_addr_t pdma_phys; 354 dma_addr_t pdma_phys_fcp_cmd;
355 dma_addr_t pdma_phys_fcp_rsp;
356 dma_addr_t pdma_phys_bpl;
202 uint16_t iotag; 357 uint16_t iotag;
203 358
204 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 359 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
238 393
239 /* Initialize local short-hand pointers. */ 394 /* Initialize local short-hand pointers. */
240 bpl = psb->fcp_bpl; 395 bpl = psb->fcp_bpl;
241 pdma_phys = psb->dma_handle; 396 pdma_phys_fcp_cmd = psb->dma_handle;
397 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
398 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
399 sizeof(struct fcp_rsp);
242 400
243 /* 401 /*
244 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 402 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
245 * list bdes. Initialize the first two and leave the rest for 403 * list bdes. Initialize the first two and leave the rest for
246 * queuecommand. 404 * queuecommand.
247 */ 405 */
248 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); 406 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
249 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); 407 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
250 bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd); 408 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
251 bpl->tus.f.bdeFlags = BUFF_USE_CMND; 409 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
252 bpl->tus.w = le32_to_cpu(bpl->tus.w); 410 bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
253 bpl++;
254 411
255 /* Setup the physical region for the FCP RSP */ 412 /* Setup the physical region for the FCP RSP */
256 pdma_phys += sizeof (struct fcp_cmnd); 413 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
257 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); 414 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
258 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); 415 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
259 bpl->tus.f.bdeSize = sizeof (struct fcp_rsp); 416 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
260 bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); 417 bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
261 bpl->tus.w = le32_to_cpu(bpl->tus.w);
262 418
263 /* 419 /*
264 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 420 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
265 * initialize it with all known data now. 421 * initialize it with all known data now.
266 */ 422 */
267 pdma_phys += (sizeof (struct fcp_rsp));
268 iocb = &psb->cur_iocbq.iocb; 423 iocb = &psb->cur_iocbq.iocb;
269 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 424 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
270 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys); 425 if (phba->sli_rev == 3) {
271 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys); 426 /* fill in immediate fcp command BDE */
272 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 427 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
273 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; 428 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
274 iocb->ulpBdeCount = 1; 429 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
430 unsli3.fcp_ext.icd);
431 iocb->un.fcpi64.bdl.addrHigh = 0;
432 iocb->ulpBdeCount = 0;
433 iocb->ulpLe = 0;
434 /* fill in responce BDE */
435 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
436 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
437 sizeof(struct fcp_rsp);
438 iocb->unsli3.fcp_ext.rbde.addrLow =
439 putPaddrLow(pdma_phys_fcp_rsp);
440 iocb->unsli3.fcp_ext.rbde.addrHigh =
441 putPaddrHigh(pdma_phys_fcp_rsp);
442 } else {
443 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
444 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
445 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
446 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
447 iocb->ulpBdeCount = 1;
448 iocb->ulpLe = 1;
449 }
275 iocb->ulpClass = CLASS3; 450 iocb->ulpClass = CLASS3;
276 451
277 return psb; 452 return psb;
@@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
313 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 488 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
314 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 489 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
315 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 490 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
491 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
316 dma_addr_t physaddr; 492 dma_addr_t physaddr;
317 uint32_t i, num_bde = 0; 493 uint32_t num_bde = 0;
318 int nseg, datadir = scsi_cmnd->sc_data_direction; 494 int nseg, datadir = scsi_cmnd->sc_data_direction;
319 495
320 /* 496 /*
@@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
352 * during probe that limits the number of sg elements in any 528 * during probe that limits the number of sg elements in any
353 * single scsi command. Just run through the seg_cnt and format 529 * single scsi command. Just run through the seg_cnt and format
354 * the bde's. 530 * the bde's.
531 * When using SLI-3 the driver will try to fit all the BDEs into
532 * the IOCB. If it can't then the BDEs get added to a BPL as it
533 * does for SLI-2 mode.
355 */ 534 */
356 scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) { 535 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
357 physaddr = sg_dma_address(sgel); 536 physaddr = sg_dma_address(sgel);
358 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 537 if (phba->sli_rev == 3 &&
359 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 538 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
360 bpl->tus.f.bdeSize = sg_dma_len(sgel); 539 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
361 if (datadir == DMA_TO_DEVICE) 540 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
362 bpl->tus.f.bdeFlags = 0; 541 data_bde->addrLow = putPaddrLow(physaddr);
363 else 542 data_bde->addrHigh = putPaddrHigh(physaddr);
364 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 543 data_bde++;
365 bpl->tus.w = le32_to_cpu(bpl->tus.w); 544 } else {
366 bpl++; 545 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
367 num_bde++; 546 bpl->tus.f.bdeSize = sg_dma_len(sgel);
547 bpl->tus.w = le32_to_cpu(bpl->tus.w);
548 bpl->addrLow =
549 le32_to_cpu(putPaddrLow(physaddr));
550 bpl->addrHigh =
551 le32_to_cpu(putPaddrHigh(physaddr));
552 bpl++;
553 }
368 } 554 }
369 } 555 }
370 556
371 /* 557 /*
372 * Finish initializing those IOCB fields that are dependent on the 558 * Finish initializing those IOCB fields that are dependent on the
373 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 559 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
374 * reinitialized since all iocb memory resources are used many times 560 * explicitly reinitialized and for SLI-3 the extended bde count is
375 * for transmit, receive, and continuation bpl's. 561 * explicitly reinitialized since all iocb memory resources are reused.
376 */ 562 */
377 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 563 if (phba->sli_rev == 3) {
378 iocb_cmd->un.fcpi64.bdl.bdeSize += 564 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
379 (num_bde * sizeof (struct ulp_bde64)); 565 /*
380 iocb_cmd->ulpBdeCount = 1; 566 * The extended IOCB format can only fit 3 BDE or a BPL.
381 iocb_cmd->ulpLe = 1; 567 * This I/O has more than 3 BDE so the 1st data bde will
568 * be a BPL that is filled in here.
569 */
570 physaddr = lpfc_cmd->dma_handle;
571 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
572 data_bde->tus.f.bdeSize = (num_bde *
573 sizeof(struct ulp_bde64));
574 physaddr += (sizeof(struct fcp_cmnd) +
575 sizeof(struct fcp_rsp) +
576 (2 * sizeof(struct ulp_bde64)));
577 data_bde->addrHigh = putPaddrHigh(physaddr);
578 data_bde->addrLow = putPaddrLow(physaddr);
579 /* ebde count includes the responce bde and data bpl */
580 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
581 } else {
582 /* ebde count includes the responce bde and data bdes */
583 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
584 }
585 } else {
586 iocb_cmd->un.fcpi64.bdl.bdeSize =
587 ((num_bde + 2) * sizeof(struct ulp_bde64));
588 }
382 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 589 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
383 return 0; 590 return 0;
384} 591}
385 592
593/**
594 * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
595 * @phba: Pointer to hba context object.
596 * @vport: Pointer to vport object.
597 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
598 * @rsp_iocb: Pointer to response iocb object which reported error.
599 *
600 * This function posts an event when there is a SCSI command reporting
601 * error from the scsi device.
602 **/
603static void
604lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
605 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
606 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
607 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
608 uint32_t resp_info = fcprsp->rspStatus2;
609 uint32_t scsi_status = fcprsp->rspStatus3;
610 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
611 struct lpfc_fast_path_event *fast_path_evt = NULL;
612 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
613 unsigned long flags;
614
615 /* If there is queuefull or busy condition send a scsi event */
616 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
617 (cmnd->result == SAM_STAT_BUSY)) {
618 fast_path_evt = lpfc_alloc_fast_evt(phba);
619 if (!fast_path_evt)
620 return;
621 fast_path_evt->un.scsi_evt.event_type =
622 FC_REG_SCSI_EVENT;
623 fast_path_evt->un.scsi_evt.subcategory =
624 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
625 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
626 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
627 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
628 &pnode->nlp_portname, sizeof(struct lpfc_name));
629 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
630 &pnode->nlp_nodename, sizeof(struct lpfc_name));
631 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
632 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
633 fast_path_evt = lpfc_alloc_fast_evt(phba);
634 if (!fast_path_evt)
635 return;
636 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
637 FC_REG_SCSI_EVENT;
638 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
639 LPFC_EVENT_CHECK_COND;
640 fast_path_evt->un.check_cond_evt.scsi_event.lun =
641 cmnd->device->lun;
642 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
643 &pnode->nlp_portname, sizeof(struct lpfc_name));
644 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
645 &pnode->nlp_nodename, sizeof(struct lpfc_name));
646 fast_path_evt->un.check_cond_evt.sense_key =
647 cmnd->sense_buffer[2] & 0xf;
648 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
649 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
650 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
651 fcpi_parm &&
652 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
653 ((scsi_status == SAM_STAT_GOOD) &&
654 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
655 /*
656 * If status is good or resid does not match with fcp_param and
657 * there is valid fcpi_parm, then there is a read_check error
658 */
659 fast_path_evt = lpfc_alloc_fast_evt(phba);
660 if (!fast_path_evt)
661 return;
662 fast_path_evt->un.read_check_error.header.event_type =
663 FC_REG_FABRIC_EVENT;
664 fast_path_evt->un.read_check_error.header.subcategory =
665 LPFC_EVENT_FCPRDCHKERR;
666 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
667 &pnode->nlp_portname, sizeof(struct lpfc_name));
668 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
669 &pnode->nlp_nodename, sizeof(struct lpfc_name));
670 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
671 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
672 fast_path_evt->un.read_check_error.fcpiparam =
673 fcpi_parm;
674 } else
675 return;
676
677 fast_path_evt->vport = vport;
678 spin_lock_irqsave(&phba->hbalock, flags);
679 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
680 spin_unlock_irqrestore(&phba->hbalock, flags);
681 lpfc_worker_wake_up(phba);
682 return;
683}
386static void 684static void
387lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 685lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
388{ 686{
@@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
411 uint32_t rsplen = 0; 709 uint32_t rsplen = 0;
412 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 710 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
413 711
712
414 /* 713 /*
415 * If this is a task management command, there is no 714 * If this is a task management command, there is no
416 * scsi packet associated with this lpfc_cmd. The driver 715 * scsi packet associated with this lpfc_cmd. The driver
@@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
526 825
527 out: 826 out:
528 cmnd->result = ScsiResult(host_status, scsi_status); 827 cmnd->result = ScsiResult(host_status, scsi_status);
828 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
529} 829}
530 830
531static void 831static void
@@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
542 struct scsi_device *sdev, *tmp_sdev; 842 struct scsi_device *sdev, *tmp_sdev;
543 int depth = 0; 843 int depth = 0;
544 unsigned long flags; 844 unsigned long flags;
845 struct lpfc_fast_path_event *fast_path_evt;
545 846
546 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 847 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
547 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 848 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
849 atomic_dec(&pnode->cmd_pending);
548 850
549 if (lpfc_cmd->status) { 851 if (lpfc_cmd->status) {
550 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 852 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
570 break; 872 break;
571 case IOSTAT_NPORT_BSY: 873 case IOSTAT_NPORT_BSY:
572 case IOSTAT_FABRIC_BSY: 874 case IOSTAT_FABRIC_BSY:
573 cmd->result = ScsiResult(DID_BUS_BUSY, 0); 875 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
876 fast_path_evt = lpfc_alloc_fast_evt(phba);
877 if (!fast_path_evt)
878 break;
879 fast_path_evt->un.fabric_evt.event_type =
880 FC_REG_FABRIC_EVENT;
881 fast_path_evt->un.fabric_evt.subcategory =
882 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
883 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
884 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
885 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
886 &pnode->nlp_portname,
887 sizeof(struct lpfc_name));
888 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
889 &pnode->nlp_nodename,
890 sizeof(struct lpfc_name));
891 }
892 fast_path_evt->vport = vport;
893 fast_path_evt->work_evt.evt =
894 LPFC_EVT_FASTPATH_MGMT_EVT;
895 spin_lock_irqsave(&phba->hbalock, flags);
896 list_add_tail(&fast_path_evt->work_evt.evt_listp,
897 &phba->work_list);
898 spin_unlock_irqrestore(&phba->hbalock, flags);
899 lpfc_worker_wake_up(phba);
574 break; 900 break;
575 case IOSTAT_LOCAL_REJECT: 901 case IOSTAT_LOCAL_REJECT:
576 if (lpfc_cmd->result == RJT_UNAVAIL_PERM || 902 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
577 lpfc_cmd->result == IOERR_NO_RESOURCES || 903 lpfc_cmd->result == IOERR_NO_RESOURCES ||
578 lpfc_cmd->result == RJT_LOGIN_REQUIRED) { 904 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
579 cmd->result = ScsiResult(DID_REQUEUE, 0); 905 cmd->result = ScsiResult(DID_REQUEUE, 0);
580 break; 906 break;
581 } /* else: fall through */ 907 } /* else: fall through */
@@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
586 912
587 if (!pnode || !NLP_CHK_NODE_ACT(pnode) 913 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
588 || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 914 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
589 cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); 915 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
916 SAM_STAT_BUSY);
590 } else { 917 } else {
591 cmd->result = ScsiResult(DID_OK, 0); 918 cmd->result = ScsiResult(DID_OK, 0);
592 } 919 }
@@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
602 scsi_get_resid(cmd)); 929 scsi_get_resid(cmd));
603 } 930 }
604 931
932 lpfc_update_stats(phba, lpfc_cmd);
605 result = cmd->result; 933 result = cmd->result;
606 sdev = cmd->device; 934 sdev = cmd->device;
935 if (vport->cfg_max_scsicmpl_time &&
936 time_after(jiffies, lpfc_cmd->start_time +
937 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
938 spin_lock_irqsave(sdev->host->host_lock, flags);
939 if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
940 (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
941 ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
942 pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
943
944 pnode->last_change_time = jiffies;
945 spin_unlock_irqrestore(sdev->host->host_lock, flags);
946 } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
947 time_after(jiffies, pnode->last_change_time +
948 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
949 spin_lock_irqsave(sdev->host->host_lock, flags);
950 pnode->cmd_qdepth += pnode->cmd_qdepth *
951 LPFC_TGTQ_RAMPUP_PCENT / 100;
952 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
953 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
954 pnode->last_change_time = jiffies;
955 spin_unlock_irqrestore(sdev->host->host_lock, flags);
956 }
957
607 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 958 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
608 cmd->scsi_done(cmd); 959 cmd->scsi_done(cmd);
609 960
@@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
647 pnode->last_ramp_up_time = jiffies; 998 pnode->last_ramp_up_time = jiffies;
648 } 999 }
649 } 1000 }
1001 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1002 0xFFFFFFFF,
1003 sdev->queue_depth - 1, sdev->queue_depth);
650 } 1004 }
651 1005
652 /* 1006 /*
@@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
676 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1030 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
677 "0711 detected queue full - lun queue " 1031 "0711 detected queue full - lun queue "
678 "depth adjusted to %d.\n", depth); 1032 "depth adjusted to %d.\n", depth);
1033 lpfc_send_sdev_queuedepth_change_event(phba, vport,
1034 pnode, 0xFFFFFFFF,
1035 depth+1, depth);
679 } 1036 }
680 } 1037 }
681 1038
@@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
692 lpfc_release_scsi_buf(phba, lpfc_cmd); 1049 lpfc_release_scsi_buf(phba, lpfc_cmd);
693} 1050}
694 1051
1052/**
1053 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
1054 * @data: A pointer to the immediate command data portion of the IOCB.
1055 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1056 *
1057 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1058 * byte swapping the data to big endian format for transmission on the wire.
1059 **/
1060static void
1061lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1062{
1063 int i, j;
1064 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1065 i += sizeof(uint32_t), j++) {
1066 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1067 }
1068}
1069
695static void 1070static void
696lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 1071lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
697 struct lpfc_nodelist *pnode) 1072 struct lpfc_nodelist *pnode)
@@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
758 fcp_cmnd->fcpCntl3 = 0; 1133 fcp_cmnd->fcpCntl3 = 0;
759 phba->fc4ControlRequests++; 1134 phba->fc4ControlRequests++;
760 } 1135 }
761 1136 if (phba->sli_rev == 3)
1137 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
762 /* 1138 /*
763 * Finish initializing those IOCB fields that are independent 1139 * Finish initializing those IOCB fields that are independent
764 * of the scsi_cmnd request_buffer 1140 * of the scsi_cmnd request_buffer
@@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
798 piocb = &piocbq->iocb; 1174 piocb = &piocbq->iocb;
799 1175
800 fcp_cmnd = lpfc_cmd->fcp_cmnd; 1176 fcp_cmnd = lpfc_cmd->fcp_cmnd;
801 int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); 1177 /* Clear out any old data in the FCP command area */
1178 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1179 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
802 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 1180 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
803 1181 if (vport->phba->sli_rev == 3)
1182 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
804 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 1183 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
805
806 piocb->ulpContext = ndlp->nlp_rpi; 1184 piocb->ulpContext = ndlp->nlp_rpi;
807 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 1185 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
808 piocb->ulpFCP2Rcvy = 1; 1186 piocb->ulpFCP2Rcvy = 1;
@@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
967 * transport is still transitioning. 1345 * transport is still transitioning.
968 */ 1346 */
969 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1347 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
970 cmnd->result = ScsiResult(DID_BUS_BUSY, 0); 1348 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
971 goto out_fail_command; 1349 goto out_fail_command;
972 } 1350 }
1351 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
1352 goto out_host_busy;
1353
973 lpfc_cmd = lpfc_get_scsi_buf(phba); 1354 lpfc_cmd = lpfc_get_scsi_buf(phba);
974 if (lpfc_cmd == NULL) { 1355 if (lpfc_cmd == NULL) {
975 lpfc_adjust_queue_depth(phba); 1356 lpfc_adjust_queue_depth(phba);
@@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
980 goto out_host_busy; 1361 goto out_host_busy;
981 } 1362 }
982 1363
1364 lpfc_cmd->start_time = jiffies;
983 /* 1365 /*
984 * Store the midlayer's command structure for the completion phase 1366 * Store the midlayer's command structure for the completion phase
985 * and complete the command initialization. 1367 * and complete the command initialization.
@@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
987 lpfc_cmd->pCmd = cmnd; 1369 lpfc_cmd->pCmd = cmnd;
988 lpfc_cmd->rdata = rdata; 1370 lpfc_cmd->rdata = rdata;
989 lpfc_cmd->timeout = 0; 1371 lpfc_cmd->timeout = 0;
1372 lpfc_cmd->start_time = jiffies;
990 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 1373 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
991 cmnd->scsi_done = done; 1374 cmnd->scsi_done = done;
992 1375
@@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
996 1379
997 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 1380 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
998 1381
1382 atomic_inc(&ndlp->cmd_pending);
999 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 1383 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1000 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 1384 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1001 if (err) 1385 if (err)
@@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1010 return 0; 1394 return 0;
1011 1395
1012 out_host_busy_free_buf: 1396 out_host_busy_free_buf:
1397 atomic_dec(&ndlp->cmd_pending);
1013 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 1398 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1014 lpfc_release_scsi_buf(phba, lpfc_cmd); 1399 lpfc_release_scsi_buf(phba, lpfc_cmd);
1015 out_host_busy: 1400 out_host_busy:
@@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1145 int ret = SUCCESS; 1530 int ret = SUCCESS;
1146 int status; 1531 int status;
1147 int cnt; 1532 int cnt;
1533 struct lpfc_scsi_event_header scsi_event;
1148 1534
1149 lpfc_block_error_handler(cmnd); 1535 lpfc_block_error_handler(cmnd);
1150 /* 1536 /*
@@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1163 break; 1549 break;
1164 pnode = rdata->pnode; 1550 pnode = rdata->pnode;
1165 } 1551 }
1552
1553 scsi_event.event_type = FC_REG_SCSI_EVENT;
1554 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
1555 scsi_event.lun = 0;
1556 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
1557 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
1558
1559 fc_host_post_vendor_event(shost,
1560 fc_get_event_number(),
1561 sizeof(scsi_event),
1562 (char *)&scsi_event,
1563 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1564
1166 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1565 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1167 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1168 "0721 LUN Reset rport " 1567 "0721 LUN Reset rport "
@@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1242 struct lpfc_hba *phba = vport->phba; 1641 struct lpfc_hba *phba = vport->phba;
1243 struct lpfc_nodelist *ndlp = NULL; 1642 struct lpfc_nodelist *ndlp = NULL;
1244 int match; 1643 int match;
1245 int ret = SUCCESS, status, i; 1644 int ret = SUCCESS, status = SUCCESS, i;
1246 int cnt; 1645 int cnt;
1247 struct lpfc_scsi_buf * lpfc_cmd; 1646 struct lpfc_scsi_buf * lpfc_cmd;
1248 unsigned long later; 1647 unsigned long later;
1648 struct lpfc_scsi_event_header scsi_event;
1649
1650 scsi_event.event_type = FC_REG_SCSI_EVENT;
1651 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
1652 scsi_event.lun = 0;
1653 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
1654 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
1655
1656 fc_host_post_vendor_event(shost,
1657 fc_get_event_number(),
1658 sizeof(scsi_event),
1659 (char *)&scsi_event,
1660 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1249 1661
1250 lpfc_block_error_handler(cmnd); 1662 lpfc_block_error_handler(cmnd);
1251 /* 1663 /*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index daba92374985..437f182e2322 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -107,6 +107,10 @@ struct fcp_cmnd {
107 107
108}; 108};
109 109
110struct lpfc_scsicmd_bkt {
111 uint32_t cmd_count;
112};
113
110struct lpfc_scsi_buf { 114struct lpfc_scsi_buf {
111 struct list_head list; 115 struct list_head list;
112 struct scsi_cmnd *pCmd; 116 struct scsi_cmnd *pCmd;
@@ -139,6 +143,7 @@ struct lpfc_scsi_buf {
139 */ 143 */
140 struct lpfc_iocbq cur_iocbq; 144 struct lpfc_iocbq cur_iocbq;
141 wait_queue_head_t *waitq; 145 wait_queue_head_t *waitq;
146 unsigned long start_time;
142}; 147};
143 148
144#define LPFC_SCSI_DMA_EXT_SIZE 264 149#define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 50fe07646738..8ab5babdeebc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -32,6 +32,7 @@
32 32
33#include "lpfc_hw.h" 33#include "lpfc_hw.h"
34#include "lpfc_sli.h" 34#include "lpfc_sli.h"
35#include "lpfc_nl.h"
35#include "lpfc_disc.h" 36#include "lpfc_disc.h"
36#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
37#include "lpfc.h" 38#include "lpfc.h"
@@ -66,10 +67,16 @@ typedef enum _lpfc_iocb_type {
66 LPFC_ABORT_IOCB 67 LPFC_ABORT_IOCB
67} lpfc_iocb_type; 68} lpfc_iocb_type;
68 69
69 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer 70/**
70 * to the start of the ring, and the slot number of the 71 * lpfc_cmd_iocb: Get next command iocb entry in the ring.
71 * desired iocb entry, calc a pointer to that entry. 72 * @phba: Pointer to HBA context object.
72 */ 73 * @pring: Pointer to driver SLI ring object.
74 *
75 * This function returns pointer to next command iocb entry
76 * in the command ring. The caller must hold hbalock to prevent
77 * other threads consume the next command iocb.
78 * SLI-2/SLI-3 provide different sized iocbs.
79 **/
73static inline IOCB_t * 80static inline IOCB_t *
74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 81lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75{ 82{
@@ -77,6 +84,16 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
77 pring->cmdidx * phba->iocb_cmd_size); 84 pring->cmdidx * phba->iocb_cmd_size);
78} 85}
79 86
87/**
88 * lpfc_resp_iocb: Get next response iocb entry in the ring.
89 * @phba: Pointer to HBA context object.
90 * @pring: Pointer to driver SLI ring object.
91 *
92 * This function returns pointer to next response iocb entry
93 * in the response ring. The caller must hold hbalock to make sure
94 * that no other thread consume the next response iocb.
95 * SLI-2/SLI-3 provide different sized iocbs.
96 **/
80static inline IOCB_t * 97static inline IOCB_t *
81lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 98lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
82{ 99{
@@ -84,6 +101,15 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
84 pring->rspidx * phba->iocb_rsp_size); 101 pring->rspidx * phba->iocb_rsp_size);
85} 102}
86 103
104/**
105 * __lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
106 * @phba: Pointer to HBA context object.
107 *
108 * This function is called with hbalock held. This function
109 * allocates a new driver iocb object from the iocb pool. If the
110 * allocation is successful, it returns pointer to the newly
111 * allocated iocb object else it returns NULL.
112 **/
87static struct lpfc_iocbq * 113static struct lpfc_iocbq *
88__lpfc_sli_get_iocbq(struct lpfc_hba *phba) 114__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
89{ 115{
@@ -94,6 +120,15 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
94 return iocbq; 120 return iocbq;
95} 121}
96 122
123/**
124 * lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
125 * @phba: Pointer to HBA context object.
126 *
127 * This function is called with no lock held. This function
128 * allocates a new driver iocb object from the iocb pool. If the
129 * allocation is successful, it returns pointer to the newly
130 * allocated iocb object else it returns NULL.
131 **/
97struct lpfc_iocbq * 132struct lpfc_iocbq *
98lpfc_sli_get_iocbq(struct lpfc_hba *phba) 133lpfc_sli_get_iocbq(struct lpfc_hba *phba)
99{ 134{
@@ -106,6 +141,16 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
106 return iocbq; 141 return iocbq;
107} 142}
108 143
144/**
145 * __lpfc_sli_release_iocbq: Release iocb to the iocb pool.
146 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object.
148 *
149 * This function is called with hbalock held to release driver
150 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed.
153 **/
109static void 154static void
110__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
111{ 156{
@@ -118,6 +163,14 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
118 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
119} 164}
120 165
166/**
167 * lpfc_sli_release_iocbq: Release iocb to the iocb pool.
168 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object.
170 *
171 * This function is called with no lock held to release the iocb to
172 * iocb pool.
173 **/
121void 174void
122lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 175lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
123{ 176{
@@ -131,10 +184,21 @@ lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
131 spin_unlock_irqrestore(&phba->hbalock, iflags); 184 spin_unlock_irqrestore(&phba->hbalock, iflags);
132} 185}
133 186
134/* 187/**
135 * Translate the iocb command to an iocb command type used to decide the final 188 * lpfc_sli_iocb_cmd_type: Get the iocb type.
136 * disposition of each completed IOCB. 189 * @iocb_cmnd : iocb command code.
137 */ 190 *
191 * This function is called by ring event handler function to get the iocb type.
192 * This function translates the iocb command to an iocb command type used to
193 * decide the final disposition of each completed IOCB.
194 * The function returns
195 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
196 * LPFC_SOL_IOCB if it is a solicited iocb completion
197 * LPFC_ABORT_IOCB if it is an abort iocb
198 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
199 *
200 * The caller is not required to hold any lock.
201 **/
138static lpfc_iocb_type 202static lpfc_iocb_type
139lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 203lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
140{ 204{
@@ -230,6 +294,17 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
230 return type; 294 return type;
231} 295}
232 296
297/**
298 * lpfc_sli_ring_map: Issue config_ring mbox for all rings.
299 * @phba: Pointer to HBA context object.
300 *
301 * This function is called from SLI initialization code
302 * to configure every ring of the HBA's SLI interface. The
303 * caller is not required to hold any lock. This function issues
304 * a config_ring mailbox command for each ring.
305 * This function returns zero if successful else returns a negative
306 * error code.
307 **/
233static int 308static int
234lpfc_sli_ring_map(struct lpfc_hba *phba) 309lpfc_sli_ring_map(struct lpfc_hba *phba)
235{ 310{
@@ -262,6 +337,18 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
262 return ret; 337 return ret;
263} 338}
264 339
340/**
341 * lpfc_sli_ringtxcmpl_put: Adds new iocb to the txcmplq.
342 * @phba: Pointer to HBA context object.
343 * @pring: Pointer to driver SLI ring object.
344 * @piocb: Pointer to the driver iocb object.
345 *
346 * This function is called with hbalock held. The function adds the
347 * new iocb to txcmplq of the given ring. This function always returns
348 * 0. If this function is called for ELS ring, this function checks if
349 * there is a vport associated with the ELS command. This function also
350 * starts els_tmofunc timer if this is an ELS command.
351 **/
265static int 352static int
266lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 353lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
267 struct lpfc_iocbq *piocb) 354 struct lpfc_iocbq *piocb)
@@ -282,6 +369,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
282 return 0; 369 return 0;
283} 370}
284 371
372/**
373 * lpfc_sli_ringtx_get: Get first element of the txq.
374 * @phba: Pointer to HBA context object.
375 * @pring: Pointer to driver SLI ring object.
376 *
377 * This function is called with hbalock held to get next
378 * iocb in txq of the given ring. If there is any iocb in
379 * the txq, the function returns first iocb in the list after
380 * removing the iocb from the list, else it returns NULL.
381 **/
285static struct lpfc_iocbq * 382static struct lpfc_iocbq *
286lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 383lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
287{ 384{
@@ -293,14 +390,25 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
293 return cmd_iocb; 390 return cmd_iocb;
294} 391}
295 392
393/**
394 * lpfc_sli_next_iocb_slot: Get next iocb slot in the ring.
395 * @phba: Pointer to HBA context object.
396 * @pring: Pointer to driver SLI ring object.
397 *
398 * This function is called with hbalock held and the caller must post the
399 * iocb without releasing the lock. If the caller releases the lock,
400 * iocb slot returned by the function is not guaranteed to be available.
401 * The function returns pointer to the next available iocb slot if there
402 * is available slot in the ring, else it returns NULL.
403 * If the get index of the ring is ahead of the put index, the function
404 * will post an error attention event to the worker thread to take the
405 * HBA to offline state.
406 **/
296static IOCB_t * 407static IOCB_t *
297lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 408lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
298{ 409{
299 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 410 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
300 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
301 &phba->slim2p->mbx.us.s2.port[pring->ringno];
302 uint32_t max_cmd_idx = pring->numCiocb; 411 uint32_t max_cmd_idx = pring->numCiocb;
303
304 if ((pring->next_cmdidx == pring->cmdidx) && 412 if ((pring->next_cmdidx == pring->cmdidx) &&
305 (++pring->next_cmdidx >= max_cmd_idx)) 413 (++pring->next_cmdidx >= max_cmd_idx))
306 pring->next_cmdidx = 0; 414 pring->next_cmdidx = 0;
@@ -336,6 +444,18 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
336 return lpfc_cmd_iocb(phba, pring); 444 return lpfc_cmd_iocb(phba, pring);
337} 445}
338 446
447/**
448 * lpfc_sli_next_iotag: Get an iotag for the iocb.
449 * @phba: Pointer to HBA context object.
450 * @iocbq: Pointer to driver iocb object.
451 *
452 * This function gets an iotag for the iocb. If there is no unused iotag and
453 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
454 * array and assigns a new iotag.
455 * The function returns the allocated iotag if successful, else returns zero.
456 * Zero is not a valid iotag.
457 * The caller is not required to hold any lock.
458 **/
339uint16_t 459uint16_t
340lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 460lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
341{ 461{
@@ -399,6 +519,20 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
399 return 0; 519 return 0;
400} 520}
401 521
522/**
523 * lpfc_sli_submit_iocb: Submit an iocb to the firmware.
524 * @phba: Pointer to HBA context object.
525 * @pring: Pointer to driver SLI ring object.
526 * @iocb: Pointer to iocb slot in the ring.
527 * @nextiocb: Pointer to driver iocb object which need to be
528 * posted to firmware.
529 *
530 * This function is called with hbalock held to post a new iocb to
531 * the firmware. This function copies the new iocb to ring iocb slot and
532 * updates the ring pointers. It adds the new iocb to txcmplq if there is
533 * a completion call back for this iocb else the function will free the
534 * iocb object.
535 **/
402static void 536static void
403lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 537lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
404 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 538 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
@@ -441,6 +575,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
441 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 575 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
442} 576}
443 577
578/**
579 * lpfc_sli_update_full_ring: Update the chip attention register.
580 * @phba: Pointer to HBA context object.
581 * @pring: Pointer to driver SLI ring object.
582 *
583 * The caller is not required to hold any lock for calling this function.
584 * This function updates the chip attention bits for the ring to inform firmware
585 * that there are pending work to be done for this ring and requests an
586 * interrupt when there is space available in the ring. This function is
587 * called when the driver is unable to post more iocbs to the ring due
588 * to unavailability of space in the ring.
589 **/
444static void 590static void
445lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 591lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
446{ 592{
@@ -460,6 +606,15 @@ lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
460 pring->stats.iocb_cmd_full++; 606 pring->stats.iocb_cmd_full++;
461} 607}
462 608
609/**
610 * lpfc_sli_update_ring: Update chip attention register.
611 * @phba: Pointer to HBA context object.
612 * @pring: Pointer to driver SLI ring object.
613 *
614 * This function updates the chip attention register bit for the
615 * given ring to inform HBA that there is more work to be done
616 * in this ring. The caller is not required to hold any lock.
617 **/
463static void 618static void
464lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 619lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
465{ 620{
@@ -468,11 +623,22 @@ lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
468 /* 623 /*
469 * Tell the HBA that there is work to do in this ring. 624 * Tell the HBA that there is work to do in this ring.
470 */ 625 */
471 wmb(); 626 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
472 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 627 wmb();
473 readl(phba->CAregaddr); /* flush */ 628 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
629 readl(phba->CAregaddr); /* flush */
630 }
474} 631}
475 632
633/**
634 * lpfc_sli_resume_iocb: Process iocbs in the txq.
635 * @phba: Pointer to HBA context object.
636 * @pring: Pointer to driver SLI ring object.
637 *
638 * This function is called with hbalock held to post pending iocbs
639 * in the txq to the firmware. This function is called when driver
640 * detects space available in the ring.
641 **/
476static void 642static void
477lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 643lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
478{ 644{
@@ -504,6 +670,16 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
504 return; 670 return;
505} 671}
506 672
673/**
674 * lpfc_sli_next_hbq_slot: Get next hbq entry for the HBQ.
675 * @phba: Pointer to HBA context object.
676 * @hbqno: HBQ number.
677 *
678 * This function is called with hbalock held to get the next
679 * available slot for the given HBQ. If there is free slot
680 * available for the HBQ it will return pointer to the next available
681 * HBQ entry else it will return NULL.
682 **/
507static struct lpfc_hbq_entry * 683static struct lpfc_hbq_entry *
508lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 684lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
509{ 685{
@@ -539,6 +715,15 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
539 hbqp->hbqPutIdx; 715 hbqp->hbqPutIdx;
540} 716}
541 717
718/**
719 * lpfc_sli_hbqbuf_free_all: Free all the hbq buffers.
720 * @phba: Pointer to HBA context object.
721 *
722 * This function is called with no lock held to free all the
723 * hbq buffers while uninitializing the SLI interface. It also
724 * frees the HBQ buffers returned by the firmware but not yet
725 * processed by the upper layers.
726 **/
542void 727void
543lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 728lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
544{ 729{
@@ -584,6 +769,18 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
584 spin_unlock_irqrestore(&phba->hbalock, flags); 769 spin_unlock_irqrestore(&phba->hbalock, flags);
585} 770}
586 771
772/**
773 * lpfc_sli_hbq_to_firmware: Post the hbq buffer to firmware.
774 * @phba: Pointer to HBA context object.
775 * @hbqno: HBQ number.
776 * @hbq_buf: Pointer to HBQ buffer.
777 *
778 * This function is called with the hbalock held to post a
779 * hbq buffer to the firmware. If the function finds an empty
780 * slot in the HBQ, it will post the buffer. The function will return
781 * pointer to the hbq entry if it successfully post the buffer
782 * else it will return NULL.
783 **/
587static struct lpfc_hbq_entry * 784static struct lpfc_hbq_entry *
588lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 785lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
589 struct hbq_dmabuf *hbq_buf) 786 struct hbq_dmabuf *hbq_buf)
@@ -612,6 +809,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
612 return hbqe; 809 return hbqe;
613} 810}
614 811
812/* HBQ for ELS and CT traffic. */
615static struct lpfc_hbq_init lpfc_els_hbq = { 813static struct lpfc_hbq_init lpfc_els_hbq = {
616 .rn = 1, 814 .rn = 1,
617 .entry_count = 200, 815 .entry_count = 200,
@@ -623,6 +821,7 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
623 .add_count = 5, 821 .add_count = 5,
624}; 822};
625 823
824/* HBQ for the extra ring if needed */
626static struct lpfc_hbq_init lpfc_extra_hbq = { 825static struct lpfc_hbq_init lpfc_extra_hbq = {
627 .rn = 1, 826 .rn = 1,
628 .entry_count = 200, 827 .entry_count = 200,
@@ -634,51 +833,81 @@ static struct lpfc_hbq_init lpfc_extra_hbq = {
634 .add_count = 5, 833 .add_count = 5,
635}; 834};
636 835
836/* Array of HBQs */
637struct lpfc_hbq_init *lpfc_hbq_defs[] = { 837struct lpfc_hbq_init *lpfc_hbq_defs[] = {
638 &lpfc_els_hbq, 838 &lpfc_els_hbq,
639 &lpfc_extra_hbq, 839 &lpfc_extra_hbq,
640}; 840};
641 841
842/**
843 * lpfc_sli_hbqbuf_fill_hbqs: Post more hbq buffers to HBQ.
844 * @phba: Pointer to HBA context object.
845 * @hbqno: HBQ number.
846 * @count: Number of HBQ buffers to be posted.
847 *
848 * This function is called with no lock held to post more hbq buffers to the
849 * given HBQ. The function returns the number of HBQ buffers successfully
850 * posted.
851 **/
642static int 852static int
643lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 853lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
644{ 854{
645 uint32_t i, start, end; 855 uint32_t i, posted = 0;
646 unsigned long flags; 856 unsigned long flags;
647 struct hbq_dmabuf *hbq_buffer; 857 struct hbq_dmabuf *hbq_buffer;
648 858 LIST_HEAD(hbq_buf_list);
649 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 859 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
650 return 0; 860 return 0;
651 861
652 start = phba->hbqs[hbqno].buffer_count; 862 if ((phba->hbqs[hbqno].buffer_count + count) >
653 end = count + start; 863 lpfc_hbq_defs[hbqno]->entry_count)
654 if (end > lpfc_hbq_defs[hbqno]->entry_count) 864 count = lpfc_hbq_defs[hbqno]->entry_count -
655 end = lpfc_hbq_defs[hbqno]->entry_count; 865 phba->hbqs[hbqno].buffer_count;
656 866 if (!count)
867 return 0;
868 /* Allocate HBQ entries */
869 for (i = 0; i < count; i++) {
870 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
871 if (!hbq_buffer)
872 break;
873 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
874 }
657 /* Check whether HBQ is still in use */ 875 /* Check whether HBQ is still in use */
658 spin_lock_irqsave(&phba->hbalock, flags); 876 spin_lock_irqsave(&phba->hbalock, flags);
659 if (!phba->hbq_in_use) 877 if (!phba->hbq_in_use)
660 goto out; 878 goto err;
661 879 while (!list_empty(&hbq_buf_list)) {
662 /* Populate HBQ entries */ 880 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
663 for (i = start; i < end; i++) { 881 dbuf.list);
664 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 882 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
665 if (!hbq_buffer) 883 (hbqno << 16));
666 goto err; 884 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
667 hbq_buffer->tag = (i | (hbqno << 16));
668 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
669 phba->hbqs[hbqno].buffer_count++; 885 phba->hbqs[hbqno].buffer_count++;
670 else 886 posted++;
887 } else
671 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 888 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
672 } 889 }
673
674 out:
675 spin_unlock_irqrestore(&phba->hbalock, flags); 890 spin_unlock_irqrestore(&phba->hbalock, flags);
676 return 0; 891 return posted;
677 err: 892err:
678 spin_unlock_irqrestore(&phba->hbalock, flags); 893 spin_unlock_irqrestore(&phba->hbalock, flags);
679 return 1; 894 while (!list_empty(&hbq_buf_list)) {
895 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
896 dbuf.list);
897 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
898 }
899 return 0;
680} 900}
681 901
902/**
903 * lpfc_sli_hbqbuf_add_hbqs: Post more HBQ buffers to firmware.
904 * @phba: Pointer to HBA context object.
905 * @qno: HBQ number.
906 *
907 * This function posts more buffers to the HBQ. This function
908 * is called with no lock held. The function returns the number of HBQ entries
909 * successfully allocated.
910 **/
682int 911int
683lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 912lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
684{ 913{
@@ -686,6 +915,15 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
686 lpfc_hbq_defs[qno]->add_count)); 915 lpfc_hbq_defs[qno]->add_count));
687} 916}
688 917
918/**
919 * lpfc_sli_hbqbuf_init_hbqs: Post initial buffers to the HBQ.
920 * @phba: Pointer to HBA context object.
921 * @qno: HBQ queue number.
922 *
923 * This function is called from SLI initialization code path with
924 * no lock held to post initial HBQ buffers to firmware. The
925 * function returns the number of HBQ entries successfully allocated.
926 **/
689static int 927static int
690lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 928lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
691{ 929{
@@ -693,6 +931,16 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
693 lpfc_hbq_defs[qno]->init_count)); 931 lpfc_hbq_defs[qno]->init_count));
694} 932}
695 933
934/**
935 * lpfc_sli_hbqbuf_find: Find the hbq buffer associated with a tag.
936 * @phba: Pointer to HBA context object.
937 * @tag: Tag of the hbq buffer.
938 *
939 * This function is called with hbalock held. This function searches
940 * for the hbq buffer associated with the given tag in the hbq buffer
941 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise
942 * it returns NULL.
943 **/
696static struct hbq_dmabuf * 944static struct hbq_dmabuf *
697lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 945lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
698{ 946{
@@ -716,6 +964,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
716 return NULL; 964 return NULL;
717} 965}
718 966
967/**
968 * lpfc_sli_free_hbq: Give back the hbq buffer to firmware.
969 * @phba: Pointer to HBA context object.
970 * @hbq_buffer: Pointer to HBQ buffer.
971 *
972 * This function is called with hbalock. This function gives back
973 * the hbq buffer to firmware. If the HBQ does not have space to
974 * post the buffer, it will free the buffer.
975 **/
719void 976void
720lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 977lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
721{ 978{
@@ -729,6 +986,15 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
729 } 986 }
730} 987}
731 988
989/**
990 * lpfc_sli_chk_mbx_command: Check if the mailbox is a legitimate mailbox.
991 * @mbxCommand: mailbox command code.
992 *
993 * This function is called by the mailbox event handler function to verify
994 * that the completed mailbox command is a legitimate mailbox command. If the
995 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
996 * and the mailbox event handler will take the HBA offline.
997 **/
732static int 998static int
733lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 999lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
734{ 1000{
@@ -785,6 +1051,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
785 case MBX_REG_VPI: 1051 case MBX_REG_VPI:
786 case MBX_UNREG_VPI: 1052 case MBX_UNREG_VPI:
787 case MBX_HEARTBEAT: 1053 case MBX_HEARTBEAT:
1054 case MBX_PORT_CAPABILITIES:
1055 case MBX_PORT_IOV_CONTROL:
788 ret = mbxCommand; 1056 ret = mbxCommand;
789 break; 1057 break;
790 default: 1058 default:
@@ -793,6 +1061,19 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
793 } 1061 }
794 return ret; 1062 return ret;
795} 1063}
1064
1065/**
1066 * lpfc_sli_wake_mbox_wait: Completion handler for mbox issued from
1067 * lpfc_sli_issue_mbox_wait.
1068 * @phba: Pointer to HBA context object.
1069 * @pmboxq: Pointer to mailbox command.
1070 *
1071 * This is completion handler function for mailbox commands issued from
1072 * lpfc_sli_issue_mbox_wait function. This function is called by the
1073 * mailbox event handler function with no lock held. This function
1074 * will wake up thread waiting on the wait queue pointed by context1
1075 * of the mailbox.
1076 **/
796static void 1077static void
797lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1078lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
798{ 1079{
@@ -812,6 +1093,17 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
812 return; 1093 return;
813} 1094}
814 1095
1096
1097/**
1098 * lpfc_sli_def_mbox_cmpl: Default mailbox completion handler.
1099 * @phba: Pointer to HBA context object.
1100 * @pmb: Pointer to mailbox object.
1101 *
1102 * This function is the default mailbox completion handler. It
1103 * frees the memory resources associated with the completed mailbox
1104 * command. If the completed command is a REG_LOGIN mailbox command,
1105 * this function will issue a UREG_LOGIN to re-claim the RPI.
1106 **/
815void 1107void
816lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1108lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
817{ 1109{
@@ -846,6 +1138,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
846 return; 1138 return;
847} 1139}
848 1140
1141/**
1142 * lpfc_sli_handle_mb_event: Handle mailbox completions from firmware.
1143 * @phba: Pointer to HBA context object.
1144 *
1145 * This function is called with no lock held. This function processes all
1146 * the completed mailbox commands and gives it to upper layers. The interrupt
1147 * service routine processes mailbox completion interrupt and adds completed
1148 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
1149 * Worker thread call lpfc_sli_handle_mb_event, which will return the
1150 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
1151 * function returns the mailbox commands to the upper layer by calling the
1152 * completion handler function of each mailbox.
1153 **/
849int 1154int
850lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 1155lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
851{ 1156{
@@ -953,6 +1258,18 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
953 return 0; 1258 return 0;
954} 1259}
955 1260
1261/**
1262 * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
1263 * @phba: Pointer to HBA context object.
1264 * @tag: Tag for the HBQ buffer.
1265 *
1266 * This function is called from unsolicited event handler code path to get the
1267 * HBQ buffer associated with an unsolicited iocb. This function is called with
1268 * no lock held. It returns the buffer associated with the given tag and posts
1269 * another buffer to the firmware. Note that the new buffer must be allocated
1270 * before taking the hbalock and that the hba lock must be held until it is
1271 * finished with the hbq entry swap.
1272 **/
956static struct lpfc_dmabuf * 1273static struct lpfc_dmabuf *
957lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) 1274lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
958{ 1275{
@@ -962,22 +1279,28 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
962 dma_addr_t phys; /* mapped address */ 1279 dma_addr_t phys; /* mapped address */
963 unsigned long flags; 1280 unsigned long flags;
964 1281
1282 hbqno = tag >> 16;
1283 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
965 /* Check whether HBQ is still in use */ 1284 /* Check whether HBQ is still in use */
966 spin_lock_irqsave(&phba->hbalock, flags); 1285 spin_lock_irqsave(&phba->hbalock, flags);
967 if (!phba->hbq_in_use) { 1286 if (!phba->hbq_in_use) {
1287 if (new_hbq_entry)
1288 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1289 new_hbq_entry);
968 spin_unlock_irqrestore(&phba->hbalock, flags); 1290 spin_unlock_irqrestore(&phba->hbalock, flags);
969 return NULL; 1291 return NULL;
970 } 1292 }
971 1293
972 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 1294 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
973 if (hbq_entry == NULL) { 1295 if (hbq_entry == NULL) {
1296 if (new_hbq_entry)
1297 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1298 new_hbq_entry);
974 spin_unlock_irqrestore(&phba->hbalock, flags); 1299 spin_unlock_irqrestore(&phba->hbalock, flags);
975 return NULL; 1300 return NULL;
976 } 1301 }
977 list_del(&hbq_entry->dbuf.list); 1302 list_del(&hbq_entry->dbuf.list);
978 1303
979 hbqno = tag >> 16;
980 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
981 if (new_hbq_entry == NULL) { 1304 if (new_hbq_entry == NULL) {
982 list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); 1305 list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
983 spin_unlock_irqrestore(&phba->hbalock, flags); 1306 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -997,6 +1320,18 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
997 return &new_hbq_entry->dbuf; 1320 return &new_hbq_entry->dbuf;
998} 1321}
999 1322
1323/**
1324 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
1325 * @phba: Pointer to HBA context object.
1326 * @pring: Pointer to driver SLI ring object.
1327 * @tag: buffer tag.
1328 *
1329 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
1330 * is set in the tag the buffer is posted for a particular exchange,
1331 * the function will return the buffer without replacing the buffer.
1332 * If the buffer is for unsolicited ELS or CT traffic, this function
1333 * returns the buffer and also posts another buffer to the firmware.
1334 **/
1000static struct lpfc_dmabuf * 1335static struct lpfc_dmabuf *
1001lpfc_sli_get_buff(struct lpfc_hba *phba, 1336lpfc_sli_get_buff(struct lpfc_hba *phba,
1002 struct lpfc_sli_ring *pring, 1337 struct lpfc_sli_ring *pring,
@@ -1008,6 +1343,21 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1008 return lpfc_sli_replace_hbqbuff(phba, tag); 1343 return lpfc_sli_replace_hbqbuff(phba, tag);
1009} 1344}
1010 1345
1346
1347/**
1348 * lpfc_sli_process_unsol_iocb: Unsolicited iocb handler.
1349 * @phba: Pointer to HBA context object.
1350 * @pring: Pointer to driver SLI ring object.
1351 * @saveq: Pointer to the unsolicited iocb.
1352 *
1353 * This function is called with no lock held by the ring event handler
1354 * when there is an unsolicited iocb posted to the response ring by the
1355 * firmware. This function gets the buffer associated with the iocbs
1356 * and calls the event handler for the ring. This function handles both
1357 * qring buffers and hbq buffers.
1358 * When the function returns 1 the caller can free the iocb object otherwise
1359 * upper layer functions will free the iocb objects.
1360 **/
1011static int 1361static int
1012lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1362lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1013 struct lpfc_iocbq *saveq) 1363 struct lpfc_iocbq *saveq)
@@ -1192,6 +1542,18 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1192 return 1; 1542 return 1;
1193} 1543}
1194 1544
1545/**
1546 * lpfc_sli_iocbq_lookup: Find command iocb for the given response iocb.
1547 * @phba: Pointer to HBA context object.
1548 * @pring: Pointer to driver SLI ring object.
1549 * @prspiocb: Pointer to response iocb object.
1550 *
1551 * This function looks up the iocb_lookup table to get the command iocb
1552 * corresponding to the given response iocb using the iotag of the
1553 * response iocb. This function is called with the hbalock held.
1554 * This function returns the command iocb object if it finds the command
1555 * iocb else returns NULL.
1556 **/
1195static struct lpfc_iocbq * 1557static struct lpfc_iocbq *
1196lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 1558lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1197 struct lpfc_sli_ring *pring, 1559 struct lpfc_sli_ring *pring,
@@ -1217,6 +1579,23 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1217 return NULL; 1579 return NULL;
1218} 1580}
1219 1581
1582/**
1583 * lpfc_sli_process_sol_iocb: process solicited iocb completion.
1584 * @phba: Pointer to HBA context object.
1585 * @pring: Pointer to driver SLI ring object.
1586 * @saveq: Pointer to the response iocb to be processed.
1587 *
1588 * This function is called by the ring event handler for non-fcp
1589 * rings when there is a new response iocb in the response ring.
1590 * The caller is not required to hold any locks. This function
1591 * gets the command iocb associated with the response iocb and
1592 * calls the completion handler for the command iocb. If there
1593 * is no completion handler, the function will free the resources
1594 * associated with command iocb. If the response iocb is for
1595 * an already aborted command iocb, the status of the completion
1596 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
1597 * This function always returns 1.
1598 **/
1220static int 1599static int
1221lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1600lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1222 struct lpfc_iocbq *saveq) 1601 struct lpfc_iocbq *saveq)
@@ -1233,6 +1612,17 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1233 if (cmdiocbp) { 1612 if (cmdiocbp) {
1234 if (cmdiocbp->iocb_cmpl) { 1613 if (cmdiocbp->iocb_cmpl) {
1235 /* 1614 /*
1615 * If an ELS command failed send an event to mgmt
1616 * application.
1617 */
1618 if (saveq->iocb.ulpStatus &&
1619 (pring->ringno == LPFC_ELS_RING) &&
1620 (cmdiocbp->iocb.ulpCommand ==
1621 CMD_ELS_REQUEST64_CR))
1622 lpfc_send_els_failure_event(phba,
1623 cmdiocbp, saveq);
1624
1625 /*
1236 * Post all ELS completions to the worker thread. 1626 * Post all ELS completions to the worker thread.
1237 * All other are passed to the completion callback. 1627 * All other are passed to the completion callback.
1238 */ 1628 */
@@ -1282,12 +1672,20 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1282 return rc; 1672 return rc;
1283} 1673}
1284 1674
1675/**
1676 * lpfc_sli_rsp_pointers_error: Response ring pointer error handler.
1677 * @phba: Pointer to HBA context object.
1678 * @pring: Pointer to driver SLI ring object.
1679 *
1680 * This function is called from the iocb ring event handlers when
1681 * put pointer is ahead of the get pointer for a ring. This function signal
1682 * an error attention condition to the worker thread and the worker
1683 * thread will transition the HBA to offline state.
1684 **/
1285static void 1685static void
1286lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1686lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1287{ 1687{
1288 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1688 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1289 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1290 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1291 /* 1689 /*
1292 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1690 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1293 * rsp ring <portRspMax> 1691 * rsp ring <portRspMax>
@@ -1312,6 +1710,51 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1312 return; 1710 return;
1313} 1711}
1314 1712
1713/**
1714 * lpfc_poll_eratt: Error attention polling timer timeout handler.
1715 * @ptr: Pointer to address of HBA context object.
1716 *
1717 * This function is invoked by the Error Attention polling timer when the
1718 * timer times out. It will check the SLI Error Attention register for
1719 * possible attention events. If so, it will post an Error Attention event
1720 * and wake up worker thread to process it. Otherwise, it will set up the
1721 * Error Attention polling timer for the next poll.
1722 **/
1723void lpfc_poll_eratt(unsigned long ptr)
1724{
1725 struct lpfc_hba *phba;
1726 uint32_t eratt = 0;
1727
1728 phba = (struct lpfc_hba *)ptr;
1729
1730 /* Check chip HA register for error event */
1731 eratt = lpfc_sli_check_eratt(phba);
1732
1733 if (eratt)
1734 /* Tell the worker thread there is work to do */
1735 lpfc_worker_wake_up(phba);
1736 else
1737 /* Restart the timer for next eratt poll */
1738 mod_timer(&phba->eratt_poll, jiffies +
1739 HZ * LPFC_ERATT_POLL_INTERVAL);
1740 return;
1741}
1742
1743/**
1744 * lpfc_sli_poll_fcp_ring: Handle FCP ring completion in polling mode.
1745 * @phba: Pointer to HBA context object.
1746 *
1747 * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
1748 * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
1749 * is enabled.
1750 *
1751 * The caller does not hold any lock.
1752 * The function processes each response iocb in the response ring until it
1753 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
1754 * LE bit set. The function will call the completion handler of the command iocb
1755 * if the response iocb indicates a completion for a command iocb or it is
1756 * an abort completion.
1757 **/
1315void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) 1758void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1316{ 1759{
1317 struct lpfc_sli *psli = &phba->sli; 1760 struct lpfc_sli *psli = &phba->sli;
@@ -1320,7 +1763,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1320 IOCB_t *entry = NULL; 1763 IOCB_t *entry = NULL;
1321 struct lpfc_iocbq *cmdiocbq = NULL; 1764 struct lpfc_iocbq *cmdiocbq = NULL;
1322 struct lpfc_iocbq rspiocbq; 1765 struct lpfc_iocbq rspiocbq;
1323 struct lpfc_pgp *pgp; 1766 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1324 uint32_t status; 1767 uint32_t status;
1325 uint32_t portRspPut, portRspMax; 1768 uint32_t portRspPut, portRspMax;
1326 int type; 1769 int type;
@@ -1330,11 +1773,6 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1330 1773
1331 pring->stats.iocb_event++; 1774 pring->stats.iocb_event++;
1332 1775
1333 pgp = (phba->sli_rev == 3) ?
1334 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1335 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1336
1337
1338 /* 1776 /*
1339 * The next available response entry should never exceed the maximum 1777 * The next available response entry should never exceed the maximum
1340 * entries. If it does, treat it as an adapter hardware error. 1778 * entries. If it does, treat it as an adapter hardware error.
@@ -1372,8 +1810,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1372 irsp->un.ulpWord[3], 1810 irsp->un.ulpWord[3],
1373 irsp->un.ulpWord[4], 1811 irsp->un.ulpWord[4],
1374 irsp->un.ulpWord[5], 1812 irsp->un.ulpWord[5],
1375 *(((uint32_t *) irsp) + 6), 1813 *(uint32_t *)&irsp->un1,
1376 *(((uint32_t *) irsp) + 7)); 1814 *((uint32_t *)&irsp->un1 + 1));
1377 } 1815 }
1378 1816
1379 switch (type) { 1817 switch (type) {
@@ -1465,17 +1903,28 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1465 return; 1903 return;
1466} 1904}
1467 1905
1468/* 1906/**
1907 * lpfc_sli_handle_fast_ring_event: Handle ring events on FCP ring.
1908 * @phba: Pointer to HBA context object.
1909 * @pring: Pointer to driver SLI ring object.
1910 * @mask: Host attention register mask for this ring.
1911 *
1912 * This function is called from the interrupt context when there is a ring
1913 * event for the fcp ring. The caller does not hold any lock.
1914 * The function processes each response iocb in the response ring until it
1915 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
1916 * LE bit set. The function will call the completion handler of the command iocb
1917 * if the response iocb indicates a completion for a command iocb or it is
1918 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
1919 * function if this is an unsolicited iocb.
1469 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1920 * This routine presumes LPFC_FCP_RING handling and doesn't bother
1470 * to check it explicitly. 1921 * to check it explicitly. This function always returns 1.
1471 */ 1922 **/
1472static int 1923static int
1473lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 1924lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1474 struct lpfc_sli_ring *pring, uint32_t mask) 1925 struct lpfc_sli_ring *pring, uint32_t mask)
1475{ 1926{
1476 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1927 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1477 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1478 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1479 IOCB_t *irsp = NULL; 1928 IOCB_t *irsp = NULL;
1480 IOCB_t *entry = NULL; 1929 IOCB_t *entry = NULL;
1481 struct lpfc_iocbq *cmdiocbq = NULL; 1930 struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1548,8 +1997,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1548 irsp->un.ulpWord[3], 1997 irsp->un.ulpWord[3],
1549 irsp->un.ulpWord[4], 1998 irsp->un.ulpWord[4],
1550 irsp->un.ulpWord[5], 1999 irsp->un.ulpWord[5],
1551 *(((uint32_t *) irsp) + 6), 2000 *(uint32_t *)&irsp->un1,
1552 *(((uint32_t *) irsp) + 7)); 2001 *((uint32_t *)&irsp->un1 + 1));
1553 } 2002 }
1554 2003
1555 switch (type) { 2004 switch (type) {
@@ -1646,13 +2095,28 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1646 return rc; 2095 return rc;
1647} 2096}
1648 2097
2098/**
2099 * lpfc_sli_handle_slow_ring_event: Handle ring events for non-FCP rings.
2100 * @phba: Pointer to HBA context object.
2101 * @pring: Pointer to driver SLI ring object.
2102 * @mask: Host attention register mask for this ring.
2103 *
2104 * This function is called from the worker thread when there is a ring
2105 * event for non-fcp rings. The caller does not hold any lock .
2106 * The function processes each response iocb in the response ring until it
2107 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2108 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2109 * response iocb indicates a completion of a command iocb. The function
2110 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2111 * iocb. The function frees the resources or calls the completion handler if
2112 * this iocb is an abort completion. The function returns 0 when the allocated
2113 * iocbs are not freed, otherwise returns 1.
2114 **/
1649int 2115int
1650lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2116lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1651 struct lpfc_sli_ring *pring, uint32_t mask) 2117 struct lpfc_sli_ring *pring, uint32_t mask)
1652{ 2118{
1653 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 2119 struct lpfc_pgp *pgp;
1654 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1655 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1656 IOCB_t *entry; 2120 IOCB_t *entry;
1657 IOCB_t *irsp = NULL; 2121 IOCB_t *irsp = NULL;
1658 struct lpfc_iocbq *rspiocbp = NULL; 2122 struct lpfc_iocbq *rspiocbp = NULL;
@@ -1666,6 +2130,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1666 int rc = 1; 2130 int rc = 1;
1667 unsigned long iflag; 2131 unsigned long iflag;
1668 2132
2133 pgp = &phba->port_gp[pring->ringno];
1669 spin_lock_irqsave(&phba->hbalock, iflag); 2134 spin_lock_irqsave(&phba->hbalock, iflag);
1670 pring->stats.iocb_event++; 2135 pring->stats.iocb_event++;
1671 2136
@@ -1904,6 +2369,16 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1904 return rc; 2369 return rc;
1905} 2370}
1906 2371
2372/**
2373 * lpfc_sli_abort_iocb_ring: Abort all iocbs in the ring.
2374 * @phba: Pointer to HBA context object.
2375 * @pring: Pointer to driver SLI ring object.
2376 *
2377 * This function aborts all iocbs in the given ring and frees all the iocb
2378 * objects in txq. This function issues an abort iocb for all the iocb commands
2379 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
2380 * the return of this function. The caller is not required to hold any locks.
2381 **/
1907void 2382void
1908lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2383lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1909{ 2384{
@@ -1943,6 +2418,83 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1943 } 2418 }
1944} 2419}
1945 2420
2421/**
2422 * lpfc_sli_flush_fcp_rings: flush all iocbs in the fcp ring.
2423 * @phba: Pointer to HBA context object.
2424 *
2425 * This function flushes all iocbs in the fcp ring and frees all the iocb
2426 * objects in txq and txcmplq. This function will not issue abort iocbs
2427 * for all the iocb commands in txcmplq, they will just be returned with
2428 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2429 * slot has been permanently disabled.
2430 **/
2431void
2432lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2433{
2434 LIST_HEAD(txq);
2435 LIST_HEAD(txcmplq);
2436 struct lpfc_iocbq *iocb;
2437 IOCB_t *cmd = NULL;
2438 struct lpfc_sli *psli = &phba->sli;
2439 struct lpfc_sli_ring *pring;
2440
2441 /* Currently, only one fcp ring */
2442 pring = &psli->ring[psli->fcp_ring];
2443
2444 spin_lock_irq(&phba->hbalock);
2445 /* Retrieve everything on txq */
2446 list_splice_init(&pring->txq, &txq);
2447 pring->txq_cnt = 0;
2448
2449 /* Retrieve everything on the txcmplq */
2450 list_splice_init(&pring->txcmplq, &txcmplq);
2451 pring->txcmplq_cnt = 0;
2452 spin_unlock_irq(&phba->hbalock);
2453
2454 /* Flush the txq */
2455 while (!list_empty(&txq)) {
2456 iocb = list_get_first(&txq, struct lpfc_iocbq, list);
2457 cmd = &iocb->iocb;
2458 list_del_init(&iocb->list);
2459
2460 if (!iocb->iocb_cmpl)
2461 lpfc_sli_release_iocbq(phba, iocb);
2462 else {
2463 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2464 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2465 (iocb->iocb_cmpl) (phba, iocb, iocb);
2466 }
2467 }
2468
2469 /* Flush the txcmpq */
2470 while (!list_empty(&txcmplq)) {
2471 iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list);
2472 cmd = &iocb->iocb;
2473 list_del_init(&iocb->list);
2474
2475 if (!iocb->iocb_cmpl)
2476 lpfc_sli_release_iocbq(phba, iocb);
2477 else {
2478 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2479 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2480 (iocb->iocb_cmpl) (phba, iocb, iocb);
2481 }
2482 }
2483}
2484
2485/**
2486 * lpfc_sli_brdready: Check for host status bits.
2487 * @phba: Pointer to HBA context object.
2488 * @mask: Bit mask to be checked.
2489 *
2490 * This function reads the host status register and compares
2491 * with the provided bit mask to check if HBA completed
2492 * the restart. This function will wait in a loop for the
2493 * HBA to complete restart. If the HBA does not restart within
2494 * 15 iterations, the function will reset the HBA again. The
2495 * function returns 1 when HBA fail to restart otherwise returns
2496 * zero.
2497 **/
1946int 2498int
1947lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 2499lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1948{ 2500{
@@ -1990,6 +2542,13 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1990 2542
1991#define BARRIER_TEST_PATTERN (0xdeadbeef) 2543#define BARRIER_TEST_PATTERN (0xdeadbeef)
1992 2544
2545/**
2546 * lpfc_reset_barrier: Make HBA ready for HBA reset.
2547 * @phba: Pointer to HBA context object.
2548 *
2549 * This function is called before resetting an HBA. This
2550 * function requests HBA to quiesce DMAs before a reset.
2551 **/
1993void lpfc_reset_barrier(struct lpfc_hba *phba) 2552void lpfc_reset_barrier(struct lpfc_hba *phba)
1994{ 2553{
1995 uint32_t __iomem *resp_buf; 2554 uint32_t __iomem *resp_buf;
@@ -2063,6 +2622,17 @@ restore_hc:
2063 readl(phba->HCregaddr); /* flush */ 2622 readl(phba->HCregaddr); /* flush */
2064} 2623}
2065 2624
2625/**
2626 * lpfc_sli_brdkill: Issue a kill_board mailbox command.
2627 * @phba: Pointer to HBA context object.
2628 *
2629 * This function issues a kill_board mailbox command and waits for
2630 * the error attention interrupt. This function is called for stopping
2631 * the firmware processing. The caller is not required to hold any
2632 * locks. This function calls lpfc_hba_down_post function to free
2633 * any pending commands after the kill. The function will return 1 when it
2634 * fails to kill the board else will return 0.
2635 **/
2066int 2636int
2067lpfc_sli_brdkill(struct lpfc_hba *phba) 2637lpfc_sli_brdkill(struct lpfc_hba *phba)
2068{ 2638{
@@ -2139,6 +2709,17 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2139 return ha_copy & HA_ERATT ? 0 : 1; 2709 return ha_copy & HA_ERATT ? 0 : 1;
2140} 2710}
2141 2711
2712/**
2713 * lpfc_sli_brdreset: Reset the HBA.
2714 * @phba: Pointer to HBA context object.
2715 *
2716 * This function resets the HBA by writing HC_INITFF to the control
2717 * register. After the HBA resets, this function resets all the iocb ring
2718 * indices. This function disables PCI layer parity checking during
2719 * the reset.
2720 * This function returns 0 always.
2721 * The caller is not required to hold any locks.
2722 **/
2142int 2723int
2143lpfc_sli_brdreset(struct lpfc_hba *phba) 2724lpfc_sli_brdreset(struct lpfc_hba *phba)
2144{ 2725{
@@ -2191,6 +2772,19 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2191 return 0; 2772 return 0;
2192} 2773}
2193 2774
2775/**
2776 * lpfc_sli_brdrestart: Restart the HBA.
2777 * @phba: Pointer to HBA context object.
2778 *
2779 * This function is called in the SLI initialization code path to
2780 * restart the HBA. The caller is not required to hold any lock.
2781 * This function writes MBX_RESTART mailbox command to the SLIM and
2782 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
2783 * function to free any pending commands. The function enables
2784 * POST only during the first initialization. The function returns zero.
2785 * The function does not guarantee completion of MBX_RESTART mailbox
2786 * command before the return of this function.
2787 **/
2194int 2788int
2195lpfc_sli_brdrestart(struct lpfc_hba *phba) 2789lpfc_sli_brdrestart(struct lpfc_hba *phba)
2196{ 2790{
@@ -2251,6 +2845,16 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2251 return 0; 2845 return 0;
2252} 2846}
2253 2847
2848/**
2849 * lpfc_sli_chipset_init: Wait for the restart of the HBA after a restart.
2850 * @phba: Pointer to HBA context object.
2851 *
2852 * This function is called after a HBA restart to wait for successful
2853 * restart of the HBA. Successful restart of the HBA is indicated by
2854 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
2855 * iteration, the function will restart the HBA again. The function returns
2856 * zero if HBA successfully restarted else returns negative error code.
2857 **/
2254static int 2858static int
2255lpfc_sli_chipset_init(struct lpfc_hba *phba) 2859lpfc_sli_chipset_init(struct lpfc_hba *phba)
2256{ 2860{
@@ -2336,12 +2940,25 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2336 return 0; 2940 return 0;
2337} 2941}
2338 2942
2943/**
2944 * lpfc_sli_hbq_count: Get the number of HBQs to be configured.
2945 *
2946 * This function calculates and returns the number of HBQs required to be
2947 * configured.
2948 **/
2339int 2949int
2340lpfc_sli_hbq_count(void) 2950lpfc_sli_hbq_count(void)
2341{ 2951{
2342 return ARRAY_SIZE(lpfc_hbq_defs); 2952 return ARRAY_SIZE(lpfc_hbq_defs);
2343} 2953}
2344 2954
2955/**
2956 * lpfc_sli_hbq_entry_count: Calculate total number of hbq entries.
2957 *
2958 * This function adds the number of hbq entries in every HBQ to get
2959 * the total number of hbq entries required for the HBA and returns
2960 * the total count.
2961 **/
2345static int 2962static int
2346lpfc_sli_hbq_entry_count(void) 2963lpfc_sli_hbq_entry_count(void)
2347{ 2964{
@@ -2354,12 +2971,27 @@ lpfc_sli_hbq_entry_count(void)
2354 return count; 2971 return count;
2355} 2972}
2356 2973
2974/**
2975 * lpfc_sli_hbq_size: Calculate memory required for all hbq entries.
2976 *
2977 * This function calculates amount of memory required for all hbq entries
2978 * to be configured and returns the total memory required.
2979 **/
2357int 2980int
2358lpfc_sli_hbq_size(void) 2981lpfc_sli_hbq_size(void)
2359{ 2982{
2360 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 2983 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2361} 2984}
2362 2985
2986/**
2987 * lpfc_sli_hbq_setup: configure and initialize HBQs.
2988 * @phba: Pointer to HBA context object.
2989 *
2990 * This function is called during the SLI initialization to configure
2991 * all the HBQs and post buffers to the HBQ. The caller is not
2992 * required to hold any locks. This function will return zero if successful
2993 * else it will return negative error code.
2994 **/
2363static int 2995static int
2364lpfc_sli_hbq_setup(struct lpfc_hba *phba) 2996lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2365{ 2997{
@@ -2415,15 +3047,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2415 mempool_free(pmb, phba->mbox_mem_pool); 3047 mempool_free(pmb, phba->mbox_mem_pool);
2416 3048
2417 /* Initially populate or replenish the HBQs */ 3049 /* Initially populate or replenish the HBQs */
2418 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 3050 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
2419 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno)) 3051 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
2420 return -ENOMEM;
2421 }
2422 return 0; 3052 return 0;
2423} 3053}
2424 3054
2425static int 3055/**
2426lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) 3056 * lpfc_sli_config_port: Issue config port mailbox command.
3057 * @phba: Pointer to HBA context object.
3058 * @sli_mode: sli mode - 2/3
3059 *
3060 * This function is called by the sli intialization code path
3061 * to issue config_port mailbox command. This function restarts the
3062 * HBA firmware and issues a config_port mailbox command to configure
3063 * the SLI interface in the sli mode specified by sli_mode
3064 * variable. The caller is not required to hold any locks.
3065 * The function returns 0 if successful, else returns negative error
3066 * code.
3067 **/
3068int
3069lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
2427{ 3070{
2428 LPFC_MBOXQ_t *pmb; 3071 LPFC_MBOXQ_t *pmb;
2429 uint32_t resetcount = 0, rc = 0, done = 0; 3072 uint32_t resetcount = 0, rc = 0, done = 0;
@@ -2460,13 +3103,15 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2460 if (rc == -ERESTART) { 3103 if (rc == -ERESTART) {
2461 phba->link_state = LPFC_LINK_UNKNOWN; 3104 phba->link_state = LPFC_LINK_UNKNOWN;
2462 continue; 3105 continue;
2463 } else if (rc) { 3106 } else if (rc)
2464 break; 3107 break;
2465 }
2466
2467 phba->link_state = LPFC_INIT_MBX_CMDS; 3108 phba->link_state = LPFC_INIT_MBX_CMDS;
2468 lpfc_config_port(phba, pmb); 3109 lpfc_config_port(phba, pmb);
2469 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 3110 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
3111 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3112 LPFC_SLI3_HBQ_ENABLED |
3113 LPFC_SLI3_CRP_ENABLED |
3114 LPFC_SLI3_INB_ENABLED);
2470 if (rc != MBX_SUCCESS) { 3115 if (rc != MBX_SUCCESS) {
2471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2472 "0442 Adapter failed to init, mbxCmd x%x " 3117 "0442 Adapter failed to init, mbxCmd x%x "
@@ -2476,30 +3121,64 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2476 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3121 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2477 spin_unlock_irq(&phba->hbalock); 3122 spin_unlock_irq(&phba->hbalock);
2478 rc = -ENXIO; 3123 rc = -ENXIO;
2479 } else { 3124 } else
2480 done = 1; 3125 done = 1;
2481 phba->max_vpi = (phba->max_vpi &&
2482 pmb->mb.un.varCfgPort.gmv) != 0
2483 ? pmb->mb.un.varCfgPort.max_vpi
2484 : 0;
2485 }
2486 } 3126 }
2487
2488 if (!done) { 3127 if (!done) {
2489 rc = -EINVAL; 3128 rc = -EINVAL;
2490 goto do_prep_failed; 3129 goto do_prep_failed;
2491 } 3130 }
2492 3131 if (pmb->mb.un.varCfgPort.sli_mode == 3) {
2493 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 3132 if (!pmb->mb.un.varCfgPort.cMA) {
2494 (!pmb->mb.un.varCfgPort.cMA)) { 3133 rc = -ENXIO;
2495 rc = -ENXIO; 3134 goto do_prep_failed;
3135 }
3136 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
3137 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3138 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
3139 } else
3140 phba->max_vpi = 0;
3141 if (pmb->mb.un.varCfgPort.gerbm)
3142 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3143 if (pmb->mb.un.varCfgPort.gcrp)
3144 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3145 if (pmb->mb.un.varCfgPort.ginb) {
3146 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3147 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3148 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3149 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3150 phba->inb_last_counter =
3151 phba->mbox->us.s3_inb_pgp.counter;
3152 } else {
3153 phba->port_gp = phba->mbox->us.s3_pgp.port;
3154 phba->inb_ha_copy = NULL;
3155 phba->inb_counter = NULL;
3156 }
3157 } else {
3158 phba->port_gp = phba->mbox->us.s2.port;
3159 phba->inb_ha_copy = NULL;
3160 phba->inb_counter = NULL;
3161 phba->max_vpi = 0;
2496 } 3162 }
2497
2498do_prep_failed: 3163do_prep_failed:
2499 mempool_free(pmb, phba->mbox_mem_pool); 3164 mempool_free(pmb, phba->mbox_mem_pool);
2500 return rc; 3165 return rc;
2501} 3166}
2502 3167
3168
3169/**
3170 * lpfc_sli_hba_setup: SLI intialization function.
3171 * @phba: Pointer to HBA context object.
3172 *
3173 * This function is the main SLI intialization function. This function
3174 * is called by the HBA intialization code, HBA reset code and HBA
3175 * error attention handler code. Caller is not required to hold any
3176 * locks. This function issues config_port mailbox command to configure
3177 * the SLI, setup iocb rings and HBQ rings. In the end the function
3178 * calls the config_port_post function to issue init_link mailbox
3179 * command and to start the discovery. The function will return zero
3180 * if successful, else it will return negative error code.
3181 **/
2503int 3182int
2504lpfc_sli_hba_setup(struct lpfc_hba *phba) 3183lpfc_sli_hba_setup(struct lpfc_hba *phba)
2505{ 3184{
@@ -2528,22 +3207,20 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2528 break; 3207 break;
2529 } 3208 }
2530 3209
2531 rc = lpfc_do_config_port(phba, mode); 3210 rc = lpfc_sli_config_port(phba, mode);
3211
2532 if (rc && lpfc_sli_mode == 3) 3212 if (rc && lpfc_sli_mode == 3)
2533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 3213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2534 "1820 Unable to select SLI-3. " 3214 "1820 Unable to select SLI-3. "
2535 "Not supported by adapter.\n"); 3215 "Not supported by adapter.\n");
2536 if (rc && mode != 2) 3216 if (rc && mode != 2)
2537 rc = lpfc_do_config_port(phba, 2); 3217 rc = lpfc_sli_config_port(phba, 2);
2538 if (rc) 3218 if (rc)
2539 goto lpfc_sli_hba_setup_error; 3219 goto lpfc_sli_hba_setup_error;
2540 3220
2541 if (phba->sli_rev == 3) { 3221 if (phba->sli_rev == 3) {
2542 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 3222 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2543 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 3223 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2544 phba->sli3_options |= LPFC_SLI3_ENABLED;
2545 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2546
2547 } else { 3224 } else {
2548 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 3225 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2549 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 3226 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
@@ -2558,8 +3235,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2558 if (rc) 3235 if (rc)
2559 goto lpfc_sli_hba_setup_error; 3236 goto lpfc_sli_hba_setup_error;
2560 3237
2561 /* Init HBQs */ 3238 /* Init HBQs */
2562
2563 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 3239 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2564 rc = lpfc_sli_hbq_setup(phba); 3240 rc = lpfc_sli_hbq_setup(phba);
2565 if (rc) 3241 if (rc)
@@ -2581,19 +3257,19 @@ lpfc_sli_hba_setup_error:
2581 return rc; 3257 return rc;
2582} 3258}
2583 3259
2584/*! lpfc_mbox_timeout 3260
2585 * 3261/**
2586 * \pre 3262 * lpfc_mbox_timeout: Timeout call back function for mbox timer.
2587 * \post 3263 * @ptr: context object - pointer to hba structure.
2588 * \param hba Pointer to per struct lpfc_hba structure
2589 * \param l1 Pointer to the driver's mailbox queue.
2590 * \return
2591 * void
2592 *
2593 * \b Description:
2594 * 3264 *
2595 * This routine handles mailbox timeout events at timer interrupt context. 3265 * This is the callback function for mailbox timer. The mailbox
2596 */ 3266 * timer is armed when a new mailbox command is issued and the timer
3267 * is deleted when the mailbox complete. The function is called by
3268 * the kernel timer code when a mailbox does not complete within
3269 * expected time. This function wakes up the worker thread to
3270 * process the mailbox timeout and returns. All the processing is
3271 * done by the worker thread function lpfc_mbox_timeout_handler.
3272 **/
2597void 3273void
2598lpfc_mbox_timeout(unsigned long ptr) 3274lpfc_mbox_timeout(unsigned long ptr)
2599{ 3275{
@@ -2612,6 +3288,15 @@ lpfc_mbox_timeout(unsigned long ptr)
2612 return; 3288 return;
2613} 3289}
2614 3290
3291
3292/**
3293 * lpfc_mbox_timeout_handler: Worker thread function to handle mailbox timeout.
3294 * @phba: Pointer to HBA context object.
3295 *
3296 * This function is called from worker thread when a mailbox command times out.
3297 * The caller is not required to hold any locks. This function will reset the
3298 * HBA and recover all the pending commands.
3299 **/
2615void 3300void
2616lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 3301lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2617{ 3302{
@@ -2666,6 +3351,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2666 return; 3351 return;
2667} 3352}
2668 3353
3354/**
3355 * lpfc_sli_issue_mbox: Issue a mailbox command to firmware.
3356 * @phba: Pointer to HBA context object.
3357 * @pmbox: Pointer to mailbox object.
3358 * @flag: Flag indicating how the mailbox need to be processed.
3359 *
3360 * This function is called by discovery code and HBA management code
3361 * to submit a mailbox command to firmware. This function gets the
3362 * hbalock to protect the data structures.
3363 * The mailbox command can be submitted in polling mode, in which case
3364 * this function will wait in a polling loop for the completion of the
3365 * mailbox.
3366 * If the mailbox is submitted in no_wait mode (not polling) the
3367 * function will submit the command and returns immediately without waiting
3368 * for the mailbox completion. The no_wait is supported only when HBA
3369 * is in SLI2/SLI3 mode - interrupts are enabled.
3370 * The SLI interface allows only one mailbox pending at a time. If the
3371 * mailbox is issued in polling mode and there is already a mailbox
3372 * pending, then the function will return an error. If the mailbox is issued
3373 * in NO_WAIT mode and there is a mailbox pending already, the function
3374 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
3375 * The sli layer owns the mailbox object until the completion of mailbox
3376 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
3377 * return codes the caller owns the mailbox command after the return of
3378 * the function.
3379 **/
2669int 3380int
2670lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 3381lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2671{ 3382{
@@ -2676,7 +3387,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2676 int i; 3387 int i;
2677 unsigned long timeout; 3388 unsigned long timeout;
2678 unsigned long drvr_flag = 0; 3389 unsigned long drvr_flag = 0;
2679 volatile uint32_t word0, ldata; 3390 uint32_t word0, ldata;
2680 void __iomem *to_slim; 3391 void __iomem *to_slim;
2681 int processing_queue = 0; 3392 int processing_queue = 0;
2682 3393
@@ -2836,12 +3547,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2836 3547
2837 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3548 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2838 /* First copy command data to host SLIM area */ 3549 /* First copy command data to host SLIM area */
2839 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 3550 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
2840 } else { 3551 } else {
2841 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3552 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2842 /* copy command data into host mbox for cmpl */ 3553 /* copy command data into host mbox for cmpl */
2843 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 3554 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
2844 MAILBOX_CMD_SIZE);
2845 } 3555 }
2846 3556
2847 /* First copy mbox command data to HBA SLIM, skip past first 3557 /* First copy mbox command data to HBA SLIM, skip past first
@@ -2851,7 +3561,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2851 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 3561 MAILBOX_CMD_SIZE - sizeof (uint32_t));
2852 3562
2853 /* Next copy over first word, with mbxOwner set */ 3563 /* Next copy over first word, with mbxOwner set */
2854 ldata = *((volatile uint32_t *)mb); 3564 ldata = *((uint32_t *)mb);
2855 to_slim = phba->MBslimaddr; 3565 to_slim = phba->MBslimaddr;
2856 writel(ldata, to_slim); 3566 writel(ldata, to_slim);
2857 readl(to_slim); /* flush */ 3567 readl(to_slim); /* flush */
@@ -2883,7 +3593,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2883 3593
2884 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2885 /* First read mbox status word */ 3595 /* First read mbox status word */
2886 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 3596 word0 = *((uint32_t *)phba->mbox);
2887 word0 = le32_to_cpu(word0); 3597 word0 = le32_to_cpu(word0);
2888 } else { 3598 } else {
2889 /* First read mbox status word */ 3599 /* First read mbox status word */
@@ -2922,12 +3632,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2922 3632
2923 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3633 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2924 /* First copy command data */ 3634 /* First copy command data */
2925 word0 = *((volatile uint32_t *) 3635 word0 = *((uint32_t *)phba->mbox);
2926 &phba->slim2p->mbx);
2927 word0 = le32_to_cpu(word0); 3636 word0 = le32_to_cpu(word0);
2928 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3637 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2929 MAILBOX_t *slimmb; 3638 MAILBOX_t *slimmb;
2930 volatile uint32_t slimword0; 3639 uint32_t slimword0;
2931 /* Check real SLIM for any errors */ 3640 /* Check real SLIM for any errors */
2932 slimword0 = readl(phba->MBslimaddr); 3641 slimword0 = readl(phba->MBslimaddr);
2933 slimmb = (MAILBOX_t *) & slimword0; 3642 slimmb = (MAILBOX_t *) & slimword0;
@@ -2948,8 +3657,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2948 3657
2949 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3658 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2950 /* copy results back to user */ 3659 /* copy results back to user */
2951 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 3660 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
2952 MAILBOX_CMD_SIZE);
2953 } else { 3661 } else {
2954 /* First copy command data */ 3662 /* First copy command data */
2955 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 3663 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2980,9 +3688,16 @@ out_not_finished:
2980 return MBX_NOT_FINISHED; 3688 return MBX_NOT_FINISHED;
2981} 3689}
2982 3690
2983/* 3691/**
2984 * Caller needs to hold lock. 3692 * __lpfc_sli_ringtx_put: Add an iocb to the txq.
2985 */ 3693 * @phba: Pointer to HBA context object.
3694 * @pring: Pointer to driver SLI ring object.
3695 * @piocb: Pointer to address of newly added command iocb.
3696 *
3697 * This function is called with hbalock held to add a command
3698 * iocb to the txq when SLI layer cannot submit the command iocb
3699 * to the ring.
3700 **/
2986static void 3701static void
2987__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3702__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2988 struct lpfc_iocbq *piocb) 3703 struct lpfc_iocbq *piocb)
@@ -2992,6 +3707,23 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2992 pring->txq_cnt++; 3707 pring->txq_cnt++;
2993} 3708}
2994 3709
3710/**
3711 * lpfc_sli_next_iocb: Get the next iocb in the txq.
3712 * @phba: Pointer to HBA context object.
3713 * @pring: Pointer to driver SLI ring object.
3714 * @piocb: Pointer to address of newly added command iocb.
3715 *
3716 * This function is called with hbalock held before a new
3717 * iocb is submitted to the firmware. This function checks
3718 * txq to flush the iocbs in txq to Firmware before
3719 * submitting new iocbs to the Firmware.
3720 * If there are iocbs in the txq which need to be submitted
3721 * to firmware, lpfc_sli_next_iocb returns the first element
3722 * of the txq after dequeuing it from txq.
3723 * If there is no iocb in the txq then the function will return
3724 * *piocb and *piocb is set to NULL. Caller needs to check
3725 * *piocb to find if there are more commands in the txq.
3726 **/
2995static struct lpfc_iocbq * 3727static struct lpfc_iocbq *
2996lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3728lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2997 struct lpfc_iocbq **piocb) 3729 struct lpfc_iocbq **piocb)
@@ -3007,9 +3739,30 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3007 return nextiocb; 3739 return nextiocb;
3008} 3740}
3009 3741
3010/* 3742/**
3011 * Lockless version of lpfc_sli_issue_iocb. 3743 * __lpfc_sli_issue_iocb: Lockless version of lpfc_sli_issue_iocb.
3012 */ 3744 * @phba: Pointer to HBA context object.
3745 * @pring: Pointer to driver SLI ring object.
3746 * @piocb: Pointer to command iocb.
3747 * @flag: Flag indicating if this command can be put into txq.
3748 *
3749 * __lpfc_sli_issue_iocb is used by other functions in the driver
3750 * to issue an iocb command to the HBA. If the PCI slot is recovering
3751 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3752 * flag is turned on, the function returns IOCB_ERROR.
3753 * When the link is down, this function allows only iocbs for
3754 * posting buffers.
3755 * This function finds next available slot in the command ring and
3756 * posts the command to the available slot and writes the port
3757 * attention register to request HBA start processing new iocb.
3758 * If there is no slot available in the ring and
3759 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
3760 * txq, otherwise the function returns IOCB_BUSY.
3761 *
3762 * This function is called with hbalock held.
3763 * The function will return success after it successfully submit the
3764 * iocb to firmware or after adding to the txq.
3765 **/
3013static int 3766static int
3014__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3767__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3015 struct lpfc_iocbq *piocb, uint32_t flag) 3768 struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3052,6 +3805,16 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3052 * can be issued if the link is not up. 3805 * can be issued if the link is not up.
3053 */ 3806 */
3054 switch (piocb->iocb.ulpCommand) { 3807 switch (piocb->iocb.ulpCommand) {
3808 case CMD_GEN_REQUEST64_CR:
3809 case CMD_GEN_REQUEST64_CX:
3810 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
3811 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
3812 FC_FCP_CMND) ||
3813 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
3814 MENLO_TRANSPORT_TYPE))
3815
3816 goto iocb_busy;
3817 break;
3055 case CMD_QUE_RING_BUF_CN: 3818 case CMD_QUE_RING_BUF_CN:
3056 case CMD_QUE_RING_BUF64_CN: 3819 case CMD_QUE_RING_BUF64_CN:
3057 /* 3820 /*
@@ -3106,6 +3869,19 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3106} 3869}
3107 3870
3108 3871
3872/**
3873 * lpfc_sli_issue_iocb: Wrapper function for __lpfc_sli_issue_iocb.
3874 * @phba: Pointer to HBA context object.
3875 * @pring: Pointer to driver SLI ring object.
3876 * @piocb: Pointer to command iocb.
3877 * @flag: Flag indicating if this command can be put into txq.
3878 *
3879 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
3880 * function. This function gets the hbalock and calls
3881 * __lpfc_sli_issue_iocb function and will return the error returned
3882 * by __lpfc_sli_issue_iocb function. This wrapper is used by
3883 * functions which do not hold hbalock.
3884 **/
3109int 3885int
3110lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3886lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3111 struct lpfc_iocbq *piocb, uint32_t flag) 3887 struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3120,6 +3896,17 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3120 return rc; 3896 return rc;
3121} 3897}
3122 3898
3899/**
3900 * lpfc_extra_ring_setup: Extra ring setup function.
3901 * @phba: Pointer to HBA context object.
3902 *
3903 * This function is called while driver attaches with the
3904 * HBA to setup the extra ring. The extra ring is used
3905 * only when driver needs to support target mode functionality
3906 * or IP over FC functionalities.
3907 *
3908 * This function is called with no lock held.
3909 **/
3123static int 3910static int
3124lpfc_extra_ring_setup( struct lpfc_hba *phba) 3911lpfc_extra_ring_setup( struct lpfc_hba *phba)
3125{ 3912{
@@ -3155,6 +3942,19 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
3155 return 0; 3942 return 0;
3156} 3943}
3157 3944
3945/**
3946 * lpfc_sli_async_event_handler: ASYNC iocb handler function.
3947 * @phba: Pointer to HBA context object.
3948 * @pring: Pointer to driver SLI ring object.
3949 * @iocbq: Pointer to iocb object.
3950 *
3951 * This function is called by the slow ring event handler
3952 * function when there is an ASYNC event iocb in the ring.
3953 * This function is called with no lock held.
3954 * Currently this function handles only temperature related
3955 * ASYNC events. The function decodes the temperature sensor
3956 * event message and posts events for the management applications.
3957 **/
3158static void 3958static void
3159lpfc_sli_async_event_handler(struct lpfc_hba * phba, 3959lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3160 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 3960 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
@@ -3210,6 +4010,17 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3210} 4010}
3211 4011
3212 4012
4013/**
4014 * lpfc_sli_setup: SLI ring setup function.
4015 * @phba: Pointer to HBA context object.
4016 *
4017 * lpfc_sli_setup sets up rings of the SLI interface with
4018 * number of iocbs per ring and iotags. This function is
4019 * called while driver attach to the HBA and before the
4020 * interrupts are enabled. So there is no need for locking.
4021 *
4022 * This function always returns 0.
4023 **/
3213int 4024int
3214lpfc_sli_setup(struct lpfc_hba *phba) 4025lpfc_sli_setup(struct lpfc_hba *phba)
3215{ 4026{
@@ -3321,6 +4132,17 @@ lpfc_sli_setup(struct lpfc_hba *phba)
3321 return 0; 4132 return 0;
3322} 4133}
3323 4134
4135/**
4136 * lpfc_sli_queue_setup: Queue initialization function.
4137 * @phba: Pointer to HBA context object.
4138 *
4139 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
4140 * ring. This function also initializes ring indices of each ring.
4141 * This function is called during the initialization of the SLI
4142 * interface of an HBA.
4143 * This function is called with no lock held and always returns
4144 * 1.
4145 **/
3324int 4146int
3325lpfc_sli_queue_setup(struct lpfc_hba *phba) 4147lpfc_sli_queue_setup(struct lpfc_hba *phba)
3326{ 4148{
@@ -3349,6 +4171,23 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3349 return 1; 4171 return 1;
3350} 4172}
3351 4173
4174/**
4175 * lpfc_sli_host_down: Vport cleanup function.
4176 * @vport: Pointer to virtual port object.
4177 *
4178 * lpfc_sli_host_down is called to clean up the resources
4179 * associated with a vport before destroying virtual
4180 * port data structures.
4181 * This function does following operations:
4182 * - Free discovery resources associated with this virtual
4183 * port.
4184 * - Free iocbs associated with this virtual port in
4185 * the txq.
4186 * - Send abort for all iocb commands associated with this
4187 * vport in txcmplq.
4188 *
4189 * This function is called with no lock held and always returns 1.
4190 **/
3352int 4191int
3353lpfc_sli_host_down(struct lpfc_vport *vport) 4192lpfc_sli_host_down(struct lpfc_vport *vport)
3354{ 4193{
@@ -3411,6 +4250,21 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
3411 return 1; 4250 return 1;
3412} 4251}
3413 4252
4253/**
4254 * lpfc_sli_hba_down: Resource cleanup function for the HBA.
4255 * @phba: Pointer to HBA context object.
4256 *
4257 * This function cleans up all iocb, buffers, mailbox commands
4258 * while shutting down the HBA. This function is called with no
4259 * lock held and always returns 1.
4260 * This function does the following to cleanup driver resources:
4261 * - Free discovery resources for each virtual port
4262 * - Cleanup any pending fabric iocbs
4263 * - Iterate through the iocb txq and free each entry
4264 * in the list.
4265 * - Free up any buffer posted to the HBA
4266 * - Free mailbox commands in the mailbox queue.
4267 **/
3414int 4268int
3415lpfc_sli_hba_down(struct lpfc_hba *phba) 4269lpfc_sli_hba_down(struct lpfc_hba *phba)
3416{ 4270{
@@ -3501,6 +4355,18 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3501 return 1; 4355 return 1;
3502} 4356}
3503 4357
4358/**
4359 * lpfc_sli_pcimem_bcopy: SLI memory copy function.
4360 * @srcp: Source memory pointer.
4361 * @destp: Destination memory pointer.
4362 * @cnt: Number of words required to be copied.
4363 *
4364 * This function is used for copying data between driver memory
4365 * and the SLI memory. This function also changes the endianness
4366 * of each word if native endianness is different from SLI
4367 * endianness. This function can be called with or without
4368 * lock.
4369 **/
3504void 4370void
3505lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 4371lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3506{ 4372{
@@ -3518,6 +4384,17 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3518 } 4384 }
3519} 4385}
3520 4386
4387
4388/**
4389 * lpfc_sli_ringpostbuf_put: Function to add a buffer to postbufq.
4390 * @phba: Pointer to HBA context object.
4391 * @pring: Pointer to driver SLI ring object.
4392 * @mp: Pointer to driver buffer object.
4393 *
4394 * This function is called with no lock held.
4395 * It always return zero after adding the buffer to the postbufq
4396 * buffer list.
4397 **/
3521int 4398int
3522lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4399lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3523 struct lpfc_dmabuf *mp) 4400 struct lpfc_dmabuf *mp)
@@ -3531,6 +4408,18 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3531 return 0; 4408 return 0;
3532} 4409}
3533 4410
4411/**
4412 * lpfc_sli_get_buffer_tag: Tag allocation function for a buffer posted
4413 * using CMD_QUE_XRI64_CX iocb.
4414 * @phba: Pointer to HBA context object.
4415 *
4416 * When HBQ is enabled, buffers are searched based on tags. This function
4417 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
4418 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
4419 * does not conflict with tags of buffer posted for unsolicited events.
4420 * The function returns the allocated tag. The function is called with
4421 * no locks held.
4422 **/
3534uint32_t 4423uint32_t
3535lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 4424lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
3536{ 4425{
@@ -3545,6 +4434,22 @@ lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
3545 return phba->buffer_tag_count; 4434 return phba->buffer_tag_count;
3546} 4435}
3547 4436
4437/**
4438 * lpfc_sli_ring_taggedbuf_get: Search HBQ buffer associated with
4439 * posted using CMD_QUE_XRI64_CX iocb.
4440 * @phba: Pointer to HBA context object.
4441 * @pring: Pointer to driver SLI ring object.
4442 * @tag: Buffer tag.
4443 *
4444 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
4445 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
4446 * iocb is posted to the response ring with the tag of the buffer.
4447 * This function searches the pring->postbufq list using the tag
4448 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
4449 * iocb. If the buffer is found then lpfc_dmabuf object of the
4450 * buffer is returned to the caller else NULL is returned.
4451 * This function is called with no lock held.
4452 **/
3548struct lpfc_dmabuf * 4453struct lpfc_dmabuf *
3549lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4454lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3550 uint32_t tag) 4455 uint32_t tag)
@@ -3565,7 +4470,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3565 4470
3566 spin_unlock_irq(&phba->hbalock); 4471 spin_unlock_irq(&phba->hbalock);
3567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3568 "0410 Cannot find virtual addr for buffer tag on " 4473 "0402 Cannot find virtual addr for buffer tag on "
3569 "ring %d Data x%lx x%p x%p x%x\n", 4474 "ring %d Data x%lx x%p x%p x%x\n",
3570 pring->ringno, (unsigned long) tag, 4475 pring->ringno, (unsigned long) tag,
3571 slp->next, slp->prev, pring->postbufq_cnt); 4476 slp->next, slp->prev, pring->postbufq_cnt);
@@ -3573,6 +4478,23 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3573 return NULL; 4478 return NULL;
3574} 4479}
3575 4480
4481/**
4482 * lpfc_sli_ringpostbuf_get: SLI2 buffer search function for
4483 * unsolicited ct and els events.
4484 * @phba: Pointer to HBA context object.
4485 * @pring: Pointer to driver SLI ring object.
4486 * @phys: DMA address of the buffer.
4487 *
4488 * This function searches the buffer list using the dma_address
4489 * of unsolicited event to find the driver's lpfc_dmabuf object
4490 * corresponding to the dma_address. The function returns the
4491 * lpfc_dmabuf object if a buffer is found else it returns NULL.
4492 * This function is called by the ct and els unsolicited event
4493 * handlers to get the buffer associated with the unsolicited
4494 * event.
4495 *
4496 * This function is called with no lock held.
4497 **/
3576struct lpfc_dmabuf * 4498struct lpfc_dmabuf *
3577lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4499lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3578 dma_addr_t phys) 4500 dma_addr_t phys)
@@ -3600,6 +4522,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3600 return NULL; 4522 return NULL;
3601} 4523}
3602 4524
4525/**
4526 * lpfc_sli_abort_els_cmpl: Completion handler for the els abort iocbs.
4527 * @phba: Pointer to HBA context object.
4528 * @cmdiocb: Pointer to driver command iocb object.
4529 * @rspiocb: Pointer to driver response iocb object.
4530 *
4531 * This function is the completion handler for the abort iocbs for
4532 * ELS commands. This function is called from the ELS ring event
4533 * handler with no lock held. This function frees memory resources
4534 * associated with the abort iocb.
4535 **/
3603static void 4536static void
3604lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4537lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3605 struct lpfc_iocbq *rspiocb) 4538 struct lpfc_iocbq *rspiocb)
@@ -3665,6 +4598,17 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3665 return; 4598 return;
3666} 4599}
3667 4600
4601/**
4602 * lpfc_ignore_els_cmpl: Completion handler for aborted ELS command.
4603 * @phba: Pointer to HBA context object.
4604 * @cmdiocb: Pointer to driver command iocb object.
4605 * @rspiocb: Pointer to driver response iocb object.
4606 *
4607 * The function is called from SLI ring event handler with no
4608 * lock held. This function is the completion handler for ELS commands
4609 * which are aborted. The function frees memory resources used for
4610 * the aborted ELS commands.
4611 **/
3668static void 4612static void
3669lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4613lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3670 struct lpfc_iocbq *rspiocb) 4614 struct lpfc_iocbq *rspiocb)
@@ -3673,7 +4617,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3673 4617
3674 /* ELS cmd tag <ulpIoTag> completes */ 4618 /* ELS cmd tag <ulpIoTag> completes */
3675 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 4619 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3676 "0133 Ignoring ELS cmd tag x%x completion Data: " 4620 "0139 Ignoring ELS cmd tag x%x completion Data: "
3677 "x%x x%x x%x\n", 4621 "x%x x%x x%x\n",
3678 irsp->ulpIoTag, irsp->ulpStatus, 4622 irsp->ulpIoTag, irsp->ulpStatus,
3679 irsp->un.ulpWord[4], irsp->ulpTimeout); 4623 irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -3684,6 +4628,17 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3684 return; 4628 return;
3685} 4629}
3686 4630
4631/**
4632 * lpfc_sli_issue_abort_iotag: Abort function for a command iocb.
4633 * @phba: Pointer to HBA context object.
4634 * @pring: Pointer to driver SLI ring object.
4635 * @cmdiocb: Pointer to driver command iocb object.
4636 *
4637 * This function issues an abort iocb for the provided command
4638 * iocb. This function is called with hbalock held.
4639 * The function returns 0 when it fails due to memory allocation
4640 * failure or when the command iocb is an abort request.
4641 **/
3687int 4642int
3688lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4643lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3689 struct lpfc_iocbq *cmdiocb) 4644 struct lpfc_iocbq *cmdiocb)
@@ -3748,6 +4703,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3748 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 4703 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3749 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 4704 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3750 4705
4706 if (retval)
4707 __lpfc_sli_release_iocbq(phba, abtsiocbp);
3751abort_iotag_exit: 4708abort_iotag_exit:
3752 /* 4709 /*
3753 * Caller to this routine should check for IOCB_ERROR 4710 * Caller to this routine should check for IOCB_ERROR
@@ -3757,6 +4714,29 @@ abort_iotag_exit:
3757 return retval; 4714 return retval;
3758} 4715}
3759 4716
4717/**
4718 * lpfc_sli_validate_fcp_iocb: Filtering function, used to find commands
4719 * associated with a vport/SCSI target/lun.
4720 * @iocbq: Pointer to driver iocb object.
4721 * @vport: Pointer to driver virtual port object.
4722 * @tgt_id: SCSI ID of the target.
4723 * @lun_id: LUN ID of the scsi device.
4724 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
4725 *
4726 * This function acts as iocb filter for functions which abort or count
4727 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
4728 * 0 if the filtering criteria is met for the given iocb and will return
4729 * 1 if the filtering criteria is not met.
4730 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
4731 * given iocb is for the SCSI device specified by vport, tgt_id and
4732 * lun_id parameter.
4733 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
4734 * given iocb is for the SCSI target specified by vport and tgt_id
4735 * parameters.
4736 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
4737 * given iocb is for the SCSI host associated with the given vport.
4738 * This function is called with no locks held.
4739 **/
3760static int 4740static int
3761lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 4741lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3762 uint16_t tgt_id, uint64_t lun_id, 4742 uint16_t tgt_id, uint64_t lun_id,
@@ -3800,6 +4780,25 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3800 return rc; 4780 return rc;
3801} 4781}
3802 4782
4783/**
4784 * lpfc_sli_sum_iocb: Function to count the number of FCP iocbs pending.
4785 * @vport: Pointer to virtual port.
4786 * @tgt_id: SCSI ID of the target.
4787 * @lun_id: LUN ID of the scsi device.
4788 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
4789 *
4790 * This function returns number of FCP commands pending for the vport.
4791 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
4792 * commands pending on the vport associated with SCSI device specified
4793 * by tgt_id and lun_id parameters.
4794 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
4795 * commands pending on the vport associated with SCSI target specified
4796 * by tgt_id parameter.
4797 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
4798 * commands pending on the vport.
4799 * This function returns the number of iocbs which satisfy the filter.
4800 * This function is called without any lock held.
4801 **/
3803int 4802int
3804lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 4803lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3805 lpfc_ctx_cmd ctx_cmd) 4804 lpfc_ctx_cmd ctx_cmd)
@@ -3819,6 +4818,17 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3819 return sum; 4818 return sum;
3820} 4819}
3821 4820
4821/**
4822 * lpfc_sli_abort_fcp_cmpl: Completion handler function for an aborted
4823 * FCP iocb.
4824 * @phba: Pointer to HBA context object
4825 * @cmdiocb: Pointer to command iocb object.
4826 * @rspiocb: Pointer to response iocb object.
4827 *
4828 * This function is called when an aborted FCP iocb completes. This
4829 * function is called by the ring event handler with no lock held.
4830 * This function frees the iocb.
4831 **/
3822void 4832void
3823lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4833lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3824 struct lpfc_iocbq *rspiocb) 4834 struct lpfc_iocbq *rspiocb)
@@ -3827,6 +4837,28 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3827 return; 4837 return;
3828} 4838}
3829 4839
4840/**
4841 * lpfc_sli_abort_iocb: This function issue abort for all SCSI commands
4842 * pending on a SCSI host(vport)/target/lun.
4843 * @vport: Pointer to virtual port.
4844 * @pring: Pointer to driver SLI ring object.
4845 * @tgt_id: SCSI ID of the target.
4846 * @lun_id: LUN ID of the scsi device.
4847 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
4848 *
4849 * This function sends an abort command for every SCSI command
4850 * associated with the given virtual port pending on the ring
4851 * filtered by lpfc_sli_validate_fcp_iocb function.
4852 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
4853 * FCP iocbs associated with lun specified by tgt_id and lun_id
4854 * parameters
4855 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
4856 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
4857 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
4858 * FCP iocbs associated with virtual port.
4859 * This function returns number of iocbs it failed to abort.
4860 * This function is called with no locks held.
4861 **/
3830int 4862int
3831lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 4863lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3832 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 4864 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
@@ -3878,6 +4910,24 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3878 return errcnt; 4910 return errcnt;
3879} 4911}
3880 4912
4913/**
4914 * lpfc_sli_wake_iocb_wait: iocb completion handler for iocb issued using
4915 * lpfc_sli_issue_iocb_wait.
4916 * @phba: Pointer to HBA context object.
4917 * @cmdiocbq: Pointer to command iocb.
4918 * @rspiocbq: Pointer to response iocb.
4919 *
4920 * This function is the completion handler for iocbs issued using
4921 * lpfc_sli_issue_iocb_wait function. This function is called by the
4922 * ring event handler function without any lock held. This function
4923 * can be called from both worker thread context and interrupt
4924 * context. This function also can be called from other thread which
4925 * cleans up the SLI layer objects.
4926 * This function copy the contents of the response iocb to the
4927 * response iocb memory object provided by the caller of
4928 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
4929 * sleeps for the iocb completion.
4930 **/
3881static void 4931static void
3882lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 4932lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3883 struct lpfc_iocbq *cmdiocbq, 4933 struct lpfc_iocbq *cmdiocbq,
@@ -3899,13 +4949,36 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3899 return; 4949 return;
3900} 4950}
3901 4951
3902/* 4952/**
3903 * Issue the caller's iocb and wait for its completion, but no longer than the 4953 * lpfc_sli_issue_iocb_wait: Synchronous function to issue iocb commands.
3904 * caller's timeout. Note that iocb_flags is cleared before the 4954 * @phba: Pointer to HBA context object..
3905 * lpfc_sli_issue_call since the wake routine sets a unique value and by 4955 * @pring: Pointer to sli ring.
3906 * definition this is a wait function. 4956 * @piocb: Pointer to command iocb.
3907 */ 4957 * @prspiocbq: Pointer to response iocb.
3908 4958 * @timeout: Timeout in number of seconds.
4959 *
4960 * This function issues the iocb to firmware and waits for the
4961 * iocb to complete. If the iocb command is not
4962 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
4963 * Caller should not free the iocb resources if this function
4964 * returns IOCB_TIMEDOUT.
4965 * The function waits for the iocb completion using an
4966 * non-interruptible wait.
4967 * This function will sleep while waiting for iocb completion.
4968 * So, this function should not be called from any context which
4969 * does not allow sleeping. Due to the same reason, this function
4970 * cannot be called with interrupt disabled.
4971 * This function assumes that the iocb completions occur while
4972 * this function sleep. So, this function cannot be called from
4973 * the thread which process iocb completion for this ring.
4974 * This function clears the iocb_flag of the iocb object before
4975 * issuing the iocb and the iocb completion handler sets this
4976 * flag and wakes this thread when the iocb completes.
4977 * The contents of the response iocb will be copied to prspiocbq
4978 * by the completion handler when the command completes.
4979 * This function returns IOCB_SUCCESS when success.
4980 * This function is called with no lock held.
4981 **/
3909int 4982int
3910lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 4983lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3911 struct lpfc_sli_ring *pring, 4984 struct lpfc_sli_ring *pring,
@@ -3963,7 +5036,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3963 } 5036 }
3964 } else { 5037 } else {
3965 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5038 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3966 ":0332 IOCB wait issue failed, Data x%x\n", 5039 "0332 IOCB wait issue failed, Data x%x\n",
3967 retval); 5040 retval);
3968 retval = IOCB_ERROR; 5041 retval = IOCB_ERROR;
3969 } 5042 }
@@ -3983,6 +5056,32 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3983 return retval; 5056 return retval;
3984} 5057}
3985 5058
5059/**
5060 * lpfc_sli_issue_mbox_wait: Synchronous function to issue mailbox.
5061 * @phba: Pointer to HBA context object.
5062 * @pmboxq: Pointer to driver mailbox object.
5063 * @timeout: Timeout in number of seconds.
5064 *
5065 * This function issues the mailbox to firmware and waits for the
5066 * mailbox command to complete. If the mailbox command is not
5067 * completed within timeout seconds, it returns MBX_TIMEOUT.
5068 * The function waits for the mailbox completion using an
5069 * interruptible wait. If the thread is woken up due to a
5070 * signal, MBX_TIMEOUT error is returned to the caller. Caller
5071 * should not free the mailbox resources, if this function returns
5072 * MBX_TIMEOUT.
5073 * This function will sleep while waiting for mailbox completion.
5074 * So, this function should not be called from any context which
5075 * does not allow sleeping. Due to the same reason, this function
5076 * cannot be called with interrupt disabled.
5077 * This function assumes that the mailbox completion occurs while
5078 * this function sleep. So, this function cannot be called from
5079 * the worker thread which processes mailbox completion.
5080 * This function is called in the context of HBA management
5081 * applications.
5082 * This function returns MBX_SUCCESS when successful.
5083 * This function is called with no lock held.
5084 **/
3986int 5085int
3987lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 5086lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3988 uint32_t timeout) 5087 uint32_t timeout)
@@ -4027,6 +5126,18 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
4027 return retval; 5126 return retval;
4028} 5127}
4029 5128
5129/**
5130 * lpfc_sli_flush_mbox_queue: mailbox queue cleanup function.
5131 * @phba: Pointer to HBA context.
5132 *
5133 * This function is called to cleanup any pending mailbox
5134 * objects in the driver queue before bringing the HBA offline.
5135 * This function is called while resetting the HBA.
5136 * The function is called without any lock held. The function
5137 * takes hbalock to update SLI data structure.
5138 * This function returns 1 when there is an active mailbox
5139 * command pending else returns 0.
5140 **/
4030int 5141int
4031lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 5142lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
4032{ 5143{
@@ -4058,8 +5169,74 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
4058 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 5169 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
4059} 5170}
4060 5171
5172/**
5173 * lpfc_sli_check_eratt: check error attention events
5174 * @phba: Pointer to HBA context.
5175 *
5176 * This function is called form timer soft interrupt context to check HBA's
5177 * error attention register bit for error attention events.
5178 *
5179 * This fucntion returns 1 when there is Error Attention in the Host Attention
5180 * Register and returns 0 otherwise.
5181 **/
5182int
5183lpfc_sli_check_eratt(struct lpfc_hba *phba)
5184{
5185 uint32_t ha_copy;
5186
5187 /* If somebody is waiting to handle an eratt, don't process it
5188 * here. The brdkill function will do this.
5189 */
5190 if (phba->link_flag & LS_IGNORE_ERATT)
5191 return 0;
5192
5193 /* Check if interrupt handler handles this ERATT */
5194 spin_lock_irq(&phba->hbalock);
5195 if (phba->hba_flag & HBA_ERATT_HANDLED) {
5196 /* Interrupt handler has handled ERATT */
5197 spin_unlock_irq(&phba->hbalock);
5198 return 0;
5199 }
5200
5201 /* Read chip Host Attention (HA) register */
5202 ha_copy = readl(phba->HAregaddr);
5203 if (ha_copy & HA_ERATT) {
5204 /* Read host status register to retrieve error event */
5205 lpfc_sli_read_hs(phba);
5206 /* Set the driver HA work bitmap */
5207 phba->work_ha |= HA_ERATT;
5208 /* Indicate polling handles this ERATT */
5209 phba->hba_flag |= HBA_ERATT_HANDLED;
5210 spin_unlock_irq(&phba->hbalock);
5211 return 1;
5212 }
5213 spin_unlock_irq(&phba->hbalock);
5214 return 0;
5215}
5216
5217/**
5218 * lpfc_sp_intr_handler: The slow-path interrupt handler of lpfc driver.
5219 * @irq: Interrupt number.
5220 * @dev_id: The device context pointer.
5221 *
5222 * This function is directly called from the PCI layer as an interrupt
5223 * service routine when the device is enabled with MSI-X multi-message
5224 * interrupt mode and there are slow-path events in the HBA. However,
5225 * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
5226 * this function is called as part of the device-level interrupt handler.
5227 * When the PCI slot is in error recovery or the HBA is undergoing
5228 * initialization, the interrupt handler will not process the interrupt.
5229 * The link attention and ELS ring attention events are handled by the
5230 * worker thread. The interrupt handler signals the worker thread and
5231 * and returns for these events. This function is called without any
5232 * lock held. It gets the hbalock to access and update SLI data
5233 * structures.
5234 *
5235 * This function returns IRQ_HANDLED when interrupt is handled else it
5236 * returns IRQ_NONE.
5237 **/
4061irqreturn_t 5238irqreturn_t
4062lpfc_intr_handler(int irq, void *dev_id) 5239lpfc_sp_intr_handler(int irq, void *dev_id)
4063{ 5240{
4064 struct lpfc_hba *phba; 5241 struct lpfc_hba *phba;
4065 uint32_t ha_copy; 5242 uint32_t ha_copy;
@@ -4078,48 +5255,52 @@ lpfc_intr_handler(int irq, void *dev_id)
4078 * Get the driver's phba structure from the dev_id and 5255 * Get the driver's phba structure from the dev_id and
4079 * assume the HBA is not interrupting. 5256 * assume the HBA is not interrupting.
4080 */ 5257 */
4081 phba = (struct lpfc_hba *) dev_id; 5258 phba = (struct lpfc_hba *)dev_id;
4082 5259
4083 if (unlikely(!phba)) 5260 if (unlikely(!phba))
4084 return IRQ_NONE; 5261 return IRQ_NONE;
4085 5262
4086 /* If the pci channel is offline, ignore all the interrupts. */
4087 if (unlikely(pci_channel_offline(phba->pcidev)))
4088 return IRQ_NONE;
4089
4090 phba->sli.slistat.sli_intr++;
4091
4092 /* 5263 /*
4093 * Call the HBA to see if it is interrupting. If not, don't claim 5264 * Stuff needs to be attented to when this function is invoked as an
4094 * the interrupt 5265 * individual interrupt handler in MSI-X multi-message interrupt mode
4095 */
4096
4097 /* Ignore all interrupts during initialization. */
4098 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
4099 return IRQ_NONE;
4100
4101 /*
4102 * Read host attention register to determine interrupt source
4103 * Clear Attention Sources, except Error Attention (to
4104 * preserve status) and Link Attention
4105 */
4106 spin_lock(&phba->hbalock);
4107 ha_copy = readl(phba->HAregaddr);
4108 /* If somebody is waiting to handle an eratt don't process it
4109 * here. The brdkill function will do this.
4110 */ 5266 */
4111 if (phba->link_flag & LS_IGNORE_ERATT) 5267 if (phba->intr_type == MSIX) {
4112 ha_copy &= ~HA_ERATT; 5268 /* If the pci channel is offline, ignore all the interrupts */
4113 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 5269 if (unlikely(pci_channel_offline(phba->pcidev)))
4114 readl(phba->HAregaddr); /* flush */ 5270 return IRQ_NONE;
4115 spin_unlock(&phba->hbalock); 5271 /* Update device-level interrupt statistics */
4116 5272 phba->sli.slistat.sli_intr++;
4117 if (unlikely(!ha_copy)) 5273 /* Ignore all interrupts during initialization. */
4118 return IRQ_NONE; 5274 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5275 return IRQ_NONE;
5276 /* Need to read HA REG for slow-path events */
5277 spin_lock(&phba->hbalock);
5278 ha_copy = readl(phba->HAregaddr);
5279 /* If somebody is waiting to handle an eratt don't process it
5280 * here. The brdkill function will do this.
5281 */
5282 if (phba->link_flag & LS_IGNORE_ERATT)
5283 ha_copy &= ~HA_ERATT;
5284 /* Check the need for handling ERATT in interrupt handler */
5285 if (ha_copy & HA_ERATT) {
5286 if (phba->hba_flag & HBA_ERATT_HANDLED)
5287 /* ERATT polling has handled ERATT */
5288 ha_copy &= ~HA_ERATT;
5289 else
5290 /* Indicate interrupt handler handles ERATT */
5291 phba->hba_flag |= HBA_ERATT_HANDLED;
5292 }
5293 /* Clear up only attention source related to slow-path */
5294 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
5295 phba->HAregaddr);
5296 readl(phba->HAregaddr); /* flush */
5297 spin_unlock(&phba->hbalock);
5298 } else
5299 ha_copy = phba->ha_copy;
4119 5300
4120 work_ha_copy = ha_copy & phba->work_ha_mask; 5301 work_ha_copy = ha_copy & phba->work_ha_mask;
4121 5302
4122 if (unlikely(work_ha_copy)) { 5303 if (work_ha_copy) {
4123 if (work_ha_copy & HA_LATT) { 5304 if (work_ha_copy & HA_LATT) {
4124 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 5305 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
4125 /* 5306 /*
@@ -4138,7 +5319,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4138 work_ha_copy &= ~HA_LATT; 5319 work_ha_copy &= ~HA_LATT;
4139 } 5320 }
4140 5321
4141 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 5322 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
4142 /* 5323 /*
4143 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 5324 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
4144 * the only slow ring. 5325 * the only slow ring.
@@ -4179,31 +5360,13 @@ lpfc_intr_handler(int irq, void *dev_id)
4179 spin_unlock(&phba->hbalock); 5360 spin_unlock(&phba->hbalock);
4180 } 5361 }
4181 } 5362 }
4182
4183 if (work_ha_copy & HA_ERATT) {
4184 /*
4185 * There was a link/board error. Read the
4186 * status register to retrieve the error event
4187 * and process it.
4188 */
4189 phba->sli.slistat.err_attn_event++;
4190 /* Save status info */
4191 phba->work_hs = readl(phba->HSregaddr);
4192 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
4193 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
4194
4195 /* Clear Chip error bit */
4196 writel(HA_ERATT, phba->HAregaddr);
4197 readl(phba->HAregaddr); /* flush */
4198 phba->pport->stopped = 1;
4199 }
4200
4201 spin_lock(&phba->hbalock); 5363 spin_lock(&phba->hbalock);
4202 if ((work_ha_copy & HA_MBATT) && 5364 if (work_ha_copy & HA_ERATT)
4203 (phba->sli.mbox_active)) { 5365 lpfc_sli_read_hs(phba);
5366 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
4204 pmb = phba->sli.mbox_active; 5367 pmb = phba->sli.mbox_active;
4205 pmbox = &pmb->mb; 5368 pmbox = &pmb->mb;
4206 mbox = &phba->slim2p->mbx; 5369 mbox = phba->mbox;
4207 vport = pmb->vport; 5370 vport = pmb->vport;
4208 5371
4209 /* First check out the status word */ 5372 /* First check out the status word */
@@ -4270,7 +5433,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4270 lpfc_printf_log(phba, 5433 lpfc_printf_log(phba,
4271 KERN_ERR, 5434 KERN_ERR,
4272 LOG_MBOX | LOG_SLI, 5435 LOG_MBOX | LOG_SLI,
4273 "0306 rc should have" 5436 "0350 rc should have"
4274 "been MBX_BUSY"); 5437 "been MBX_BUSY");
4275 goto send_current_mbox; 5438 goto send_current_mbox;
4276 } 5439 }
@@ -4283,6 +5446,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4283 } 5446 }
4284 } else 5447 } else
4285 spin_unlock(&phba->hbalock); 5448 spin_unlock(&phba->hbalock);
5449
4286 if ((work_ha_copy & HA_MBATT) && 5450 if ((work_ha_copy & HA_MBATT) &&
4287 (phba->sli.mbox_active == NULL)) { 5451 (phba->sli.mbox_active == NULL)) {
4288send_current_mbox: 5452send_current_mbox:
@@ -4302,15 +5466,74 @@ send_current_mbox:
4302 spin_unlock(&phba->hbalock); 5466 spin_unlock(&phba->hbalock);
4303 lpfc_worker_wake_up(phba); 5467 lpfc_worker_wake_up(phba);
4304 } 5468 }
5469 return IRQ_HANDLED;
4305 5470
4306 ha_copy &= ~(phba->work_ha_mask); 5471} /* lpfc_sp_intr_handler */
5472
5473/**
5474 * lpfc_fp_intr_handler: The fast-path interrupt handler of lpfc driver.
5475 * @irq: Interrupt number.
5476 * @dev_id: The device context pointer.
5477 *
5478 * This function is directly called from the PCI layer as an interrupt
5479 * service routine when the device is enabled with MSI-X multi-message
5480 * interrupt mode and there is a fast-path FCP IOCB ring event in the
5481 * HBA. However, when the device is enabled with either MSI or Pin-IRQ
5482 * interrupt mode, this function is called as part of the device-level
5483 * interrupt handler. When the PCI slot is in error recovery or the HBA
5484 * is undergoing initialization, the interrupt handler will not process
5485 * the interrupt. The SCSI FCP fast-path ring event are handled in the
5486 * intrrupt context. This function is called without any lock held. It
5487 * gets the hbalock to access and update SLI data structures.
5488 *
5489 * This function returns IRQ_HANDLED when interrupt is handled else it
5490 * returns IRQ_NONE.
5491 **/
5492irqreturn_t
5493lpfc_fp_intr_handler(int irq, void *dev_id)
5494{
5495 struct lpfc_hba *phba;
5496 uint32_t ha_copy;
5497 unsigned long status;
5498
5499 /* Get the driver's phba structure from the dev_id and
5500 * assume the HBA is not interrupting.
5501 */
5502 phba = (struct lpfc_hba *) dev_id;
5503
5504 if (unlikely(!phba))
5505 return IRQ_NONE;
5506
5507 /*
5508 * Stuff needs to be attented to when this function is invoked as an
5509 * individual interrupt handler in MSI-X multi-message interrupt mode
5510 */
5511 if (phba->intr_type == MSIX) {
5512 /* If pci channel is offline, ignore all the interrupts */
5513 if (unlikely(pci_channel_offline(phba->pcidev)))
5514 return IRQ_NONE;
5515 /* Update device-level interrupt statistics */
5516 phba->sli.slistat.sli_intr++;
5517 /* Ignore all interrupts during initialization. */
5518 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5519 return IRQ_NONE;
5520 /* Need to read HA REG for FCP ring and other ring events */
5521 ha_copy = readl(phba->HAregaddr);
5522 /* Clear up only attention source related to fast-path */
5523 spin_lock(&phba->hbalock);
5524 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
5525 phba->HAregaddr);
5526 readl(phba->HAregaddr); /* flush */
5527 spin_unlock(&phba->hbalock);
5528 } else
5529 ha_copy = phba->ha_copy;
4307 5530
4308 /* 5531 /*
4309 * Process all events on FCP ring. Take the optimized path for 5532 * Process all events on FCP ring. Take the optimized path for FCP IO.
4310 * FCP IO. Any other IO is slow path and is handled by
4311 * the worker thread.
4312 */ 5533 */
4313 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 5534 ha_copy &= ~(phba->work_ha_mask);
5535
5536 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
4314 status >>= (4*LPFC_FCP_RING); 5537 status >>= (4*LPFC_FCP_RING);
4315 if (status & HA_RXMASK) 5538 if (status & HA_RXMASK)
4316 lpfc_sli_handle_fast_ring_event(phba, 5539 lpfc_sli_handle_fast_ring_event(phba,
@@ -4319,11 +5542,10 @@ send_current_mbox:
4319 5542
4320 if (phba->cfg_multi_ring_support == 2) { 5543 if (phba->cfg_multi_ring_support == 2) {
4321 /* 5544 /*
4322 * Process all events on extra ring. Take the optimized path 5545 * Process all events on extra ring. Take the optimized path
4323 * for extra ring IO. Any other IO is slow path and is handled 5546 * for extra ring IO.
4324 * by the worker thread.
4325 */ 5547 */
4326 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 5548 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
4327 status >>= (4*LPFC_EXTRA_RING); 5549 status >>= (4*LPFC_EXTRA_RING);
4328 if (status & HA_RXMASK) { 5550 if (status & HA_RXMASK) {
4329 lpfc_sli_handle_fast_ring_event(phba, 5551 lpfc_sli_handle_fast_ring_event(phba,
@@ -4332,5 +5554,106 @@ send_current_mbox:
4332 } 5554 }
4333 } 5555 }
4334 return IRQ_HANDLED; 5556 return IRQ_HANDLED;
5557} /* lpfc_fp_intr_handler */
5558
5559/**
5560 * lpfc_intr_handler: The device-level interrupt handler of lpfc driver.
5561 * @irq: Interrupt number.
5562 * @dev_id: The device context pointer.
5563 *
5564 * This function is the device-level interrupt handler called from the PCI
5565 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
5566 * an event in the HBA which requires driver attention. This function
5567 * invokes the slow-path interrupt attention handling function and fast-path
5568 * interrupt attention handling function in turn to process the relevant
5569 * HBA attention events. This function is called without any lock held. It
5570 * gets the hbalock to access and update SLI data structures.
5571 *
5572 * This function returns IRQ_HANDLED when interrupt is handled, else it
5573 * returns IRQ_NONE.
5574 **/
5575irqreturn_t
5576lpfc_intr_handler(int irq, void *dev_id)
5577{
5578 struct lpfc_hba *phba;
5579 irqreturn_t sp_irq_rc, fp_irq_rc;
5580 unsigned long status1, status2;
5581
5582 /*
5583 * Get the driver's phba structure from the dev_id and
5584 * assume the HBA is not interrupting.
5585 */
5586 phba = (struct lpfc_hba *) dev_id;
5587
5588 if (unlikely(!phba))
5589 return IRQ_NONE;
5590
5591 /* If the pci channel is offline, ignore all the interrupts. */
5592 if (unlikely(pci_channel_offline(phba->pcidev)))
5593 return IRQ_NONE;
5594
5595 /* Update device level interrupt statistics */
5596 phba->sli.slistat.sli_intr++;
5597
5598 /* Ignore all interrupts during initialization. */
5599 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5600 return IRQ_NONE;
5601
5602 spin_lock(&phba->hbalock);
5603 phba->ha_copy = readl(phba->HAregaddr);
5604 if (unlikely(!phba->ha_copy)) {
5605 spin_unlock(&phba->hbalock);
5606 return IRQ_NONE;
5607 } else if (phba->ha_copy & HA_ERATT) {
5608 if (phba->hba_flag & HBA_ERATT_HANDLED)
5609 /* ERATT polling has handled ERATT */
5610 phba->ha_copy &= ~HA_ERATT;
5611 else
5612 /* Indicate interrupt handler handles ERATT */
5613 phba->hba_flag |= HBA_ERATT_HANDLED;
5614 }
5615
5616 /* Clear attention sources except link and error attentions */
5617 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
5618 readl(phba->HAregaddr); /* flush */
5619 spin_unlock(&phba->hbalock);
5620
5621 /*
5622 * Invokes slow-path host attention interrupt handling as appropriate.
5623 */
5624
5625 /* status of events with mailbox and link attention */
5626 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
5627
5628 /* status of events with ELS ring */
5629 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
5630 status2 >>= (4*LPFC_ELS_RING);
5631
5632 if (status1 || (status2 & HA_RXMASK))
5633 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
5634 else
5635 sp_irq_rc = IRQ_NONE;
5636
5637 /*
5638 * Invoke fast-path host attention interrupt handling as appropriate.
5639 */
5640
5641 /* status of events with FCP ring */
5642 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
5643 status1 >>= (4*LPFC_FCP_RING);
5644
5645 /* status of events with extra ring */
5646 if (phba->cfg_multi_ring_support == 2) {
5647 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
5648 status2 >>= (4*LPFC_EXTRA_RING);
5649 } else
5650 status2 = 0;
5651
5652 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5653 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
5654 else
5655 fp_irq_rc = IRQ_NONE;
4335 5656
4336} /* lpfc_intr_handler */ 5657 /* Return device-level interrupt handling status */
5658 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5659} /* lpfc_intr_handler */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7249fd252cbb..883938652a6a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -233,6 +233,7 @@ struct lpfc_sli {
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
236 237
237 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 238 struct lpfc_sli_ring ring[LPFC_MAX_RING];
238 int fcp_ring; /* ring used for FCP initiator commands */ 239 int fcp_ring; /* ring used for FCP initiator commands */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ad24cacfbe10..cc43e9de22cc 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,9 +18,11 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.7" 21#define LPFC_DRIVER_VERSION "8.2.8"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
25#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
24 26
25#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 27#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
26 LPFC_DRIVER_VERSION 28 LPFC_DRIVER_VERSION
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 109f89d98830..a7de1cc02b40 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -34,6 +34,7 @@
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw.h" 35#include "lpfc_hw.h"
36#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_nl.h"
37#include "lpfc_disc.h" 38#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
39#include "lpfc.h" 40#include "lpfc.h"
@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
204 return 1; 205 return 1;
205} 206}
206 207
208/**
209 * lpfc_discovery_wait: Wait for driver discovery to quiesce.
210 * @vport: The virtual port for which this call is being executed.
211 *
212 * This driver calls this routine specifically from lpfc_vport_delete
213 * to enforce a synchronous execution of vport
214 * delete relative to discovery activities. The
215 * lpfc_vport_delete routine should not return until it
216 * can reasonably guarantee that discovery has quiesced.
217 * Post FDISC LOGO, the driver must wait until its SAN teardown is
218 * complete and all resources recovered before allowing
219 * cleanup.
220 *
221 * This routine does not require any locks held.
222 **/
223static void lpfc_discovery_wait(struct lpfc_vport *vport)
224{
225 struct lpfc_hba *phba = vport->phba;
226 uint32_t wait_flags = 0;
227 unsigned long wait_time_max;
228 unsigned long start_time;
229
230 wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
231 FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
232
233 /*
234 * The time constraint on this loop is a balance between the
235 * fabric RA_TOV value and dev_loss tmo. The driver's
236 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
237 */
238 wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
239 wait_time_max += jiffies;
240 start_time = jiffies;
241 while (time_before(jiffies, wait_time_max)) {
242 if ((vport->num_disc_nodes > 0) ||
243 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state,
252 vport->fc_flag, vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000);
255 } else {
256 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x"
260 " wait msecs x%x\n",
261 vport->vpi, vport->port_state,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies
264 - start_time));
265 break;
266 }
267 }
268
269 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x"
273 " wait msecs x%x\n",
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time));
277}
278
207int 279int
208lpfc_vport_create(struct fc_vport *fc_vport, bool disable) 280lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
209{ 281{
@@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
506 * initiated after we've disposed of all other resources associated 578 * initiated after we've disposed of all other resources associated
507 * with the port. 579 * with the port.
508 */ 580 */
509 if (!scsi_host_get(shost) || !scsi_host_get(shost)) 581 if (!scsi_host_get(shost))
582 return VPORT_INVAL;
583 if (!scsi_host_get(shost)) {
584 scsi_host_put(shost);
510 return VPORT_INVAL; 585 return VPORT_INVAL;
586 }
511 spin_lock_irq(&phba->hbalock); 587 spin_lock_irq(&phba->hbalock);
512 vport->load_flag |= FC_UNLOADING; 588 vport->load_flag |= FC_UNLOADING;
513 spin_unlock_irq(&phba->hbalock); 589 spin_unlock_irq(&phba->hbalock);
@@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
597 } 673 }
598 vport->unreg_vpi_cmpl = VPORT_INVAL; 674 vport->unreg_vpi_cmpl = VPORT_INVAL;
599 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 675 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
676 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
677 goto skip_logo;
600 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 678 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
601 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) 679 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
602 timeout = schedule_timeout(timeout); 680 timeout = schedule_timeout(timeout);
603 } 681 }
604 682
683 if (!(phba->pport->load_flag & FC_UNLOADING))
684 lpfc_discovery_wait(vport);
685
605skip_logo: 686skip_logo:
606 lpfc_cleanup(vport); 687 lpfc_cleanup(vport);
607 lpfc_sli_host_down(vport); 688 lpfc_sli_host_down(vport);
@@ -615,8 +696,10 @@ skip_logo:
615 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) 696 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
616 * does the scsi_host_put() to release the vport. 697 * does the scsi_host_put() to release the vport.
617 */ 698 */
618 lpfc_mbx_unreg_vpi(vport); 699 if (lpfc_mbx_unreg_vpi(vport))
619 } 700 scsi_host_put(shost);
701 } else
702 scsi_host_put(shost);
620 703
621 lpfc_free_vpi(phba, vport->vpi); 704 lpfc_free_vpi(phba, vport->vpi);
622 vport->work_port_events = 0; 705 vport->work_port_events = 0;
@@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
663 scsi_host_put(lpfc_shost_from_vport(vports[i])); 746 scsi_host_put(lpfc_shost_from_vport(vports[i]));
664 kfree(vports); 747 kfree(vports);
665} 748}
749
750
751/**
752 * lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
753 * @vport: Pointer to vport object.
754 *
755 * This function resets the statistical data for the vport. This function
756 * is called with the host_lock held
757 **/
758void
759lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
760{
761 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
762
763 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
764 if (!NLP_CHK_NODE_ACT(ndlp))
765 continue;
766 if (ndlp->lat_data)
767 memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
768 sizeof(struct lpfc_scsicmd_bkt));
769 }
770}
771
772
773/**
774 * lpfc_alloc_bucket: Allocate data buffer required for collecting
775 * statistical data.
776 * @vport: Pointer to vport object.
777 *
778 * This function allocates data buffer required for all the FC
779 * nodes of the vport to collect statistical data.
780 **/
781void
782lpfc_alloc_bucket(struct lpfc_vport *vport)
783{
784 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
785
786 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
787 if (!NLP_CHK_NODE_ACT(ndlp))
788 continue;
789
790 kfree(ndlp->lat_data);
791 ndlp->lat_data = NULL;
792
793 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
794 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
795 sizeof(struct lpfc_scsicmd_bkt),
796 GFP_ATOMIC);
797
798 if (!ndlp->lat_data)
799 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
800 "0287 lpfc_alloc_bucket failed to "
801 "allocate statistical data buffer DID "
802 "0x%x\n", ndlp->nlp_DID);
803 }
804 }
805}
806
807/**
808 * lpfc_free_bucket: Free data buffer required for collecting
809 * statistical data.
810 * @vport: Pointer to vport object.
811 *
812 * Th function frees statistical data buffer of all the FC
813 * nodes of the vport.
814 **/
815void
816lpfc_free_bucket(struct lpfc_vport *vport)
817{
818 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
819
820 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
821 if (!NLP_CHK_NODE_ACT(ndlp))
822 continue;
823
824 kfree(ndlp->lat_data);
825 ndlp->lat_data = NULL;
826 }
827}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 96c445333b69..90828340acea 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -112,4 +112,8 @@ struct vport_cmd_tag {
112void lpfc_vport_set_state(struct lpfc_vport *vport, 112void lpfc_vport_set_state(struct lpfc_vport *vport,
113 enum fc_vport_state new_state); 113 enum fc_vport_state new_state);
114 114
115void lpfc_vport_reset_stat_data(struct lpfc_vport *);
116void lpfc_alloc_bucket(struct lpfc_vport *);
117void lpfc_free_bucket(struct lpfc_vport *);
118
115#endif /* H_LPFC_VPORT */ 119#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 28c9da7d4a5c..7dc62deb4087 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4402,6 +4402,10 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4402 scb_t *scb; 4402 scb_t *scb;
4403 int rval; 4403 int rval;
4404 4404
4405 scmd = scsi_allocate_command(GFP_KERNEL);
4406 if (!scmd)
4407 return -ENOMEM;
4408
4405 /* 4409 /*
4406 * The internal commands share one command id and hence are 4410 * The internal commands share one command id and hence are
4407 * serialized. This is so because we want to reserve maximum number of 4411 * serialized. This is so because we want to reserve maximum number of
@@ -4412,12 +4416,11 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4412 scb = &adapter->int_scb; 4416 scb = &adapter->int_scb;
4413 memset(scb, 0, sizeof(scb_t)); 4417 memset(scb, 0, sizeof(scb_t));
4414 4418
4415 scmd = &adapter->int_scmd;
4416 memset(scmd, 0, sizeof(Scsi_Cmnd));
4417
4418 sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); 4419 sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
4419 scmd->device = sdev; 4420 scmd->device = sdev;
4420 4421
4422 memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
4423 scmd->cmnd = adapter->int_cdb;
4421 scmd->device->host = adapter->host; 4424 scmd->device->host = adapter->host;
4422 scmd->host_scribble = (void *)scb; 4425 scmd->host_scribble = (void *)scb;
4423 scmd->cmnd[0] = MEGA_INTERNAL_CMD; 4426 scmd->cmnd[0] = MEGA_INTERNAL_CMD;
@@ -4456,6 +4459,8 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4456 4459
4457 mutex_unlock(&adapter->int_mtx); 4460 mutex_unlock(&adapter->int_mtx);
4458 4461
4462 scsi_free_command(GFP_KERNEL, scmd);
4463
4459 return rval; 4464 return rval;
4460} 4465}
4461 4466
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index ee70bd4ae4ba..795201fa0b48 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -888,8 +888,8 @@ typedef struct {
888 888
889 u8 sglen; /* f/w supported scatter-gather list length */ 889 u8 sglen; /* f/w supported scatter-gather list length */
890 890
891 unsigned char int_cdb[MAX_COMMAND_SIZE];
891 scb_t int_scb; 892 scb_t int_scb;
892 Scsi_Cmnd int_scmd;
893 struct mutex int_mtx; /* To synchronize the internal 893 struct mutex int_mtx; /* To synchronize the internal
894 commands */ 894 commands */
895 struct completion int_waitq; /* wait queue for internal 895 struct completion int_waitq; /* wait queue for internal
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..a454f94623d7 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1167 * cmd has not been completed within the timeout period. 1167 * cmd has not been completed within the timeout period.
1168 */ 1168 */
1169static enum 1169static enum
1170scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 1170blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1171{ 1171{
1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; 1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
1173 struct megasas_instance *instance; 1173 struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1175 1175
1176 if (time_after(jiffies, scmd->jiffies_at_alloc + 1176 if (time_after(jiffies, scmd->jiffies_at_alloc +
1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { 1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
1178 return EH_NOT_HANDLED; 1178 return BLK_EH_NOT_HANDLED;
1179 } 1179 }
1180 1180
1181 instance = cmd->instance; 1181 instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1189 1189
1190 spin_unlock_irqrestore(instance->host->host_lock, flags); 1190 spin_unlock_irqrestore(instance->host->host_lock, flags);
1191 } 1191 }
1192 return EH_RESET_TIMER; 1192 return BLK_EH_RESET_TIMER;
1193} 1193}
1194 1194
1195/** 1195/**
@@ -2988,17 +2988,6 @@ static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2988} 2988}
2989 2989
2990/** 2990/**
2991 * megasas_mgmt_release - char node "release" entry point
2992 */
2993static int megasas_mgmt_release(struct inode *inode, struct file *filep)
2994{
2995 filep->private_data = NULL;
2996 fasync_helper(-1, filep, 0, &megasas_async_queue);
2997
2998 return 0;
2999}
3000
3001/**
3002 * megasas_mgmt_fasync - Async notifier registration from applications 2991 * megasas_mgmt_fasync - Async notifier registration from applications
3003 * 2992 *
3004 * This function adds the calling process to a driver global queue. When an 2993 * This function adds the calling process to a driver global queue. When an
@@ -3345,7 +3334,6 @@ megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
3345static const struct file_operations megasas_mgmt_fops = { 3334static const struct file_operations megasas_mgmt_fops = {
3346 .owner = THIS_MODULE, 3335 .owner = THIS_MODULE,
3347 .open = megasas_mgmt_open, 3336 .open = megasas_mgmt_open,
3348 .release = megasas_mgmt_release,
3349 .fasync = megasas_mgmt_fasync, 3337 .fasync = megasas_mgmt_fasync,
3350 .unlocked_ioctl = megasas_mgmt_ioctl, 3338 .unlocked_ioctl = megasas_mgmt_ioctl,
3351#ifdef CONFIG_COMPAT 3339#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
4170 ** 4170 **
4171 **---------------------------------------------------- 4171 **----------------------------------------------------
4172 */ 4172 */
4173 if (np->settle_time && cmd->timeout_per_command >= HZ) { 4173 if (np->settle_time && cmd->request->timeout >= HZ) {
4174 u_long tlimit = jiffies + cmd->timeout_per_command - HZ; 4174 u_long tlimit = jiffies + cmd->request->timeout - HZ;
4175 if (time_after(np->settle_time, tlimit)) 4175 if (time_after(np->settle_time, tlimit))
4176 np->settle_time = tlimit; 4176 np->settle_time = tlimit;
4177 } 4177 }
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 1c79f9794f4e..0ea78d9a37db 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5708,7 +5708,8 @@ static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * S
5708 struct device *osst_member; 5708 struct device *osst_member;
5709 int err; 5709 int err;
5710 5710
5711 osst_member = device_create_drvdata(osst_sysfs_class, device, dev, STp, "%s", name); 5711 osst_member = device_create(osst_sysfs_class, device, dev, STp,
5712 "%s", name);
5712 if (IS_ERR(osst_member)) { 5713 if (IS_ERR(osst_member)) {
5713 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name); 5714 printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
5714 return PTR_ERR(osst_member); 5715 return PTR_ERR(osst_member);
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 2dd0dc9a9aed..165ff884f48e 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -140,44 +140,41 @@ static void aha152x_detach(struct pcmcia_device *link)
140#define CS_CHECK(fn, ret) \ 140#define CS_CHECK(fn, ret) \
141do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 141do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
142 142
143static int aha152x_config_check(struct pcmcia_device *p_dev,
144 cistpl_cftable_entry_t *cfg,
145 cistpl_cftable_entry_t *dflt,
146 unsigned int vcc,
147 void *priv_data)
148{
149 /* For New Media T&J, look for a SCSI window */
150 if (cfg->io.win[0].len >= 0x20)
151 p_dev->io.BasePort1 = cfg->io.win[0].base;
152 else if ((cfg->io.nwin > 1) &&
153 (cfg->io.win[1].len >= 0x20))
154 p_dev->io.BasePort1 = cfg->io.win[1].base;
155 if ((cfg->io.nwin > 0) &&
156 (p_dev->io.BasePort1 < 0xffff)) {
157 if (!pcmcia_request_io(p_dev, &p_dev->io))
158 return 0;
159 }
160 return -EINVAL;
161}
162
143static int aha152x_config_cs(struct pcmcia_device *link) 163static int aha152x_config_cs(struct pcmcia_device *link)
144{ 164{
145 scsi_info_t *info = link->priv; 165 scsi_info_t *info = link->priv;
146 struct aha152x_setup s; 166 struct aha152x_setup s;
147 tuple_t tuple; 167 int last_ret, last_fn;
148 cisparse_t parse;
149 int i, last_ret, last_fn;
150 u_char tuple_data[64];
151 struct Scsi_Host *host; 168 struct Scsi_Host *host;
152 169
153 DEBUG(0, "aha152x_config(0x%p)\n", link); 170 DEBUG(0, "aha152x_config(0x%p)\n", link);
154 171
155 tuple.TupleData = tuple_data; 172 last_ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
156 tuple.TupleDataMax = 64; 173 if (last_ret) {
157 tuple.TupleOffset = 0; 174 cs_error(link, RequestIO, last_ret);
158 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 175 goto failed;
159 tuple.Attributes = 0;
160 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
161 while (1) {
162 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
163 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
164 goto next_entry;
165 /* For New Media T&J, look for a SCSI window */
166 if (parse.cftable_entry.io.win[0].len >= 0x20)
167 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
168 else if ((parse.cftable_entry.io.nwin > 1) &&
169 (parse.cftable_entry.io.win[1].len >= 0x20))
170 link->io.BasePort1 = parse.cftable_entry.io.win[1].base;
171 if ((parse.cftable_entry.io.nwin > 0) &&
172 (link->io.BasePort1 < 0xffff)) {
173 link->conf.ConfigIndex = parse.cftable_entry.index;
174 i = pcmcia_request_io(link, &link->io);
175 if (i == CS_SUCCESS) break;
176 }
177 next_entry:
178 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
179 } 176 }
180 177
181 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 178 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
182 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 179 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
183 180
@@ -208,6 +205,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
208 205
209cs_failed: 206cs_failed:
210 cs_error(link, last_fn, last_ret); 207 cs_error(link, last_fn, last_ret);
208failed:
211 aha152x_release_cs(link); 209 aha152x_release_cs(link);
212 return -ENODEV; 210 return -ENODEV;
213} 211}
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index d8b99351b053..06254f46a0dd 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -123,34 +123,30 @@ static void fdomain_detach(struct pcmcia_device *link)
123#define CS_CHECK(fn, ret) \ 123#define CS_CHECK(fn, ret) \
124do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 124do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
125 125
126static int fdomain_config_check(struct pcmcia_device *p_dev,
127 cistpl_cftable_entry_t *cfg,
128 cistpl_cftable_entry_t *dflt,
129 unsigned int vcc,
130 void *priv_data)
131{
132 p_dev->io.BasePort1 = cfg->io.win[0].base;
133 return pcmcia_request_io(p_dev, &p_dev->io);
134}
135
136
126static int fdomain_config(struct pcmcia_device *link) 137static int fdomain_config(struct pcmcia_device *link)
127{ 138{
128 scsi_info_t *info = link->priv; 139 scsi_info_t *info = link->priv;
129 tuple_t tuple; 140 int last_ret, last_fn;
130 cisparse_t parse;
131 int i, last_ret, last_fn;
132 u_char tuple_data[64];
133 char str[22]; 141 char str[22];
134 struct Scsi_Host *host; 142 struct Scsi_Host *host;
135 143
136 DEBUG(0, "fdomain_config(0x%p)\n", link); 144 DEBUG(0, "fdomain_config(0x%p)\n", link);
137 145
138 tuple.TupleData = tuple_data; 146 last_ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
139 tuple.TupleDataMax = 64; 147 if (last_ret) {
140 tuple.TupleOffset = 0; 148 cs_error(link, RequestIO, last_ret);
141 149 goto failed;
142 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
143 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
144 while (1) {
145 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
146 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
147 goto next_entry;
148 link->conf.ConfigIndex = parse.cftable_entry.index;
149 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
150 i = pcmcia_request_io(link, &link->io);
151 if (i == CS_SUCCESS) break;
152 next_entry:
153 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
154 } 150 }
155 151
156 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 152 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
@@ -181,6 +177,7 @@ static int fdomain_config(struct pcmcia_device *link)
181 177
182cs_failed: 178cs_failed:
183 cs_error(link, last_fn, last_ret); 179 cs_error(link, last_fn, last_ret);
180failed:
184 fdomain_release(link); 181 fdomain_release(link);
185 return -ENODEV; 182 return -ENODEV;
186} /* fdomain_config */ 183} /* fdomain_config */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 24e6cb8396e3..11a61ea8d5d9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1606,133 +1606,129 @@ static void nsp_cs_detach(struct pcmcia_device *link)
1606 is received, to configure the PCMCIA socket, and to make the 1606 is received, to configure the PCMCIA socket, and to make the
1607 ethernet device available to the system. 1607 ethernet device available to the system.
1608======================================================================*/ 1608======================================================================*/
1609#define CS_CHECK(fn, ret) \
1610do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1611/*====================================================================*/
1612static int nsp_cs_config(struct pcmcia_device *link)
1613{
1614 int ret;
1615 scsi_info_t *info = link->priv;
1616 tuple_t tuple;
1617 cisparse_t parse;
1618 int last_ret, last_fn;
1619 unsigned char tuple_data[64];
1620 config_info_t conf;
1621 win_req_t req;
1622 memreq_t map;
1623 cistpl_cftable_entry_t dflt = { 0 };
1624 struct Scsi_Host *host;
1625 nsp_hw_data *data = &nsp_data_base;
1626
1627 nsp_dbg(NSP_DEBUG_INIT, "in");
1628
1629 tuple.Attributes = 0;
1630 tuple.TupleData = tuple_data;
1631 tuple.TupleDataMax = sizeof(tuple_data);
1632 tuple.TupleOffset = 0;
1633
1634 /* Look up the current Vcc */
1635 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
1636 1609
1637 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 1610struct nsp_cs_configdata {
1638 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 1611 nsp_hw_data *data;
1639 while (1) { 1612 win_req_t req;
1640 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 1613};
1641 1614
1642 if (pcmcia_get_tuple_data(link, &tuple) != 0 || 1615static int nsp_cs_config_check(struct pcmcia_device *p_dev,
1643 pcmcia_parse_tuple(link, &tuple, &parse) != 0) 1616 cistpl_cftable_entry_t *cfg,
1644 goto next_entry; 1617 cistpl_cftable_entry_t *dflt,
1618 unsigned int vcc,
1619 void *priv_data)
1620{
1621 struct nsp_cs_configdata *cfg_mem = priv_data;
1645 1622
1646 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) { dflt = *cfg; } 1623 if (cfg->index == 0)
1647 if (cfg->index == 0) { goto next_entry; } 1624 return -ENODEV;
1648 link->conf.ConfigIndex = cfg->index;
1649 1625
1650 /* Does this card need audio output? */ 1626 /* Does this card need audio output? */
1651 if (cfg->flags & CISTPL_CFTABLE_AUDIO) { 1627 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
1652 link->conf.Attributes |= CONF_ENABLE_SPKR; 1628 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
1653 link->conf.Status = CCSR_AUDIO_ENA; 1629 p_dev->conf.Status = CCSR_AUDIO_ENA;
1654 } 1630 }
1655 1631
1656 /* Use power settings for Vcc and Vpp if present */ 1632 /* Use power settings for Vcc and Vpp if present */
1657 /* Note that the CIS values need to be rescaled */ 1633 /* Note that the CIS values need to be rescaled */
1658 if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) { 1634 if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) {
1659 if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000) { 1635 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000)
1660 goto next_entry; 1636 return -ENODEV;
1661 } 1637 else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) {
1662 } else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM)) { 1638 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM]/10000)
1663 if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM]/10000) { 1639 return -ENODEV;
1664 goto next_entry;
1665 }
1666 } 1640 }
1667 1641
1668 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) { 1642 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) {
1669 link->conf.Vpp = 1643 p_dev->conf.Vpp =
1670 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 1644 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
1671 } else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) { 1645 } else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) {
1672 link->conf.Vpp = 1646 p_dev->conf.Vpp =
1673 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 1647 dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
1674 } 1648 }
1675 1649
1676 /* Do we need to allocate an interrupt? */ 1650 /* Do we need to allocate an interrupt? */
1677 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) { 1651 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
1678 link->conf.Attributes |= CONF_ENABLE_IRQ; 1652 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1679 }
1680 1653
1681 /* IO window settings */ 1654 /* IO window settings */
1682 link->io.NumPorts1 = link->io.NumPorts2 = 0; 1655 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
1683 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { 1656 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
1684 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; 1657 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
1685 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 1658 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1686 if (!(io->flags & CISTPL_IO_8BIT)) 1659 if (!(io->flags & CISTPL_IO_8BIT))
1687 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 1660 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
1688 if (!(io->flags & CISTPL_IO_16BIT)) 1661 if (!(io->flags & CISTPL_IO_16BIT))
1689 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 1662 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1690 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; 1663 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
1691 link->io.BasePort1 = io->win[0].base; 1664 p_dev->io.BasePort1 = io->win[0].base;
1692 link->io.NumPorts1 = io->win[0].len; 1665 p_dev->io.NumPorts1 = io->win[0].len;
1693 if (io->nwin > 1) { 1666 if (io->nwin > 1) {
1694 link->io.Attributes2 = link->io.Attributes1; 1667 p_dev->io.Attributes2 = p_dev->io.Attributes1;
1695 link->io.BasePort2 = io->win[1].base; 1668 p_dev->io.BasePort2 = io->win[1].base;
1696 link->io.NumPorts2 = io->win[1].len; 1669 p_dev->io.NumPorts2 = io->win[1].len;
1697 } 1670 }
1698 /* This reserves IO space but doesn't actually enable it */ 1671 /* This reserves IO space but doesn't actually enable it */
1699 if (pcmcia_request_io(link, &link->io) != 0) 1672 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
1700 goto next_entry; 1673 goto next_entry;
1701 } 1674 }
1702 1675
1703 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 1676 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
1704 cistpl_mem_t *mem = 1677 memreq_t map;
1705 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem; 1678 cistpl_mem_t *mem =
1706 req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM; 1679 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
1707 req.Attributes |= WIN_ENABLE; 1680 cfg_mem->req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
1708 req.Base = mem->win[0].host_addr; 1681 cfg_mem->req.Attributes |= WIN_ENABLE;
1709 req.Size = mem->win[0].len; 1682 cfg_mem->req.Base = mem->win[0].host_addr;
1710 if (req.Size < 0x1000) { 1683 cfg_mem->req.Size = mem->win[0].len;
1711 req.Size = 0x1000; 1684 if (cfg_mem->req.Size < 0x1000)
1712 } 1685 cfg_mem->req.Size = 0x1000;
1713 req.AccessSpeed = 0; 1686 cfg_mem->req.AccessSpeed = 0;
1714 if (pcmcia_request_window(&link, &req, &link->win) != 0) 1687 if (pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win) != 0)
1715 goto next_entry; 1688 goto next_entry;
1716 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 1689 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
1717 if (pcmcia_map_mem_page(link->win, &map) != 0) 1690 if (pcmcia_map_mem_page(p_dev->win, &map) != 0)
1718 goto next_entry; 1691 goto next_entry;
1719 1692
1720 data->MmioAddress = (unsigned long)ioremap_nocache(req.Base, req.Size); 1693 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size);
1721 data->MmioLength = req.Size; 1694 cfg_mem->data->MmioLength = cfg_mem->req.Size;
1722 } 1695 }
1723 /* If we got this far, we're cool! */ 1696 /* If we got this far, we're cool! */
1724 break; 1697 return 0;
1725
1726 next_entry:
1727 nsp_dbg(NSP_DEBUG_INIT, "next");
1728 pcmcia_disable_device(link);
1729 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
1730 } 1698 }
1731 1699
1700next_entry:
1701 nsp_dbg(NSP_DEBUG_INIT, "next");
1702 pcmcia_disable_device(p_dev);
1703 return -ENODEV;
1704}
1705
1706static int nsp_cs_config(struct pcmcia_device *link)
1707{
1708 int ret;
1709 scsi_info_t *info = link->priv;
1710 struct nsp_cs_configdata *cfg_mem;
1711 struct Scsi_Host *host;
1712 nsp_hw_data *data = &nsp_data_base;
1713
1714 nsp_dbg(NSP_DEBUG_INIT, "in");
1715
1716 cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL);
1717 if (!cfg_mem)
1718 return -ENOMEM;
1719 cfg_mem->data = data;
1720
1721 ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem);
1722 goto cs_failed;
1723
1732 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1724 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1733 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 1725 if (pcmcia_request_irq(link, &link->irq))
1726 goto cs_failed;
1734 } 1727 }
1735 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 1728
1729 ret = pcmcia_request_configuration(link, &link->conf);
1730 if (ret)
1731 goto cs_failed;
1736 1732
1737 if (free_ports) { 1733 if (free_ports) {
1738 if (link->io.BasePort1) { 1734 if (link->io.BasePort1) {
@@ -1790,20 +1786,20 @@ static int nsp_cs_config(struct pcmcia_device *link)
1790 printk(" & 0x%04x-0x%04x", link->io.BasePort2, 1786 printk(" & 0x%04x-0x%04x", link->io.BasePort2,
1791 link->io.BasePort2+link->io.NumPorts2-1); 1787 link->io.BasePort2+link->io.NumPorts2-1);
1792 if (link->win) 1788 if (link->win)
1793 printk(", mem 0x%06lx-0x%06lx", req.Base, 1789 printk(", mem 0x%06lx-0x%06lx", cfg_mem->req.Base,
1794 req.Base+req.Size-1); 1790 cfg_mem->req.Base+cfg_mem->req.Size-1);
1795 printk("\n"); 1791 printk("\n");
1796 1792
1793 kfree(cfg_mem);
1797 return 0; 1794 return 0;
1798 1795
1799 cs_failed: 1796 cs_failed:
1800 nsp_dbg(NSP_DEBUG_INIT, "config fail"); 1797 nsp_dbg(NSP_DEBUG_INIT, "config fail");
1801 cs_error(link, last_fn, last_ret);
1802 nsp_cs_release(link); 1798 nsp_cs_release(link);
1799 kfree(cfg_mem);
1803 1800
1804 return -ENODEV; 1801 return -ENODEV;
1805} /* nsp_cs_config */ 1802} /* nsp_cs_config */
1806#undef CS_CHECK
1807 1803
1808 1804
1809/*====================================================================== 1805/*======================================================================
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 67c5a58d17df..20c3e5e6d88a 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -195,39 +195,33 @@ static void qlogic_detach(struct pcmcia_device *link)
195#define CS_CHECK(fn, ret) \ 195#define CS_CHECK(fn, ret) \
196do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 196do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
197 197
198static int qlogic_config_check(struct pcmcia_device *p_dev,
199 cistpl_cftable_entry_t *cfg,
200 cistpl_cftable_entry_t *dflt,
201 unsigned int vcc,
202 void *priv_data)
203{
204 p_dev->io.BasePort1 = cfg->io.win[0].base;
205 p_dev->io.NumPorts1 = cfg->io.win[0].len;
206
207 if (p_dev->io.BasePort1 == 0)
208 return -ENODEV;
209
210 return pcmcia_request_io(p_dev, &p_dev->io);
211}
212
198static int qlogic_config(struct pcmcia_device * link) 213static int qlogic_config(struct pcmcia_device * link)
199{ 214{
200 scsi_info_t *info = link->priv; 215 scsi_info_t *info = link->priv;
201 tuple_t tuple; 216 int last_ret, last_fn;
202 cisparse_t parse;
203 int i, last_ret, last_fn;
204 unsigned short tuple_data[32];
205 struct Scsi_Host *host; 217 struct Scsi_Host *host;
206 218
207 DEBUG(0, "qlogic_config(0x%p)\n", link); 219 DEBUG(0, "qlogic_config(0x%p)\n", link);
208 220
209 info->manf_id = link->manf_id; 221 last_ret = pcmcia_loop_config(link, qlogic_config_check, NULL);
210 222 if (last_ret) {
211 tuple.TupleData = (cisdata_t *) tuple_data; 223 cs_error(link, RequestIO, last_ret);
212 tuple.TupleDataMax = 64; 224 goto failed;
213 tuple.TupleOffset = 0;
214
215 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
216 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
217 while (1) {
218 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
219 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
220 goto next_entry;
221 link->conf.ConfigIndex = parse.cftable_entry.index;
222 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
223 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
224 if (link->io.BasePort1 != 0) {
225 i = pcmcia_request_io(link, &link->io);
226 if (i == CS_SUCCESS)
227 break;
228 }
229 next_entry:
230 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
231 } 225 }
232 226
233 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 227 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
@@ -262,6 +256,7 @@ static int qlogic_config(struct pcmcia_device * link)
262cs_failed: 256cs_failed:
263 cs_error(link, last_fn, last_ret); 257 cs_error(link, last_fn, last_ret);
264 pcmcia_disable_device(link); 258 pcmcia_disable_device(link);
259failed:
265 return -ENODEV; 260 return -ENODEV;
266 261
267} /* qlogic_config */ 262} /* qlogic_config */
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 0be232b58ffb..b330c11a1752 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -700,15 +700,27 @@ static struct scsi_host_template sym53c500_driver_template = {
700#define CS_CHECK(fn, ret) \ 700#define CS_CHECK(fn, ret) \
701do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 701do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
702 702
703static int SYM53C500_config_check(struct pcmcia_device *p_dev,
704 cistpl_cftable_entry_t *cfg,
705 cistpl_cftable_entry_t *dflt,
706 unsigned int vcc,
707 void *priv_data)
708{
709 p_dev->io.BasePort1 = cfg->io.win[0].base;
710 p_dev->io.NumPorts1 = cfg->io.win[0].len;
711
712 if (p_dev->io.BasePort1 == 0)
713 return -ENODEV;
714
715 return pcmcia_request_io(p_dev, &p_dev->io);
716}
717
703static int 718static int
704SYM53C500_config(struct pcmcia_device *link) 719SYM53C500_config(struct pcmcia_device *link)
705{ 720{
706 struct scsi_info_t *info = link->priv; 721 struct scsi_info_t *info = link->priv;
707 tuple_t tuple; 722 int last_ret, last_fn;
708 cisparse_t parse;
709 int i, last_ret, last_fn;
710 int irq_level, port_base; 723 int irq_level, port_base;
711 unsigned short tuple_data[32];
712 struct Scsi_Host *host; 724 struct Scsi_Host *host;
713 struct scsi_host_template *tpnt = &sym53c500_driver_template; 725 struct scsi_host_template *tpnt = &sym53c500_driver_template;
714 struct sym53c500_data *data; 726 struct sym53c500_data *data;
@@ -717,27 +729,10 @@ SYM53C500_config(struct pcmcia_device *link)
717 729
718 info->manf_id = link->manf_id; 730 info->manf_id = link->manf_id;
719 731
720 tuple.TupleData = (cisdata_t *)tuple_data; 732 last_ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL);
721 tuple.TupleDataMax = 64; 733 if (last_ret) {
722 tuple.TupleOffset = 0; 734 cs_error(link, RequestIO, last_ret);
723 735 goto failed;
724 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
725 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
726 while (1) {
727 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
728 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
729 goto next_entry;
730 link->conf.ConfigIndex = parse.cftable_entry.index;
731 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
732 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
733
734 if (link->io.BasePort1 != 0) {
735 i = pcmcia_request_io(link, &link->io);
736 if (i == CS_SUCCESS)
737 break;
738 }
739next_entry:
740 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
741 } 736 }
742 737
743 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 738 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
@@ -831,6 +826,7 @@ err_release:
831 826
832cs_failed: 827cs_failed:
833 cs_error(link, last_fn, last_ret); 828 cs_error(link, last_fn, last_ret);
829failed:
834 SYM53C500_release(link); 830 SYM53C500_release(link);
835 return -ENODEV; 831 return -ENODEV;
836} /* SYM53C500_config */ 832} /* SYM53C500_config */
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2846 2846
2847 /* Set ISP command timeout. */ 2847 /* Set ISP command timeout. */
2848 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 2848 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2849 2849
2850 /* Set device target ID and LUN */ 2850 /* Set device target ID and LUN */
2851 pkt->lun = SCSI_LUN_32(cmd); 2851 pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3115 3115
3116 /* Set ISP command timeout. */ 3116 /* Set ISP command timeout. */
3117 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 3117 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3118 3118
3119 /* Set device target ID and LUN */ 3119 /* Set device target ID and LUN */
3120 pkt->lun = SCSI_LUN_32(cmd); 3120 pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34d..ed731968f15f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
292 valid = 0; 292 valid = 0;
293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
294 valid = 1; 294 valid = 1;
295 else if (start == (FA_BOOT_CODE_ADDR*4) || 295 else if (start == (ha->flt_region_boot * 4) ||
296 start == (FA_RISC_CODE_ADDR*4)) 296 start == (ha->flt_region_fw * 4))
297 valid = 1; 297 valid = 1;
298 else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4)) 298 else if (IS_QLA25XX(ha) &&
299 start == (ha->flt_region_vpd_nvram * 4))
299 valid = 1; 300 valid = 1;
300 if (!valid) { 301 if (!valid) {
301 qla_printk(KERN_WARNING, ha, 302 qla_printk(KERN_WARNING, ha,
@@ -1005,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1005 } 1006 }
1006 1007
1007 qla2x00_abort_fcport_cmds(fcport); 1008 qla2x00_abort_fcport_cmds(fcport);
1008 scsi_target_unblock(&rport->dev);
1009} 1009}
1010 1010
1011static int 1011static int
@@ -1065,6 +1065,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1065 pfc_host_stat->dumped_frames = stats->dumped_frames; 1065 pfc_host_stat->dumped_frames = stats->dumped_frames;
1066 pfc_host_stat->nos_count = stats->nos_rcvd; 1066 pfc_host_stat->nos_count = stats->nos_rcvd;
1067 } 1067 }
1068 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1069 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1068 1070
1069done_free: 1071done_free:
1070 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1072 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd8..b97194096d8e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/aer.h> 26#include <linux/aer.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/semaphore.h>
29 28
30#include <scsi/scsi.h> 29#include <scsi/scsi.h>
31#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -2109,7 +2108,7 @@ struct scsi_qla_host;
2109 2108
2110struct qla_msix_entry { 2109struct qla_msix_entry {
2111 int have_irq; 2110 int have_irq;
2112 uint16_t msix_vector; 2111 uint32_t msix_vector;
2113 uint16_t msix_entry; 2112 uint16_t msix_entry;
2114}; 2113};
2115 2114
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
2157 2156
2158struct qla_statistics { 2157struct qla_statistics {
2159 uint32_t total_isp_aborts; 2158 uint32_t total_isp_aborts;
2159 uint64_t input_bytes;
2160 uint64_t output_bytes;
2160}; 2161};
2161 2162
2162/* 2163/*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
2238#define FCPORT_UPDATE_NEEDED 27 2239#define FCPORT_UPDATE_NEEDED 27
2239#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ 2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2240#define UNLOADING 29 2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2241 2243
2242 uint32_t device_flags; 2244 uint32_t device_flags;
2243#define DFLG_LOCAL_DEVICES BIT_0 2245#define DFLG_LOCAL_DEVICES BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
2507 uint64_t fce_wr, fce_rd; 2509 uint64_t fce_wr, fce_rd;
2508 struct mutex fce_mutex; 2510 struct mutex fce_mutex;
2509 2511
2510 uint32_t hw_event_start;
2511 uint32_t hw_event_ptr; 2512 uint32_t hw_event_ptr;
2512 uint32_t hw_event_pause_errors; 2513 uint32_t hw_event_pause_errors;
2513 2514
@@ -2546,13 +2547,20 @@ typedef struct scsi_qla_host {
2546 uint8_t fcode_revision[16]; 2547 uint8_t fcode_revision[16];
2547 uint32_t fw_revision[4]; 2548 uint32_t fw_revision[4];
2548 2549
2549 uint16_t fdt_odd_index;
2550 uint32_t fdt_wrt_disable; 2550 uint32_t fdt_wrt_disable;
2551 uint32_t fdt_erase_cmd; 2551 uint32_t fdt_erase_cmd;
2552 uint32_t fdt_block_size; 2552 uint32_t fdt_block_size;
2553 uint32_t fdt_unprotect_sec_cmd; 2553 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2554 uint32_t fdt_protect_sec_cmd;
2555 2555
2556 uint32_t flt_region_flt;
2557 uint32_t flt_region_fdt;
2558 uint32_t flt_region_boot;
2559 uint32_t flt_region_fw;
2560 uint32_t flt_region_vpd_nvram;
2561 uint32_t flt_region_hw_event;
2562 uint32_t flt_region_npiv_conf;
2563
2556 /* Needed for BEACON */ 2564 /* Needed for BEACON */
2557 uint16_t beacon_blink_led; 2565 uint16_t beacon_blink_led;
2558 uint8_t beacon_color_state; 2566 uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400d..d1d14202575a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
789#define FA_RISC_CODE_ADDR 0x20000 789#define FA_RISC_CODE_ADDR 0x20000
790#define FA_RISC_CODE_SEGMENTS 2 790#define FA_RISC_CODE_SEGMENTS 2
791 791
792#define FA_FLASH_DESCR_ADDR_24 0x11000
793#define FA_FLASH_LAYOUT_ADDR_24 0x11400
794#define FA_NPIV_CONF0_ADDR_24 0x16000
795#define FA_NPIV_CONF1_ADDR_24 0x17000
796
792#define FA_FW_AREA_ADDR 0x40000 797#define FA_FW_AREA_ADDR 0x40000
793#define FA_VPD_NVRAM_ADDR 0x48000 798#define FA_VPD_NVRAM_ADDR 0x48000
794#define FA_FEATURE_ADDR 0x4C000 799#define FA_FEATURE_ADDR 0x4C000
795#define FA_FLASH_DESCR_ADDR 0x50000 800#define FA_FLASH_DESCR_ADDR 0x50000
801#define FA_FLASH_LAYOUT_ADDR 0x50400
796#define FA_HW_EVENT0_ADDR 0x54000 802#define FA_HW_EVENT0_ADDR 0x54000
797#define FA_HW_EVENT1_ADDR 0x54200 803#define FA_HW_EVENT1_ADDR 0x54400
798#define FA_HW_EVENT_SIZE 0x200 804#define FA_HW_EVENT_SIZE 0x200
799#define FA_HW_EVENT_ENTRY_SIZE 4 805#define FA_HW_EVENT_ENTRY_SIZE 4
806#define FA_NPIV_CONF0_ADDR 0x5C000
807#define FA_NPIV_CONF1_ADDR 0x5D000
808
800/* 809/*
801 * Flash Error Log Event Codes. 810 * Flash Error Log Event Codes.
802 */ 811 */
@@ -806,10 +815,6 @@ struct device_reg_24xx {
806#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 815#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
807#define HW_EVENT_FLASH_FW_ERR 0xF024 816#define HW_EVENT_FLASH_FW_ERR 0xF024
808 817
809#define FA_BOOT_LOG_ADDR 0x58000
810#define FA_FW_DUMP0_ADDR 0x60000
811#define FA_FW_DUMP1_ADDR 0x70000
812
813 uint32_t flash_data; /* Flash/NVRAM BIOS data. */ 818 uint32_t flash_data; /* Flash/NVRAM BIOS data. */
814 819
815 uint32_t ctrl_status; /* Control/Status. */ 820 uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
1203 uint8_t unused2[65]; 1208 uint8_t unused2[65];
1204}; 1209};
1205 1210
1211/* Flash Layout Table ********************************************************/
1212
1213struct qla_flt_location {
1214 uint8_t sig[4];
1215 uint32_t start_lo;
1216 uint32_t start_hi;
1217 uint16_t unused;
1218 uint16_t checksum;
1219};
1220
1221struct qla_flt_header {
1222 uint16_t version;
1223 uint16_t length;
1224 uint16_t checksum;
1225 uint16_t unused;
1226};
1227
1228#define FLT_REG_FW 0x01
1229#define FLT_REG_BOOT_CODE 0x07
1230#define FLT_REG_VPD_0 0x14
1231#define FLT_REG_NVRAM_0 0x15
1232#define FLT_REG_VPD_1 0x16
1233#define FLT_REG_NVRAM_1 0x17
1234#define FLT_REG_FDT 0x1a
1235#define FLT_REG_FLT 0x1c
1236#define FLT_REG_HW_EVENT_0 0x1d
1237#define FLT_REG_HW_EVENT_1 0x1f
1238#define FLT_REG_NPIV_CONF_0 0x29
1239#define FLT_REG_NPIV_CONF_1 0x2a
1240
1241struct qla_flt_region {
1242 uint32_t code;
1243 uint32_t size;
1244 uint32_t start;
1245 uint32_t end;
1246};
1247
1248/* Flash NPIV Configuration Table ********************************************/
1249
1250struct qla_npiv_header {
1251 uint8_t sig[2];
1252 uint16_t version;
1253 uint16_t entries;
1254 uint16_t unused[4];
1255 uint16_t checksum;
1256};
1257
1258struct qla_npiv_entry {
1259 uint16_t flags;
1260 uint16_t vf_id;
1261 uint16_t qos;
1262 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE];
1265};
1266
1206/* 84XX Support **************************************************************/ 1267/* 84XX Support **************************************************************/
1207 1268
1208#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ 1269#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a6..753dbe6cce6e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t, 313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
314 uint16_t, uint16_t); 314 uint16_t, uint16_t);
315 315
316extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
318 318
319extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
320
319/* 321/*
320 * Global Function Prototypes in qla_dbg.c source file. 322 * Global Function Prototypes in qla_dbg.c source file.
321 */ 323 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aae..4218f20f5ed5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
83 83
84 ha->isp_ops->reset_chip(ha); 84 ha->isp_ops->reset_chip(ha);
85 85
86 rval = qla2xxx_get_flash_info(ha);
87 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no));
90 return (rval);
91 }
92
86 ha->isp_ops->get_flash_version(ha, ha->request_ring); 93 ha->isp_ops->get_flash_version(ha, ha->request_ring);
87 94
88 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
109 rval = qla2x00_setup_chip(ha); 116 rval = qla2x00_setup_chip(ha);
110 if (rval) 117 if (rval)
111 return (rval); 118 return (rval);
112 qla2xxx_get_flash_info(ha);
113 } 119 }
114 if (IS_QLA84XX(ha)) { 120 if (IS_QLA84XX(ha)) {
115 ha->cs84xx = qla84xx_get_chip(ha); 121 ha->cs84xx = qla84xx_get_chip(ha);
@@ -134,7 +140,6 @@ int
134qla2100_pci_config(scsi_qla_host_t *ha) 140qla2100_pci_config(scsi_qla_host_t *ha)
135{ 141{
136 uint16_t w; 142 uint16_t w;
137 uint32_t d;
138 unsigned long flags; 143 unsigned long flags;
139 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
140 145
@@ -145,10 +150,7 @@ qla2100_pci_config(scsi_qla_host_t *ha)
145 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 150 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
146 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 151 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
147 152
148 /* Reset expansion ROM address decode enable */ 153 pci_disable_rom(ha->pdev);
149 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
150 d &= ~PCI_ROM_ADDRESS_ENABLE;
151 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
152 154
153 /* Get PCI bus information. */ 155 /* Get PCI bus information. */
154 spin_lock_irqsave(&ha->hardware_lock, flags); 156 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -168,7 +170,6 @@ int
168qla2300_pci_config(scsi_qla_host_t *ha) 170qla2300_pci_config(scsi_qla_host_t *ha)
169{ 171{
170 uint16_t w; 172 uint16_t w;
171 uint32_t d;
172 unsigned long flags = 0; 173 unsigned long flags = 0;
173 uint32_t cnt; 174 uint32_t cnt;
174 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -230,10 +231,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
230 231
231 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 232 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
232 233
233 /* Reset expansion ROM address decode enable */ 234 pci_disable_rom(ha->pdev);
234 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
235 d &= ~PCI_ROM_ADDRESS_ENABLE;
236 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
237 235
238 /* Get PCI bus information. */ 236 /* Get PCI bus information. */
239 spin_lock_irqsave(&ha->hardware_lock, flags); 237 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -253,7 +251,6 @@ int
253qla24xx_pci_config(scsi_qla_host_t *ha) 251qla24xx_pci_config(scsi_qla_host_t *ha)
254{ 252{
255 uint16_t w; 253 uint16_t w;
256 uint32_t d;
257 unsigned long flags = 0; 254 unsigned long flags = 0;
258 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
259 256
@@ -275,10 +272,7 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
275 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 272 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
276 pcie_set_readrq(ha->pdev, 2048); 273 pcie_set_readrq(ha->pdev, 2048);
277 274
278 /* Reset expansion ROM address decode enable */ 275 pci_disable_rom(ha->pdev);
279 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
280 d &= ~PCI_ROM_ADDRESS_ENABLE;
281 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
282 276
283 ha->chip_revision = ha->pdev->revision; 277 ha->chip_revision = ha->pdev->revision;
284 278
@@ -300,7 +294,6 @@ int
300qla25xx_pci_config(scsi_qla_host_t *ha) 294qla25xx_pci_config(scsi_qla_host_t *ha)
301{ 295{
302 uint16_t w; 296 uint16_t w;
303 uint32_t d;
304 297
305 pci_set_master(ha->pdev); 298 pci_set_master(ha->pdev);
306 pci_try_set_mwi(ha->pdev); 299 pci_try_set_mwi(ha->pdev);
@@ -314,10 +307,7 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
314 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 307 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
315 pcie_set_readrq(ha->pdev, 2048); 308 pcie_set_readrq(ha->pdev, 2048);
316 309
317 /* Reset expansion ROM address decode enable */ 310 pci_disable_rom(ha->pdev);
318 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
319 d &= ~PCI_ROM_ADDRESS_ENABLE;
320 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
321 311
322 ha->chip_revision = ha->pdev->revision; 312 ha->chip_revision = ha->pdev->revision;
323 313
@@ -974,7 +964,6 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
974 &ha->fw_minor_version, 964 &ha->fw_minor_version,
975 &ha->fw_subminor_version, 965 &ha->fw_subminor_version,
976 &ha->fw_attributes, &ha->fw_memory_size); 966 &ha->fw_attributes, &ha->fw_memory_size);
977 qla2x00_resize_request_q(ha);
978 ha->flags.npiv_supported = 0; 967 ha->flags.npiv_supported = 0;
979 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
980 IS_QLA84XX(ha)) && 969 IS_QLA84XX(ha)) &&
@@ -986,6 +975,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
986 ha->max_npiv_vports = 975 ha->max_npiv_vports =
987 MIN_MULTI_ID_FABRIC - 1; 976 MIN_MULTI_ID_FABRIC - 1;
988 } 977 }
978 qla2x00_resize_request_q(ha);
989 979
990 if (ql2xallocfwdump) 980 if (ql2xallocfwdump)
991 qla2x00_alloc_fw_dump(ha); 981 qla2x00_alloc_fw_dump(ha);
@@ -2016,7 +2006,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2016 DEBUG3(printk("%s: exiting normally\n", __func__)); 2006 DEBUG3(printk("%s: exiting normally\n", __func__));
2017 } 2007 }
2018 2008
2019 /* Restore state if a resync event occured during processing */ 2009 /* Restore state if a resync event occurred during processing */
2020 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2010 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2021 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2011 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2022 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2012 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2551,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2561 rval = QLA_SUCCESS; 2551 rval = QLA_SUCCESS;
2562 2552
2563 /* Try GID_PT to get device list, else GAN. */ 2553 /* Try GID_PT to get device list, else GAN. */
2564 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC); 2554 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2565 if (!swl) { 2555 if (!swl) {
2566 /*EMPTY*/ 2556 /*EMPTY*/
2567 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2557 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3741,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3751 rval = QLA_SUCCESS; 3741 rval = QLA_SUCCESS;
3752 3742
3753 segments = FA_RISC_CODE_SEGMENTS; 3743 segments = FA_RISC_CODE_SEGMENTS;
3754 faddr = FA_RISC_CODE_ADDR; 3744 faddr = ha->flt_region_fw;
3755 dcode = (uint32_t *)ha->request_ring; 3745 dcode = (uint32_t *)ha->request_ring;
3756 *srisc_addr = 0; 3746 *srisc_addr = 0;
3757 3747
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaab..e90afad120ee 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
52 * @ha: HA context 52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock 53 * @ha_locked: is function called with the hardware lock
54 * 54 *
55 * Returns non-zero if a failure occured, else zero. 55 * Returns non-zero if a failure occurred, else zero.
56 */ 56 */
57static inline int 57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) 58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa4615..85bc0a48598b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
21 * Returns the proper CF_* direction based on CDB. 21 * Returns the proper CF_* direction based on CDB.
22 */ 22 */
23static inline uint16_t 23static inline uint16_t
24qla2x00_get_cmd_direction(struct scsi_cmnd *cmd) 24qla2x00_get_cmd_direction(srb_t *sp)
25{ 25{
26 uint16_t cflags; 26 uint16_t cflags;
27 27
28 cflags = 0; 28 cflags = 0;
29 29
30 /* Set transfer direction */ 30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) 31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 32 cflags = CF_WRITE;
33 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 33 sp->fcport->ha->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 36 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd);
39 }
35 return (cflags); 40 return (cflags);
36} 41}
37 42
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
169 174
170 ha = sp->ha; 175 ha = sp->ha;
171 176
172 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
173 178
174 /* Three DSDs are available in the Command Type 2 IOCB */ 179 /* Three DSDs are available in the Command Type 2 IOCB */
175 avail_dsds = 3; 180 avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
228 233
229 ha = sp->ha; 234 ha = sp->ha;
230 235
231 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
232 237
233 /* Two DSDs are available in the Command Type 3 IOCB */ 238 /* Two DSDs are available in the Command Type 3 IOCB */
234 avail_dsds = 2; 239 avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
262 * qla2x00_start_scsi() - Send a SCSI command to the ISP 267 * qla2x00_start_scsi() - Send a SCSI command to the ISP
263 * @sp: command to send to the ISP 268 * @sp: command to send to the ISP
264 * 269 *
265 * Returns non-zero if a failure occured, else zero. 270 * Returns non-zero if a failure occurred, else zero.
266 */ 271 */
267int 272int
268qla2x00_start_scsi(srb_t *sp) 273qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
407 * 412 *
408 * Can be called from both normal and interrupt context. 413 * Can be called from both normal and interrupt context.
409 * 414 *
410 * Returns non-zero if a failure occured, else zero. 415 * Returns non-zero if a failure occurred, else zero.
411 */ 416 */
412int 417int
413__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
625 ha = sp->ha; 630 ha = sp->ha;
626 631
627 /* Set transfer direction */ 632 /* Set transfer direction */
628 if (cmd->sc_data_direction == DMA_TO_DEVICE) 633 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
629 cmd_pkt->task_mgmt_flags = 634 cmd_pkt->task_mgmt_flags =
630 __constant_cpu_to_le16(TMF_WRITE_DATA); 635 __constant_cpu_to_le16(TMF_WRITE_DATA);
631 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 636 sp->fcport->ha->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
632 cmd_pkt->task_mgmt_flags = 639 cmd_pkt->task_mgmt_flags =
633 __constant_cpu_to_le16(TMF_READ_DATA); 640 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd);
643 }
634 644
635 /* One DSD is available in the Command Type 3 IOCB */ 645 /* One DSD is available in the Command Type 3 IOCB */
636 avail_dsds = 1; 646 avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
666 * qla24xx_start_scsi() - Send a SCSI command to the ISP 676 * qla24xx_start_scsi() - Send a SCSI command to the ISP
667 * @sp: command to send to the ISP 677 * @sp: command to send to the ISP
668 * 678 *
669 * Returns non-zero if a failure occured, else zero. 679 * Returns non-zero if a failure occurred, else zero.
670 */ 680 */
671int 681int
672qla24xx_start_scsi(srb_t *sp) 682qla24xx_start_scsi(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index bf41887cdd65..a76efd99d007 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
391 break; 391 break;
392 392
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no, 394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 mb[1])); 395 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]); 396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 397
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 399 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 461 ha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 462 qla_printk(KERN_INFO, ha,
463 "LIP reset occured (%x).\n", mb[1]); 463 "LIP reset occurred (%x).\n", mb[1]);
464 464
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 466 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* 545 /*
546 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 548 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 549 */
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 590 ha->host_no, mb[1], mb[2], mb[3]));
591 591
592 rscn_entry = (mb[1] << 16) | mb[2]; 592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 ha->d_id.b.al_pa; 594 ha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 595 if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
600 break; 600 break;
601 } 601 }
602 602
603 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
603 rscn_queue_index = ha->rscn_in_ptr + 1; 605 rscn_queue_index = ha->rscn_in_ptr + 1;
604 if (rscn_queue_index == MAX_RSCN_COUNT) 606 if (rscn_queue_index == MAX_RSCN_COUNT)
605 rscn_queue_index = 0; 607 rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1060 resid = resid_len; 1062 resid = resid_len;
1061 /* Use F/W calculated residual length. */ 1063 /* Use F/W calculated residual length. */
1062 if (IS_FWI2_CAPABLE(ha)) { 1064 if (IS_FWI2_CAPABLE(ha)) {
1063 if (scsi_status & SS_RESIDUAL_UNDER && 1065 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1064 resid != fw_resid_len) { 1066 lscsi_status = 0;
1067 } else if (resid != fw_resid_len) {
1065 scsi_status &= ~SS_RESIDUAL_UNDER; 1068 scsi_status &= ~SS_RESIDUAL_UNDER;
1066 lscsi_status = 0; 1069 lscsi_status = 0;
1067 } 1070 }
@@ -1184,7 +1187,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1184 cp->serial_number, comp_status, 1187 cp->serial_number, comp_status,
1185 atomic_read(&fcport->state))); 1188 atomic_read(&fcport->state)));
1186 1189
1187 cp->result = DID_BUS_BUSY << 16; 1190 /*
1191 * We are going to have the fc class block the rport
1192 * while we try to recover so instruct the mid layer
1193 * to requeue until the class decides how to handle this.
1194 */
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1188 if (atomic_read(&fcport->state) == FCS_ONLINE) 1196 if (atomic_read(&fcport->state) == FCS_ONLINE)
1189 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
1190 break; 1198 break;
@@ -1211,7 +1219,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1211 break; 1219 break;
1212 1220
1213 case CS_TIMEOUT: 1221 case CS_TIMEOUT:
1214 cp->result = DID_BUS_BUSY << 16; 1222 /*
1223 * We are going to have the fc class block the rport
1224 * while we try to recover so instruct the mid layer
1225 * to requeue until the class decides how to handle this.
1226 */
1227 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1215 1228
1216 if (IS_FWI2_CAPABLE(ha)) { 1229 if (IS_FWI2_CAPABLE(ha)) {
1217 DEBUG2(printk(KERN_INFO 1230 DEBUG2(printk(KERN_INFO
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 813bc7784c0a..3402746ec128 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 233 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__, ha->host_no));
235 qla_printk(KERN_WARNING, ha, 235 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occured. Scheduling ISP " 236 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 237 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 239 qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 244 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 245 "abort_isp\n", __func__, ha->host_no));
246 qla_printk(KERN_WARNING, ha, 246 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occured. Issuing ISP " 247 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 248 "abort.\n");
249 249
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1964,7 +1964,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1964 *cur_iocb_cnt = mcp->mb[7]; 1964 *cur_iocb_cnt = mcp->mb[7];
1965 if (orig_iocb_cnt) 1965 if (orig_iocb_cnt)
1966 *orig_iocb_cnt = mcp->mb[10]; 1966 *orig_iocb_cnt = mcp->mb[10];
1967 if (max_npiv_vports) 1967 if (ha->flags.npiv_supported && max_npiv_vports)
1968 *max_npiv_vports = mcp->mb[11]; 1968 *max_npiv_vports = mcp->mb[11];
1969 } 1969 }
1970 1970
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
1995 char *pmap; 1995 char *pmap;
1996 dma_addr_t pmap_dma; 1996 dma_addr_t pmap_dma;
1997 1997
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma); 1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 1999 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2001 __func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6d0f0e5f2827..35567203ef61 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -394,10 +394,8 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
394 } 394 }
395 395
396 /* Close window on fcport/rport state-transitioning. */ 396 /* Close window on fcport/rport state-transitioning. */
397 if (fcport->drport) { 397 if (fcport->drport)
398 cmd->result = DID_IMM_RETRY << 16; 398 goto qc_target_busy;
399 goto qc_fail_command;
400 }
401 399
402 if (atomic_read(&fcport->state) != FCS_ONLINE) { 400 if (atomic_read(&fcport->state) != FCS_ONLINE) {
403 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -405,7 +403,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
405 cmd->result = DID_NO_CONNECT << 16; 403 cmd->result = DID_NO_CONNECT << 16;
406 goto qc_fail_command; 404 goto qc_fail_command;
407 } 405 }
408 goto qc_host_busy; 406 goto qc_target_busy;
409 } 407 }
410 408
411 spin_unlock_irq(ha->host->host_lock); 409 spin_unlock_irq(ha->host->host_lock);
@@ -428,10 +426,11 @@ qc_host_busy_free_sp:
428 426
429qc_host_busy_lock: 427qc_host_busy_lock:
430 spin_lock_irq(ha->host->host_lock); 428 spin_lock_irq(ha->host->host_lock);
431
432qc_host_busy:
433 return SCSI_MLQUEUE_HOST_BUSY; 429 return SCSI_MLQUEUE_HOST_BUSY;
434 430
431qc_target_busy:
432 return SCSI_MLQUEUE_TARGET_BUSY;
433
435qc_fail_command: 434qc_fail_command:
436 done(cmd); 435 done(cmd);
437 436
@@ -461,10 +460,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
461 } 460 }
462 461
463 /* Close window on fcport/rport state-transitioning. */ 462 /* Close window on fcport/rport state-transitioning. */
464 if (fcport->drport) { 463 if (fcport->drport)
465 cmd->result = DID_IMM_RETRY << 16; 464 goto qc24_target_busy;
466 goto qc24_fail_command;
467 }
468 465
469 if (atomic_read(&fcport->state) != FCS_ONLINE) { 466 if (atomic_read(&fcport->state) != FCS_ONLINE) {
470 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -472,7 +469,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
472 cmd->result = DID_NO_CONNECT << 16; 469 cmd->result = DID_NO_CONNECT << 16;
473 goto qc24_fail_command; 470 goto qc24_fail_command;
474 } 471 }
475 goto qc24_host_busy; 472 goto qc24_target_busy;
476 } 473 }
477 474
478 spin_unlock_irq(ha->host->host_lock); 475 spin_unlock_irq(ha->host->host_lock);
@@ -495,10 +492,11 @@ qc24_host_busy_free_sp:
495 492
496qc24_host_busy_lock: 493qc24_host_busy_lock:
497 spin_lock_irq(ha->host->host_lock); 494 spin_lock_irq(ha->host->host_lock);
498
499qc24_host_busy:
500 return SCSI_MLQUEUE_HOST_BUSY; 495 return SCSI_MLQUEUE_HOST_BUSY;
501 496
497qc24_target_busy:
498 return SCSI_MLQUEUE_TARGET_BUSY;
499
502qc24_fail_command: 500qc24_fail_command:
503 done(cmd); 501 done(cmd);
504 502
@@ -730,6 +728,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
730 if (ha->isp_ops->abort_command(ha, sp)) { 728 if (ha->isp_ops->abort_command(ha, sp)) {
731 DEBUG2(printk("%s(%ld): abort_command " 729 DEBUG2(printk("%s(%ld): abort_command "
732 "mbx failed.\n", __func__, ha->host_no)); 730 "mbx failed.\n", __func__, ha->host_no));
731 ret = FAILED;
733 } else { 732 } else {
734 DEBUG3(printk("%s(%ld): abort_command " 733 DEBUG3(printk("%s(%ld): abort_command "
735 "mbx success.\n", __func__, ha->host_no)); 734 "mbx success.\n", __func__, ha->host_no));
@@ -1517,6 +1516,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1516 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1517 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1519 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1518 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1519 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
1520} 1520}
1521 1521
1522static int 1522static int
@@ -1567,9 +1567,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1567 goto probe_out; 1567 goto probe_out;
1568 } 1568 }
1569 1569
1570 if (pci_find_aer_capability(pdev)) 1570 /* This may fail but that's ok */
1571 if (pci_enable_pcie_error_reporting(pdev)) 1571 pci_enable_pcie_error_reporting(pdev);
1572 goto probe_out;
1573 1572
1574 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1573 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
1575 if (host == NULL) { 1574 if (host == NULL) {
@@ -1663,8 +1662,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1663 ha->gid_list_info_size = 8; 1662 ha->gid_list_info_size = 8;
1664 ha->optrom_size = OPTROM_SIZE_25XX; 1663 ha->optrom_size = OPTROM_SIZE_25XX;
1665 ha->isp_ops = &qla25xx_isp_ops; 1664 ha->isp_ops = &qla25xx_isp_ops;
1666 ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
1667 FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
1668 } 1665 }
1669 host->can_queue = ha->request_q_length + 128; 1666 host->can_queue = ha->request_q_length + 128;
1670 1667
@@ -2433,6 +2430,12 @@ qla2x00_do_dpc(void *data)
2433 ha->host_no)); 2430 ha->host_no));
2434 } 2431 }
2435 2432
2433 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
2434 atomic_read(&ha->loop_state) == LOOP_READY) {
2435 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
2436 qla2xxx_flash_npiv_conf(ha);
2437 }
2438
2436 if (!ha->interrupts_on) 2439 if (!ha->interrupts_on)
2437 ha->isp_ops->enable_intrs(ha); 2440 ha->isp_ops->enable_intrs(ha);
2438 2441
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca74474935..e4af678eb2d6 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,199 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
543 } 543 }
544} 544}
545 545
546void 546static int
547qla2xxx_get_flash_info(scsi_qla_host_t *ha) 547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
548{
549 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids;
551 uint32_t *dcode;
552 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl;
555
556 /*
557 * FLT-location structure resides after the last PCI region.
558 */
559
560 /* Begin with sane defaults. */
561 loc = locations[0];
562 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
563 FA_FLASH_LAYOUT_ADDR;
564
565 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring;
567 dcode = (uint32_t *)ha->request_ring;
568 pcihdr = 0;
569 last_image = 1;
570 do {
571 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end;
576
577 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4);
581
582 /* Validate signature of PCI data structure. */
583 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
584 bcode[0x2] != 'I' || bcode[0x3] != 'R')
585 goto end;
586
587 last_image = bcode[0x15] & BIT_7;
588
589 /* Locate next PCI expansion ROM. */
590 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
591 } while (!last_image);
592
593 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end;
600
601 wptr = (uint16_t *)ha->request_ring;
602 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++);
605 if (chksum) {
606 qla_printk(KERN_ERR, ha,
607 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
608 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
609 return QLA_FUNCTION_FAILED;
610 }
611
612 /* Good data. Use specified location. */
613 loc = locations[1];
614 *start = le16_to_cpu(fltl->start_hi) << 16 |
615 le16_to_cpu(fltl->start_lo);
616end:
617 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
618 return QLA_SUCCESS;
619}
620
621static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
623{
624 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr;
626 uint16_t cnt, chksum;
627 uint32_t start;
628 struct qla_flt_header *flt;
629 struct qla_flt_region *region;
630
631 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring;
633 flt = (struct qla_flt_header *)ha->request_ring;
634 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
636 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data;
639 if (flt->version != __constant_cpu_to_le16(1)) {
640 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
641 "version=0x%x length=0x%x checksum=0x%x.\n",
642 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
643 le16_to_cpu(flt->checksum)));
644 goto no_flash_data;
645 }
646
647 cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
648 for (chksum = 0; cnt; cnt--)
649 chksum += le16_to_cpu(*wptr++);
650 if (chksum) {
651 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
652 "version=0x%x length=0x%x checksum=0x%x.\n",
653 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
654 chksum));
655 goto no_flash_data;
656 }
657
658 loc = locations[1];
659 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
660 for ( ; cnt; cnt--, region++) {
661 /* Store addresses as DWORD offsets. */
662 start = le32_to_cpu(region->start) >> 2;
663
664 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
665 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
666 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
667
668 switch (le32_to_cpu(region->code)) {
669 case FLT_REG_FW:
670 ha->flt_region_fw = start;
671 break;
672 case FLT_REG_BOOT_CODE:
673 ha->flt_region_boot = start;
674 break;
675 case FLT_REG_VPD_0:
676 ha->flt_region_vpd_nvram = start;
677 break;
678 case FLT_REG_FDT:
679 ha->flt_region_fdt = start;
680 break;
681 case FLT_REG_HW_EVENT_0:
682 if (!PCI_FUNC(ha->pdev->devfn))
683 ha->flt_region_hw_event = start;
684 break;
685 case FLT_REG_HW_EVENT_1:
686 if (PCI_FUNC(ha->pdev->devfn))
687 ha->flt_region_hw_event = start;
688 break;
689 case FLT_REG_NPIV_CONF_0:
690 if (!PCI_FUNC(ha->pdev->devfn))
691 ha->flt_region_npiv_conf = start;
692 break;
693 case FLT_REG_NPIV_CONF_1:
694 if (PCI_FUNC(ha->pdev->devfn))
695 ha->flt_region_npiv_conf = start;
696 break;
697 }
698 }
699 goto done;
700
701no_flash_data:
702 /* Use hardcoded defaults. */
703 loc = locations[0];
704 ha->flt_region_fw = FA_RISC_CODE_ADDR;
705 ha->flt_region_boot = FA_BOOT_CODE_ADDR;
706 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
707 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
708 FA_FLASH_DESCR_ADDR;
709 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
710 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
711 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
712 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
713 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
714done:
715 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
716 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
717 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
718 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
719 ha->flt_region_npiv_conf));
720}
721
722static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
548{ 724{
725#define FLASH_BLK_SIZE_4K 0x1000
549#define FLASH_BLK_SIZE_32K 0x8000 726#define FLASH_BLK_SIZE_32K 0x8000
550#define FLASH_BLK_SIZE_64K 0x10000 727#define FLASH_BLK_SIZE_64K 0x10000
728 const char *loc, *locations[] = { "MID", "FDT" };
551 uint16_t cnt, chksum; 729 uint16_t cnt, chksum;
552 uint16_t *wptr; 730 uint16_t *wptr;
553 struct qla_fdt_layout *fdt; 731 struct qla_fdt_layout *fdt;
554 uint8_t man_id, flash_id; 732 uint8_t man_id, flash_id;
555 733 uint16_t mid, fid;
556 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
557 return;
558 734
559 wptr = (uint16_t *)ha->request_ring; 735 wptr = (uint16_t *)ha->request_ring;
560 fdt = (struct qla_fdt_layout *)ha->request_ring; 736 fdt = (struct qla_fdt_layout *)ha->request_ring;
561 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 737 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
562 FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); 738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
563 if (*wptr == __constant_cpu_to_le16(0xffff)) 739 if (*wptr == __constant_cpu_to_le16(0xffff))
564 goto no_flash_data; 740 goto no_flash_data;
565 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || 741 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +753,9 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
577 goto no_flash_data; 753 goto no_flash_data;
578 } 754 }
579 755
580 ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; 756 loc = locations[1];
757 mid = le16_to_cpu(fdt->man_id);
758 fid = le16_to_cpu(fdt->id);
581 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 759 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
582 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
583 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 761 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
588 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
589 flash_conf_to_access_addr(0x0336); 767 flash_conf_to_access_addr(0x0336);
590 } 768 }
591 769 goto done;
592 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
593 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
594 le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
595 ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
596 ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
597 return;
598
599no_flash_data: 770no_flash_data:
771 loc = locations[0];
600 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 772 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
773 mid = man_id;
774 fid = flash_id;
601 ha->fdt_wrt_disable = 0x9c; 775 ha->fdt_wrt_disable = 0x9c;
602 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
603 switch (man_id) { 777 switch (man_id) {
@@ -614,8 +788,7 @@ no_flash_data:
614 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 788 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
615 break; 789 break;
616 case 0x1f: /* Atmel 26DF081A. */ 790 case 0x1f: /* Atmel 26DF081A. */
617 ha->fdt_odd_index = 1; 791 ha->fdt_block_size = FLASH_BLK_SIZE_4K;
618 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
619 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320); 792 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
620 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339); 793 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
621 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336); 794 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
@@ -625,14 +798,117 @@ no_flash_data:
625 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 798 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
626 break; 799 break;
627 } 800 }
628 801done:
629 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " 802 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
630 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, 803 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
631 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 804 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
632 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, 805 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
633 ha->fdt_block_size)); 806 ha->fdt_block_size));
634} 807}
635 808
809int
810qla2xxx_get_flash_info(scsi_qla_host_t *ha)
811{
812 int ret;
813 uint32_t flt_addr;
814
815 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
816 return QLA_SUCCESS;
817
818 ret = qla2xxx_find_flt_start(ha, &flt_addr);
819 if (ret != QLA_SUCCESS)
820 return ret;
821
822 qla2xxx_get_flt_info(ha, flt_addr);
823 qla2xxx_get_fdt_info(ha);
824
825 return QLA_SUCCESS;
826}
827
828void
829qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
830{
831#define NPIV_CONFIG_SIZE (16*1024)
832 void *data;
833 uint16_t *wptr;
834 uint16_t cnt, chksum;
835 struct qla_npiv_header hdr;
836 struct qla_npiv_entry *entry;
837
838 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
839 return;
840
841 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
842 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
843 if (hdr.version == __constant_cpu_to_le16(0xffff))
844 return;
845 if (hdr.version != __constant_cpu_to_le16(1)) {
846 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
847 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
848 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
849 le16_to_cpu(hdr.checksum)));
850 return;
851 }
852
853 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
854 if (!data) {
855 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
856 "allocate memory.\n"));
857 return;
858 }
859
860 ha->isp_ops->read_optrom(ha, (uint8_t *)data,
861 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
862
863 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
864 sizeof(struct qla_npiv_entry)) >> 1;
865 for (wptr = data, chksum = 0; cnt; cnt--)
866 chksum += le16_to_cpu(*wptr++);
867 if (chksum) {
868 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
869 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
870 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
871 chksum));
872 goto done;
873 }
874
875 entry = data + sizeof(struct qla_npiv_header);
876 cnt = le16_to_cpu(hdr.entries);
877 for ( ; cnt; cnt--, entry++) {
878 uint16_t flags;
879 struct fc_vport_identifiers vid;
880 struct fc_vport *vport;
881
882 flags = le16_to_cpu(entry->flags);
883 if (flags == 0xffff)
884 continue;
885 if ((flags & BIT_0) == 0)
886 continue;
887
888 memset(&vid, 0, sizeof(vid));
889 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
890 vid.vport_type = FC_PORTTYPE_NPIV;
891 vid.disable = false;
892 vid.port_name = wwn_to_u64(entry->port_name);
893 vid.node_name = wwn_to_u64(entry->node_name);
894
895 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
896 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
897 (unsigned long long)vid.port_name,
898 (unsigned long long)vid.node_name,
899 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
900
901 vport = fc_vport_create(ha->host, 0, &vid);
902 if (!vport)
903 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
904 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
905 (unsigned long long)vid.port_name,
906 (unsigned long long)vid.node_name);
907 }
908done:
909 kfree(data);
910}
911
636static void 912static void
637qla24xx_unprotect_flash(scsi_qla_host_t *ha) 913qla24xx_unprotect_flash(scsi_qla_host_t *ha)
638{ 914{
@@ -710,13 +986,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
710 qla24xx_unprotect_flash(ha); 986 qla24xx_unprotect_flash(ha);
711 987
712 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { 988 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
713 if (ha->fdt_odd_index) { 989
714 findex = faddr << 2; 990 findex = faddr;
715 fdata = findex & sec_mask; 991 fdata = (findex & sec_mask) << 2;
716 } else {
717 findex = faddr;
718 fdata = (findex & sec_mask) << 2;
719 }
720 992
721 /* Are we at the beginning of a sector? */ 993 /* Are we at the beginning of a sector? */
722 if ((findex & rest_addr) == 0) { 994 if ((findex & rest_addr) == 0) {
@@ -920,7 +1192,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
920 dwptr = (uint32_t *)buf; 1192 dwptr = (uint32_t *)buf;
921 for (i = 0; i < bytes >> 2; i++, naddr++) 1193 for (i = 0; i < bytes >> 2; i++, naddr++)
922 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1194 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
923 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr))); 1195 flash_data_to_access_addr(ha->flt_region_vpd_nvram |
1196 naddr)));
924 1197
925 return buf; 1198 return buf;
926} 1199}
@@ -935,10 +1208,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
935 dbuf = vmalloc(RMW_BUFFER_SIZE); 1208 dbuf = vmalloc(RMW_BUFFER_SIZE);
936 if (!dbuf) 1209 if (!dbuf)
937 return QLA_MEMORY_ALLOC_FAILED; 1210 return QLA_MEMORY_ALLOC_FAILED;
938 ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1211 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
939 RMW_BUFFER_SIZE); 1212 RMW_BUFFER_SIZE);
940 memcpy(dbuf + (naddr << 2), buf, bytes); 1213 memcpy(dbuf + (naddr << 2), buf, bytes);
941 ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1214 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
942 RMW_BUFFER_SIZE); 1215 RMW_BUFFER_SIZE);
943 vfree(dbuf); 1216 vfree(dbuf);
944 1217
@@ -2166,7 +2439,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2166 memset(dbyte, 0, 8); 2439 memset(dbyte, 0, 8);
2167 dcode = (uint16_t *)dbyte; 2440 dcode = (uint16_t *)dbyte;
2168 2441
2169 qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, 2442 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2170 8); 2443 8);
2171 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2444 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
2172 __func__, ha->host_no)); 2445 __func__, ha->host_no));
@@ -2177,7 +2450,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2177 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2450 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2178 dcode[3] == 0)) { 2451 dcode[3] == 0)) {
2179 DEBUG2(printk("%s(): Unrecognized fw revision at " 2452 DEBUG2(printk("%s(): Unrecognized fw revision at "
2180 "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); 2453 "%x.\n", __func__, ha->flt_region_fw * 4));
2181 } else { 2454 } else {
2182 /* values are in big endian */ 2455 /* values are in big endian */
2183 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2456 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2485,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2212 dcode = mbuf; 2485 dcode = mbuf;
2213 2486
2214 /* Begin with first PCI expansion ROM header. */ 2487 /* Begin with first PCI expansion ROM header. */
2215 pcihdr = 0; 2488 pcihdr = ha->flt_region_boot;
2216 last_image = 1; 2489 last_image = 1;
2217 do { 2490 do {
2218 /* Verify PCI expansion ROM header. */ 2491 /* Verify PCI expansion ROM header. */
@@ -2282,7 +2555,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2282 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2555 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2283 dcode = mbuf; 2556 dcode = mbuf;
2284 2557
2285 qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); 2558 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
2286 for (i = 0; i < 4; i++) 2559 for (i = 0; i < 4; i++)
2287 dcode[i] = be32_to_cpu(dcode[i]); 2560 dcode[i] = be32_to_cpu(dcode[i]);
2288 2561
@@ -2291,7 +2564,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2291 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2564 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2292 dcode[3] == 0)) { 2565 dcode[3] == 0)) {
2293 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2566 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
2294 __func__, FA_RISC_CODE_ADDR)); 2567 __func__, ha->flt_region_fw));
2295 } else { 2568 } else {
2296 ha->fw_revision[0] = dcode[0]; 2569 ha->fw_revision[0] = dcode[0];
2297 ha->fw_revision[1] = dcode[1]; 2570 ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2628,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2355 /* Locate first empty entry. */ 2628 /* Locate first empty entry. */
2356 for (;;) { 2629 for (;;) {
2357 if (ha->hw_event_ptr >= 2630 if (ha->hw_event_ptr >=
2358 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2631 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2359 DEBUG2(qla_printk(KERN_WARNING, ha, 2632 DEBUG2(qla_printk(KERN_WARNING, ha,
2360 "HW event -- Log Full!\n")); 2633 "HW event -- Log Full!\n"));
2361 return QLA_MEMORY_ALLOC_FAILED; 2634 return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2664,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2391 int rval; 2664 int rval;
2392 uint32_t marker[2], fdata[4]; 2665 uint32_t marker[2], fdata[4];
2393 2666
2394 if (ha->hw_event_start == 0) 2667 if (ha->flt_region_hw_event == 0)
2395 return QLA_FUNCTION_FAILED; 2668 return QLA_FUNCTION_FAILED;
2396 2669
2397 DEBUG2(qla_printk(KERN_WARNING, ha, 2670 DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2679,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2406 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); 2679 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2407 2680
2408 /* Locate marker. */ 2681 /* Locate marker. */
2409 ha->hw_event_ptr = ha->hw_event_start; 2682 ha->hw_event_ptr = ha->flt_region_hw_event;
2410 for (;;) { 2683 for (;;) {
2411 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2684 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
2412 4); 2685 4);
@@ -2415,7 +2688,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2415 break; 2688 break;
2416 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2689 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2417 if (ha->hw_event_ptr >= 2690 if (ha->hw_event_ptr >=
2418 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2691 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2419 DEBUG2(qla_printk(KERN_WARNING, ha, 2692 DEBUG2(qla_printk(KERN_WARNING, ha,
2420 "HW event -- Log Full!\n")); 2693 "HW event -- Log Full!\n"));
2421 return QLA_MEMORY_ALLOC_FAILED; 2694 return QLA_MEMORY_ALLOC_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4160e4caa7b9..eea6720adf16 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k7" 10#define QLA2XXX_VERSION "8.02.01-k9"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a91a57c57bff..799120fcb9be 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
139 ha->host_no, cmd->device->channel, 139 ha->host_no, cmd->device->channel,
140 cmd->device->id, cmd->device->lun)); 140 cmd->device->id, cmd->device->lun));
141 141
142 cmd->result = DID_BUS_BUSY << 16; 142 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
143 143
144 /* 144 /*
145 * Mark device missing so that we won't continue to send 145 * Mark device missing so that we won't continue to send
@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
243 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) 243 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
244 qla4xxx_mark_device_missing(ha, ddb_entry); 244 qla4xxx_mark_device_missing(ha, ddb_entry);
245 245
246 cmd->result = DID_BUS_BUSY << 16; 246 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
247 break; 247 break;
248 248
249 case SCS_QUEUE_FULL: 249 case SCS_QUEUE_FULL:
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..db7ea3bb4e83 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -353,7 +353,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
353 ha->host_no, ddb_entry->bus, ddb_entry->target, 353 ha->host_no, ddb_entry->bus, ddb_entry->target,
354 ddb_entry->fw_ddb_index)); 354 ddb_entry->fw_ddb_index));
355 iscsi_block_session(ddb_entry->sess); 355 iscsi_block_session(ddb_entry->sess);
356 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); 356 iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
357} 357}
358 358
359static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 359static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@@ -439,7 +439,7 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
439 cmd->result = DID_NO_CONNECT << 16; 439 cmd->result = DID_NO_CONNECT << 16;
440 goto qc_fail_command; 440 goto qc_fail_command;
441 } 441 }
442 goto qc_host_busy; 442 return SCSI_MLQUEUE_TARGET_BUSY;
443 } 443 }
444 444
445 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) 445 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1542 DEBUG2(printk(KERN_INFO 1542 DEBUG2(printk(KERN_INFO
1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1545 cmd, jiffies, cmd->timeout_per_command / HZ, 1545 cmd, jiffies, cmd->request->timeout / HZ,
1546 ha->dpc_flags, cmd->result, cmd->allowed)); 1546 ha->dpc_flags, cmd->result, cmd->allowed));
1547 1547
1548 /* FIXME: wait for hba to go online */ 1548 /* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
1598 DEBUG2(printk(KERN_INFO 1598 DEBUG2(printk(KERN_INFO
1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
1601 ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, 1601 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
1602 ha->dpc_flags, cmd->result, cmd->allowed)); 1602 ha->dpc_flags, cmd->result, cmd->allowed));
1603 1603
1604 stat = qla4xxx_reset_target(ha, ddb_entry); 1604 stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 905350896725..69d6ad862b60 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1,6 +1,6 @@
1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. 1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 * 2 *
3 * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
4 * 4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI 5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code. 6 * Qlogic ISP driver. Mucho kudos to him for this code.
@@ -25,12 +25,14 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/dma-mapping.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
28 31
29#include <asm/byteorder.h> 32#include <asm/byteorder.h>
30 33
31#include "qlogicpti.h" 34#include "qlogicpti.h"
32 35
33#include <asm/sbus.h>
34#include <asm/dma.h> 36#include <asm/dma.h>
35#include <asm/system.h> 37#include <asm/system.h>
36#include <asm/ptrace.h> 38#include <asm/ptrace.h>
@@ -157,7 +159,7 @@ static inline void set_sbus_cfg1(struct qlogicpti *qpti)
157 * is a nop and the chip ends up using the smallest burst 159 * is a nop and the chip ends up using the smallest burst
158 * size. -DaveM 160 * size. -DaveM
159 */ 161 */
160 if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) { 162 if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
161 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); 163 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
162 } else 164 } else
163#endif 165#endif
@@ -684,19 +686,19 @@ static void __devexit qpti_chain_del(struct qlogicpti *qpti)
684 686
685static int __devinit qpti_map_regs(struct qlogicpti *qpti) 687static int __devinit qpti_map_regs(struct qlogicpti *qpti)
686{ 688{
687 struct sbus_dev *sdev = qpti->sdev; 689 struct of_device *op = qpti->op;
688 690
689 qpti->qregs = sbus_ioremap(&sdev->resource[0], 0, 691 qpti->qregs = of_ioremap(&op->resource[0], 0,
690 sdev->reg_addrs[0].reg_size, 692 resource_size(&op->resource[0]),
691 "PTI Qlogic/ISP"); 693 "PTI Qlogic/ISP");
692 if (!qpti->qregs) { 694 if (!qpti->qregs) {
693 printk("PTI: Qlogic/ISP registers are unmappable\n"); 695 printk("PTI: Qlogic/ISP registers are unmappable\n");
694 return -1; 696 return -1;
695 } 697 }
696 if (qpti->is_pti) { 698 if (qpti->is_pti) {
697 qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096), 699 qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
698 sizeof(unsigned char), 700 sizeof(unsigned char),
699 "PTI Qlogic/ISP statreg"); 701 "PTI Qlogic/ISP statreg");
700 if (!qpti->sreg) { 702 if (!qpti->sreg) {
701 printk("PTI: Qlogic/ISP status register is unmappable\n"); 703 printk("PTI: Qlogic/ISP status register is unmappable\n");
702 return -1; 704 return -1;
@@ -707,9 +709,9 @@ static int __devinit qpti_map_regs(struct qlogicpti *qpti)
707 709
708static int __devinit qpti_register_irq(struct qlogicpti *qpti) 710static int __devinit qpti_register_irq(struct qlogicpti *qpti)
709{ 711{
710 struct sbus_dev *sdev = qpti->sdev; 712 struct of_device *op = qpti->op;
711 713
712 qpti->qhost->irq = qpti->irq = sdev->irqs[0]; 714 qpti->qhost->irq = qpti->irq = op->irqs[0];
713 715
714 /* We used to try various overly-clever things to 716 /* We used to try various overly-clever things to
715 * reduce the interrupt processing overhead on 717 * reduce the interrupt processing overhead on
@@ -732,17 +734,19 @@ fail:
732 734
733static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) 735static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
734{ 736{
735 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 737 struct of_device *op = qpti->op;
736 "initiator-id", 738 struct device_node *dp;
737 -1); 739
740 dp = op->node;
741
742 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
738 if (qpti->scsi_id == -1) 743 if (qpti->scsi_id == -1)
739 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 744 qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
740 "scsi-initiator-id", 745 -1);
741 -1);
742 if (qpti->scsi_id == -1) 746 if (qpti->scsi_id == -1)
743 qpti->scsi_id = 747 qpti->scsi_id =
744 prom_getintdefault(qpti->sdev->bus->prom_node, 748 of_getintprop_default(dp->parent,
745 "scsi-initiator-id", 7); 749 "scsi-initiator-id", 7);
746 qpti->qhost->this_id = qpti->scsi_id; 750 qpti->qhost->this_id = qpti->scsi_id;
747 qpti->qhost->max_sectors = 64; 751 qpti->qhost->max_sectors = 64;
748 752
@@ -751,12 +755,11 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
751 755
752static void qpti_get_bursts(struct qlogicpti *qpti) 756static void qpti_get_bursts(struct qlogicpti *qpti)
753{ 757{
754 struct sbus_dev *sdev = qpti->sdev; 758 struct of_device *op = qpti->op;
755 u8 bursts, bmask; 759 u8 bursts, bmask;
756 760
757 bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff); 761 bursts = of_getintprop_default(op->node, "burst-sizes", 0xff);
758 bmask = prom_getintdefault(sdev->bus->prom_node, 762 bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff);
759 "burst-sizes", 0xff);
760 if (bmask != 0xff) 763 if (bmask != 0xff)
761 bursts &= bmask; 764 bursts &= bmask;
762 if (bursts == 0xff || 765 if (bursts == 0xff ||
@@ -785,25 +788,25 @@ static void qpti_get_clock(struct qlogicpti *qpti)
785 */ 788 */
786static int __devinit qpti_map_queues(struct qlogicpti *qpti) 789static int __devinit qpti_map_queues(struct qlogicpti *qpti)
787{ 790{
788 struct sbus_dev *sdev = qpti->sdev; 791 struct of_device *op = qpti->op;
789 792
790#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 793#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
791 qpti->res_cpu = sbus_alloc_consistent(sdev, 794 qpti->res_cpu = dma_alloc_coherent(&op->dev,
792 QSIZE(RES_QUEUE_LEN), 795 QSIZE(RES_QUEUE_LEN),
793 &qpti->res_dvma); 796 &qpti->res_dvma, GFP_ATOMIC);
794 if (qpti->res_cpu == NULL || 797 if (qpti->res_cpu == NULL ||
795 qpti->res_dvma == 0) { 798 qpti->res_dvma == 0) {
796 printk("QPTI: Cannot map response queue.\n"); 799 printk("QPTI: Cannot map response queue.\n");
797 return -1; 800 return -1;
798 } 801 }
799 802
800 qpti->req_cpu = sbus_alloc_consistent(sdev, 803 qpti->req_cpu = dma_alloc_coherent(&op->dev,
801 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 804 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
802 &qpti->req_dvma); 805 &qpti->req_dvma, GFP_ATOMIC);
803 if (qpti->req_cpu == NULL || 806 if (qpti->req_cpu == NULL ||
804 qpti->req_dvma == 0) { 807 qpti->req_dvma == 0) {
805 sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN), 808 dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
806 qpti->res_cpu, qpti->res_dvma); 809 qpti->res_cpu, qpti->res_dvma);
807 printk("QPTI: Cannot map request queue.\n"); 810 printk("QPTI: Cannot map request queue.\n");
808 return -1; 811 return -1;
809 } 812 }
@@ -875,8 +878,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
875 int sg_count; 878 int sg_count;
876 879
877 sg = scsi_sglist(Cmnd); 880 sg = scsi_sglist(Cmnd);
878 sg_count = sbus_map_sg(qpti->sdev, sg, scsi_sg_count(Cmnd), 881 sg_count = dma_map_sg(&qpti->op->dev, sg,
879 Cmnd->sc_data_direction); 882 scsi_sg_count(Cmnd),
883 Cmnd->sc_data_direction);
880 884
881 ds = cmd->dataseg; 885 ds = cmd->dataseg;
882 cmd->segment_cnt = sg_count; 886 cmd->segment_cnt = sg_count;
@@ -1152,9 +1156,9 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1152 Cmnd->result = DID_ERROR << 16; 1156 Cmnd->result = DID_ERROR << 16;
1153 1157
1154 if (scsi_bufflen(Cmnd)) 1158 if (scsi_bufflen(Cmnd))
1155 sbus_unmap_sg(qpti->sdev, 1159 dma_unmap_sg(&qpti->op->dev,
1156 scsi_sglist(Cmnd), scsi_sg_count(Cmnd), 1160 scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
1157 Cmnd->sc_data_direction); 1161 Cmnd->sc_data_direction);
1158 1162
1159 qpti->cmd_count[Cmnd->device->id]--; 1163 qpti->cmd_count[Cmnd->device->id]--;
1160 sbus_writew(out_ptr, qpti->qregs + MBOX5); 1164 sbus_writew(out_ptr, qpti->qregs + MBOX5);
@@ -1268,34 +1272,32 @@ static struct scsi_host_template qpti_template = {
1268 .use_clustering = ENABLE_CLUSTERING, 1272 .use_clustering = ENABLE_CLUSTERING,
1269}; 1273};
1270 1274
1271static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match) 1275static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
1272{ 1276{
1273 static int nqptis;
1274 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1275 struct device_node *dp = dev->node;
1276 struct scsi_host_template *tpnt = match->data; 1277 struct scsi_host_template *tpnt = match->data;
1278 struct device_node *dp = op->node;
1277 struct Scsi_Host *host; 1279 struct Scsi_Host *host;
1278 struct qlogicpti *qpti; 1280 struct qlogicpti *qpti;
1281 static int nqptis;
1279 const char *fcode; 1282 const char *fcode;
1280 1283
1281 /* Sometimes Antares cards come up not completely 1284 /* Sometimes Antares cards come up not completely
1282 * setup, and we get a report of a zero IRQ. 1285 * setup, and we get a report of a zero IRQ.
1283 */ 1286 */
1284 if (sdev->irqs[0] == 0) 1287 if (op->irqs[0] == 0)
1285 return -ENODEV; 1288 return -ENODEV;
1286 1289
1287 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); 1290 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
1288 if (!host) 1291 if (!host)
1289 return -ENOMEM; 1292 return -ENOMEM;
1290 1293
1291 qpti = (struct qlogicpti *) host->hostdata; 1294 qpti = shost_priv(host);
1292 1295
1293 host->max_id = MAX_TARGETS; 1296 host->max_id = MAX_TARGETS;
1294 qpti->qhost = host; 1297 qpti->qhost = host;
1295 qpti->sdev = sdev; 1298 qpti->op = op;
1296 qpti->qpti_id = nqptis; 1299 qpti->qpti_id = nqptis;
1297 qpti->prom_node = sdev->prom_node; 1300 strcpy(qpti->prom_name, op->node->name);
1298 strcpy(qpti->prom_name, sdev->ofdev.node->name);
1299 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); 1301 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1300 1302
1301 if (qpti_map_regs(qpti) < 0) 1303 if (qpti_map_regs(qpti) < 0)
@@ -1341,12 +1343,12 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
1341 (qpti->ultra ? "Ultra" : "Fast"), 1343 (qpti->ultra ? "Ultra" : "Fast"),
1342 (qpti->differential ? "differential" : "single ended")); 1344 (qpti->differential ? "differential" : "single ended"));
1343 1345
1344 if (scsi_add_host(host, &dev->dev)) { 1346 if (scsi_add_host(host, &op->dev)) {
1345 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); 1347 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
1346 goto fail_unmap_queues; 1348 goto fail_unmap_queues;
1347 } 1349 }
1348 1350
1349 dev_set_drvdata(&sdev->ofdev.dev, qpti); 1351 dev_set_drvdata(&op->dev, qpti);
1350 1352
1351 qpti_chain_add(qpti); 1353 qpti_chain_add(qpti);
1352 1354
@@ -1357,19 +1359,20 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
1357 1359
1358fail_unmap_queues: 1360fail_unmap_queues:
1359#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1361#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1360 sbus_free_consistent(qpti->sdev, 1362 dma_free_coherent(&op->dev,
1361 QSIZE(RES_QUEUE_LEN), 1363 QSIZE(RES_QUEUE_LEN),
1362 qpti->res_cpu, qpti->res_dvma); 1364 qpti->res_cpu, qpti->res_dvma);
1363 sbus_free_consistent(qpti->sdev, 1365 dma_free_coherent(&op->dev,
1364 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1366 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1365 qpti->req_cpu, qpti->req_dvma); 1367 qpti->req_cpu, qpti->req_dvma);
1366#undef QSIZE 1368#undef QSIZE
1367 1369
1368fail_unmap_regs: 1370fail_unmap_regs:
1369 sbus_iounmap(qpti->qregs, 1371 of_iounmap(&op->resource[0], qpti->qregs,
1370 qpti->sdev->reg_addrs[0].reg_size); 1372 resource_size(&op->resource[0]));
1371 if (qpti->is_pti) 1373 if (qpti->is_pti)
1372 sbus_iounmap(qpti->sreg, sizeof(unsigned char)); 1374 of_iounmap(&op->resource[0], qpti->sreg,
1375 sizeof(unsigned char));
1373 1376
1374fail_free_irq: 1377fail_free_irq:
1375 free_irq(qpti->irq, qpti); 1378 free_irq(qpti->irq, qpti);
@@ -1380,9 +1383,9 @@ fail_unlink:
1380 return -ENODEV; 1383 return -ENODEV;
1381} 1384}
1382 1385
1383static int __devexit qpti_sbus_remove(struct of_device *dev) 1386static int __devexit qpti_sbus_remove(struct of_device *op)
1384{ 1387{
1385 struct qlogicpti *qpti = dev_get_drvdata(&dev->dev); 1388 struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
1386 1389
1387 qpti_chain_del(qpti); 1390 qpti_chain_del(qpti);
1388 1391
@@ -1395,24 +1398,25 @@ static int __devexit qpti_sbus_remove(struct of_device *dev)
1395 free_irq(qpti->irq, qpti); 1398 free_irq(qpti->irq, qpti);
1396 1399
1397#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1400#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1398 sbus_free_consistent(qpti->sdev, 1401 dma_free_coherent(&op->dev,
1399 QSIZE(RES_QUEUE_LEN), 1402 QSIZE(RES_QUEUE_LEN),
1400 qpti->res_cpu, qpti->res_dvma); 1403 qpti->res_cpu, qpti->res_dvma);
1401 sbus_free_consistent(qpti->sdev, 1404 dma_free_coherent(&op->dev,
1402 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1405 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1403 qpti->req_cpu, qpti->req_dvma); 1406 qpti->req_cpu, qpti->req_dvma);
1404#undef QSIZE 1407#undef QSIZE
1405 1408
1406 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size); 1409 of_iounmap(&op->resource[0], qpti->qregs,
1410 resource_size(&op->resource[0]));
1407 if (qpti->is_pti) 1411 if (qpti->is_pti)
1408 sbus_iounmap(qpti->sreg, sizeof(unsigned char)); 1412 of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));
1409 1413
1410 scsi_host_put(qpti->qhost); 1414 scsi_host_put(qpti->qhost);
1411 1415
1412 return 0; 1416 return 0;
1413} 1417}
1414 1418
1415static struct of_device_id qpti_match[] = { 1419static const struct of_device_id qpti_match[] = {
1416 { 1420 {
1417 .name = "ptisp", 1421 .name = "ptisp",
1418 .data = &qpti_template, 1422 .data = &qpti_template,
@@ -1442,7 +1446,7 @@ static struct of_platform_driver qpti_sbus_driver = {
1442 1446
1443static int __init qpti_init(void) 1447static int __init qpti_init(void)
1444{ 1448{
1445 return of_register_driver(&qpti_sbus_driver, &sbus_bus_type); 1449 return of_register_driver(&qpti_sbus_driver, &of_bus_type);
1446} 1450}
1447 1451
1448static void __exit qpti_exit(void) 1452static void __exit qpti_exit(void)
@@ -1453,7 +1457,7 @@ static void __exit qpti_exit(void)
1453MODULE_DESCRIPTION("QlogicISP SBUS driver"); 1457MODULE_DESCRIPTION("QlogicISP SBUS driver");
1454MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 1458MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
1455MODULE_LICENSE("GPL"); 1459MODULE_LICENSE("GPL");
1456MODULE_VERSION("2.0"); 1460MODULE_VERSION("2.1");
1457 1461
1458module_init(qpti_init); 1462module_init(qpti_init);
1459module_exit(qpti_exit); 1463module_exit(qpti_exit);
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index ef6da2df584b..9c053bbaa877 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -342,7 +342,7 @@ struct qlogicpti {
342 u_int req_in_ptr; /* index of next request slot */ 342 u_int req_in_ptr; /* index of next request slot */
343 u_int res_out_ptr; /* index of next result slot */ 343 u_int res_out_ptr; /* index of next result slot */
344 long send_marker; /* must we send a marker? */ 344 long send_marker; /* must we send a marker? */
345 struct sbus_dev *sdev; 345 struct of_device *op;
346 unsigned long __pad; 346 unsigned long __pad;
347 347
348 int cmd_count[MAX_TARGETS]; 348 int cmd_count[MAX_TARGETS];
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..f8b79d401d58 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
291 unsigned long flags; 291 unsigned long flags;
292 292
293 cmd->device = dev; 293 cmd->device = dev;
294 init_timer(&cmd->eh_timeout);
295 INIT_LIST_HEAD(&cmd->list); 294 INIT_LIST_HEAD(&cmd->list);
296 spin_lock_irqsave(&dev->list_lock, flags); 295 spin_lock_irqsave(&dev->list_lock, flags);
297 list_add_tail(&cmd->list, &dev->cmd_list); 296 list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
652 unsigned long timeout; 651 unsigned long timeout;
653 int rtn = 0; 652 int rtn = 0;
654 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt);
659
655 /* check if the device is still usable */ 660 /* check if the device is still usable */
656 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 661 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
657 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 662 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
658 * returns an immediate error upwards, and signals 663 * returns an immediate error upwards, and signals
659 * that the device is no longer present */ 664 * that the device is no longer present */
660 cmd->result = DID_NO_CONNECT << 16; 665 cmd->result = DID_NO_CONNECT << 16;
661 atomic_inc(&cmd->device->iorequest_cnt); 666 scsi_done(cmd);
662 __scsi_done(cmd);
663 /* return 0 (because the command has been processed) */ 667 /* return 0 (because the command has been processed) */
664 goto out; 668 goto out;
665 } 669 }
666 670
667 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ 671 /* Check to see if the scsi lld made this device blocked. */
668 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { 672 if (unlikely(scsi_device_blocked(cmd->device))) {
669 /* 673 /*
670 * in SDEV_BLOCK, the command is just put back on the device 674 * in blocked state, the command is just put back on
671 * queue. The suspend state has already blocked the queue so 675 * the device queue. The suspend state has already
672 * future requests should not occur until the device 676 * blocked the queue so future requests should not
673 * transitions out of the suspend state. 677 * occur until the device transitions out of the
678 * suspend state.
674 */ 679 */
680
675 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 681 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
676 682
677 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 683 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
714 host->resetting = 0; 720 host->resetting = 0;
715 } 721 }
716 722
717 /*
718 * AK: unlikely race here: for some reason the timer could
719 * expire before the serial number is set up below.
720 */
721 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
722
723 scsi_log_send(cmd); 723 scsi_log_send(cmd);
724 724
725 /* 725 /*
726 * We will use a queued command if possible, otherwise we will
727 * emulate the queuing and calling of completion function ourselves.
728 */
729 atomic_inc(&cmd->device->iorequest_cnt);
730
731 /*
732 * Before we queue this command, check if the command 726 * Before we queue this command, check if the command
733 * length exceeds what the host adapter can handle. 727 * length exceeds what the host adapter can handle.
734 */ 728 */
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
744 } 738 }
745 739
746 spin_lock_irqsave(host->host_lock, flags); 740 spin_lock_irqsave(host->host_lock, flags);
741 /*
742 * AK: unlikely race here: for some reason the timer could
743 * expire before the serial number is set up below.
744 *
745 * TODO: kill serial or move to blk layer
746 */
747 scsi_cmd_get_serial(host, cmd); 747 scsi_cmd_get_serial(host, cmd);
748 748
749 if (unlikely(host->shost_state == SHOST_DEL)) { 749 if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
754 } 754 }
755 spin_unlock_irqrestore(host->host_lock, flags); 755 spin_unlock_irqrestore(host->host_lock, flags);
756 if (rtn) { 756 if (rtn) {
757 if (scsi_delete_timer(cmd)) { 757 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
758 atomic_inc(&cmd->device->iodone_cnt); 758 rtn != SCSI_MLQUEUE_TARGET_BUSY)
759 scsi_queue_insert(cmd, 759 rtn = SCSI_MLQUEUE_HOST_BUSY;
760 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? 760
761 rtn : SCSI_MLQUEUE_HOST_BUSY); 761 scsi_queue_insert(cmd, rtn);
762 } 762
763 SCSI_LOG_MLQUEUE(3, 763 SCSI_LOG_MLQUEUE(3,
764 printk("queuecommand : request rejected\n")); 764 printk("queuecommand : request rejected\n"));
765 } 765 }
@@ -770,24 +770,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
770} 770}
771 771
772/** 772/**
773 * scsi_req_abort_cmd -- Request command recovery for the specified command
774 * @cmd: pointer to the SCSI command of interest
775 *
776 * This function requests that SCSI Core start recovery for the
777 * command by deleting the timer and adding the command to the eh
778 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
779 * implement their own error recovery MAY ignore the timeout event if
780 * they generated scsi_req_abort_cmd.
781 */
782void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
783{
784 if (!scsi_delete_timer(cmd))
785 return;
786 scsi_times_out(cmd);
787}
788EXPORT_SYMBOL(scsi_req_abort_cmd);
789
790/**
791 * scsi_done - Enqueue the finished SCSI command into the done queue. 773 * scsi_done - Enqueue the finished SCSI command into the done queue.
792 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 774 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
793 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 775 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +784,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
802 */ 784 */
803static void scsi_done(struct scsi_cmnd *cmd) 785static void scsi_done(struct scsi_cmnd *cmd)
804{ 786{
805 /* 787 blk_complete_request(cmd->request);
806 * We don't have to worry about this one timing out anymore.
807 * If we are unable to remove the timer, then the command
808 * has already timed out. In which case, we have no choice but to
809 * let the timeout function run, as we have no idea where in fact
810 * that function could really be. It might be on another processor,
811 * etc, etc.
812 */
813 if (!scsi_delete_timer(cmd))
814 return;
815 __scsi_done(cmd);
816}
817
818/* Private entry to scsi_done() to complete a command when the timer
819 * isn't running --- used by scsi_times_out */
820void __scsi_done(struct scsi_cmnd *cmd)
821{
822 struct request *rq = cmd->request;
823
824 /*
825 * Set the serial numbers back to zero
826 */
827 cmd->serial_number = 0;
828
829 atomic_inc(&cmd->device->iodone_cnt);
830 if (cmd->result)
831 atomic_inc(&cmd->device->ioerr_cnt);
832
833 BUG_ON(!rq);
834
835 /*
836 * The uptodate/nbytes values don't matter, as we allow partial
837 * completes and thus will check this in the softirq callback
838 */
839 rq->completion_data = cmd;
840 blk_complete_request(rq);
841} 788}
842 789
843/* Move this to a header if it becomes more generally useful */ 790/* Move this to a header if it becomes more generally useful */
@@ -857,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
857void scsi_finish_command(struct scsi_cmnd *cmd) 804void scsi_finish_command(struct scsi_cmnd *cmd)
858{ 805{
859 struct scsi_device *sdev = cmd->device; 806 struct scsi_device *sdev = cmd->device;
807 struct scsi_target *starget = scsi_target(sdev);
860 struct Scsi_Host *shost = sdev->host; 808 struct Scsi_Host *shost = sdev->host;
861 struct scsi_driver *drv; 809 struct scsi_driver *drv;
862 unsigned int good_bytes; 810 unsigned int good_bytes;
@@ -872,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
872 * XXX(hch): What about locking? 820 * XXX(hch): What about locking?
873 */ 821 */
874 shost->host_blocked = 0; 822 shost->host_blocked = 0;
823 starget->target_blocked = 0;
875 sdev->device_blocked = 0; 824 sdev->device_blocked = 0;
876 825
877 /* 826 /*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1dac..386361778ebb 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
112} 112}
113 113
114/** 114/**
115 * scsi_add_timer - Start timeout timer for a single scsi command.
116 * @scmd: scsi command that is about to start running.
117 * @timeout: amount of time to allow this command to run.
118 * @complete: timeout function to call if timer isn't canceled.
119 *
120 * Notes:
121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer.
124 */
125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *))
127{
128
129 /*
130 * If the clock was already running for this command, then
131 * first delete the timer. The timer handling code gets rather
132 * confused if we don't do this.
133 */
134 if (scmd->eh_timeout.function)
135 del_timer(&scmd->eh_timeout);
136
137 scmd->eh_timeout.data = (unsigned long)scmd;
138 scmd->eh_timeout.expires = jiffies + timeout;
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete));
144
145 add_timer(&scmd->eh_timeout);
146}
147
148/**
149 * scsi_delete_timer - Delete/cancel timer for a given function.
150 * @scmd: Cmd that we are canceling timer for
151 *
152 * Notes:
153 * This should be turned into an inline function.
154 *
155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run.
158 */
159int scsi_delete_timer(struct scsi_cmnd *scmd)
160{
161 int rtn;
162
163 rtn = del_timer(&scmd->eh_timeout);
164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __func__,
167 scmd, rtn));
168
169 scmd->eh_timeout.data = (unsigned long)NULL;
170 scmd->eh_timeout.function = NULL;
171
172 return rtn;
173}
174
175/**
176 * scsi_times_out - Timeout function for normal scsi commands. 115 * scsi_times_out - Timeout function for normal scsi commands.
177 * @scmd: Cmd that is timing out. 116 * @req: request that is timing out.
178 * 117 *
179 * Notes: 118 * Notes:
180 * We do not need to lock this. There is the potential for a race 119 * We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
182 * normal completion function determines that the timer has already 121 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything. 122 * fired, then it mustn't do anything.
184 */ 123 */
185void scsi_times_out(struct scsi_cmnd *scmd) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
186{ 125{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
188 129
189 scsi_log_completion(scmd, TIMEOUT_ERROR); 130 scsi_log_completion(scmd, TIMEOUT_ERROR);
190 131
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
196 eh_timed_out = NULL; 137 eh_timed_out = NULL;
197 138
198 if (eh_timed_out) 139 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) { 140 rtn = eh_timed_out(scmd);
200 case EH_HANDLED: 141 switch (rtn) {
201 __scsi_done(scmd); 142 case BLK_EH_NOT_HANDLED:
202 return;
203 case EH_RESET_TIMER:
204 scsi_add_timer(scmd, scmd->timeout_per_command,
205 scsi_times_out);
206 return;
207 case EH_NOT_HANDLED:
208 break; 143 break;
144 default:
145 return rtn;
209 } 146 }
210 147
211 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
212 scmd->result |= DID_TIME_OUT << 16; 149 scmd->result |= DID_TIME_OUT << 16;
213 __scsi_done(scmd); 150 return BLK_EH_HANDLED;
214 } 151 }
152
153 return BLK_EH_NOT_HANDLED;
215} 154}
216 155
217/** 156/**
@@ -1126,10 +1065,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1126 struct list_head *done_q) 1065 struct list_head *done_q)
1127{ 1066{
1128 struct scsi_cmnd *scmd, *tgtr_scmd, *next; 1067 struct scsi_cmnd *scmd, *tgtr_scmd, *next;
1129 unsigned int id; 1068 unsigned int id = 0;
1130 int rtn; 1069 int rtn;
1131 1070
1132 for (id = 0; id <= shost->max_id; id++) { 1071 do {
1133 tgtr_scmd = NULL; 1072 tgtr_scmd = NULL;
1134 list_for_each_entry(scmd, work_q, eh_entry) { 1073 list_for_each_entry(scmd, work_q, eh_entry) {
1135 if (id == scmd_id(scmd)) { 1074 if (id == scmd_id(scmd)) {
@@ -1137,8 +1076,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1137 break; 1076 break;
1138 } 1077 }
1139 } 1078 }
1079 if (!tgtr_scmd) {
1080 /* not one exactly equal; find the next highest */
1081 list_for_each_entry(scmd, work_q, eh_entry) {
1082 if (scmd_id(scmd) > id &&
1083 (!tgtr_scmd ||
1084 scmd_id(tgtr_scmd) > scmd_id(scmd)))
1085 tgtr_scmd = scmd;
1086 }
1087 }
1140 if (!tgtr_scmd) 1088 if (!tgtr_scmd)
1141 continue; 1089 /* no more commands, that's it */
1090 break;
1142 1091
1143 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " 1092 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
1144 "to target %d\n", 1093 "to target %d\n",
@@ -1157,7 +1106,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1157 " failed target: " 1106 " failed target: "
1158 "%d\n", 1107 "%d\n",
1159 current->comm, id)); 1108 current->comm, id));
1160 } 1109 id++;
1110 } while(id != 0);
1161 1111
1162 return list_empty(work_q); 1112 return list_empty(work_q);
1163} 1113}
@@ -1280,6 +1230,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
1280} 1230}
1281 1231
1282/** 1232/**
1233 * scsi_noretry_cmd - determinte if command should be failed fast
1234 * @scmd: SCSI cmd to examine.
1235 */
1236int scsi_noretry_cmd(struct scsi_cmnd *scmd)
1237{
1238 switch (host_byte(scmd->result)) {
1239 case DID_OK:
1240 break;
1241 case DID_BUS_BUSY:
1242 return blk_failfast_transport(scmd->request);
1243 case DID_PARITY:
1244 return blk_failfast_dev(scmd->request);
1245 case DID_ERROR:
1246 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1247 status_byte(scmd->result) == RESERVATION_CONFLICT)
1248 return 0;
1249 /* fall through */
1250 case DID_SOFT_ERROR:
1251 return blk_failfast_driver(scmd->request);
1252 }
1253
1254 switch (status_byte(scmd->result)) {
1255 case CHECK_CONDITION:
1256 /*
1257 * assume caller has checked sense and determinted
1258 * the check condition was retryable.
1259 */
1260 return blk_failfast_dev(scmd->request);
1261 }
1262
1263 return 0;
1264}
1265
1266/**
1283 * scsi_decide_disposition - Disposition a cmd on return from LLD. 1267 * scsi_decide_disposition - Disposition a cmd on return from LLD.
1284 * @scmd: SCSI cmd to examine. 1268 * @scmd: SCSI cmd to examine.
1285 * 1269 *
@@ -1351,7 +1335,21 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1351 1335
1352 case DID_REQUEUE: 1336 case DID_REQUEUE:
1353 return ADD_TO_MLQUEUE; 1337 return ADD_TO_MLQUEUE;
1354 1338 case DID_TRANSPORT_DISRUPTED:
1339 /*
1340 * LLD/transport was disrupted during processing of the IO.
1341 * The transport class is now blocked/blocking,
1342 * and the transport will decide what to do with the IO
1343 * based on its timers and recovery capablilities if
1344 * there are enough retries.
1345 */
1346 goto maybe_retry;
1347 case DID_TRANSPORT_FAILFAST:
1348 /*
1349 * The transport decided to failfast the IO (most likely
1350 * the fast io fail tmo fired), so send IO directly upwards.
1351 */
1352 return SUCCESS;
1355 case DID_ERROR: 1353 case DID_ERROR:
1356 if (msg_byte(scmd->result) == COMMAND_COMPLETE && 1354 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1357 status_byte(scmd->result) == RESERVATION_CONFLICT) 1355 status_byte(scmd->result) == RESERVATION_CONFLICT)
@@ -1444,7 +1442,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1444 * even if the request is marked fast fail, we still requeue 1442 * even if the request is marked fast fail, we still requeue
1445 * for queue congestion conditions (QUEUE_FULL or BUSY) */ 1443 * for queue congestion conditions (QUEUE_FULL or BUSY) */
1446 if ((++scmd->retries) <= scmd->allowed 1444 if ((++scmd->retries) <= scmd->allowed
1447 && !blk_noretry_request(scmd->request)) { 1445 && !scsi_noretry_cmd(scmd)) {
1448 return NEEDS_RETRY; 1446 return NEEDS_RETRY;
1449 } else { 1447 } else {
1450 /* 1448 /*
@@ -1569,7 +1567,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
1569 list_for_each_entry_safe(scmd, next, done_q, eh_entry) { 1567 list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1570 list_del_init(&scmd->eh_entry); 1568 list_del_init(&scmd->eh_entry);
1571 if (scsi_device_online(scmd->device) && 1569 if (scsi_device_online(scmd->device) &&
1572 !blk_noretry_request(scmd->request) && 1570 !scsi_noretry_cmd(scmd) &&
1573 (++scmd->retries <= scmd->allowed)) { 1571 (++scmd->retries <= scmd->allowed)) {
1574 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush" 1572 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1575 " retry cmd: %p\n", 1573 " retry cmd: %p\n",
@@ -1793,7 +1791,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1793 1791
1794 blk_rq_init(NULL, &req); 1792 blk_rq_init(NULL, &req);
1795 scmd->request = &req; 1793 scmd->request = &req;
1796 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1797 1794
1798 scmd->cmnd = req.cmd; 1795 scmd->cmnd = req.cmd;
1799 1796
@@ -1804,8 +1801,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1804 1801
1805 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 1802 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1806 1803
1807 init_timer(&scmd->eh_timeout);
1808
1809 spin_lock_irqsave(shost->host_lock, flags); 1804 spin_lock_irqsave(shost->host_lock, flags);
1810 shost->tmf_in_progress = 1; 1805 shost->tmf_in_progress = 1;
1811 spin_unlock_irqrestore(shost->host_lock, flags); 1806 spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 28b19ef26309..dc1cfb2fd76b 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -237,7 +237,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
237 case SCSI_IOCTL_SEND_COMMAND: 237 case SCSI_IOCTL_SEND_COMMAND:
238 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 238 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
239 return -EACCES; 239 return -EACCES;
240 return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg); 240 return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
241 case SCSI_IOCTL_DOORLOCK: 241 case SCSI_IOCTL_DOORLOCK:
242 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); 242 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
243 case SCSI_IOCTL_DOORUNLOCK: 243 case SCSI_IOCTL_DOORUNLOCK:
@@ -277,14 +277,14 @@ EXPORT_SYMBOL(scsi_ioctl);
277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag. 277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
278 */ 278 */
279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
280 void __user *arg, struct file *filp) 280 void __user *arg, int ndelay)
281{ 281{
282 int val, result; 282 int val, result;
283 283
284 /* The first set of iocts may be executed even if we're doing 284 /* The first set of iocts may be executed even if we're doing
285 * error processing, as long as the device was opened 285 * error processing, as long as the device was opened
286 * non-blocking */ 286 * non-blocking */
287 if (filp && (filp->f_flags & O_NONBLOCK)) { 287 if (ndelay) {
288 if (scsi_host_in_recovery(sdev->host)) 288 if (scsi_host_in_recovery(sdev->host))
289 return -ENODEV; 289 return -ENODEV;
290 } else if (!scsi_block_when_processing_errors(sdev)) 290 } else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62307bd794a9..f5d3b96890dc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
114{ 114{
115 struct Scsi_Host *host = cmd->device->host; 115 struct Scsi_Host *host = cmd->device->host;
116 struct scsi_device *device = cmd->device; 116 struct scsi_device *device = cmd->device;
117 struct scsi_target *starget = scsi_target(device);
117 struct request_queue *q = device->request_queue; 118 struct request_queue *q = device->request_queue;
118 unsigned long flags; 119 unsigned long flags;
119 120
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
133 * if a command is requeued with no other commands outstanding 134 * if a command is requeued with no other commands outstanding
134 * either for the device or for the host. 135 * either for the device or for the host.
135 */ 136 */
136 if (reason == SCSI_MLQUEUE_HOST_BUSY) 137 switch (reason) {
138 case SCSI_MLQUEUE_HOST_BUSY:
137 host->host_blocked = host->max_host_blocked; 139 host->host_blocked = host->max_host_blocked;
138 else if (reason == SCSI_MLQUEUE_DEVICE_BUSY) 140 break;
141 case SCSI_MLQUEUE_DEVICE_BUSY:
139 device->device_blocked = device->max_device_blocked; 142 device->device_blocked = device->max_device_blocked;
143 break;
144 case SCSI_MLQUEUE_TARGET_BUSY:
145 starget->target_blocked = starget->max_target_blocked;
146 break;
147 }
140 148
141 /* 149 /*
142 * Decrement the counters, since these commands are no longer 150 * Decrement the counters, since these commands are no longer
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
460void scsi_device_unbusy(struct scsi_device *sdev) 468void scsi_device_unbusy(struct scsi_device *sdev)
461{ 469{
462 struct Scsi_Host *shost = sdev->host; 470 struct Scsi_Host *shost = sdev->host;
471 struct scsi_target *starget = scsi_target(sdev);
463 unsigned long flags; 472 unsigned long flags;
464 473
465 spin_lock_irqsave(shost->host_lock, flags); 474 spin_lock_irqsave(shost->host_lock, flags);
466 shost->host_busy--; 475 shost->host_busy--;
476 starget->target_busy--;
467 if (unlikely(scsi_host_in_recovery(shost) && 477 if (unlikely(scsi_host_in_recovery(shost) &&
468 (shost->host_failed || shost->host_eh_scheduled))) 478 (shost->host_failed || shost->host_eh_scheduled)))
469 scsi_eh_wakeup(shost); 479 scsi_eh_wakeup(shost);
@@ -519,6 +529,30 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
519 spin_unlock_irqrestore(shost->host_lock, flags); 529 spin_unlock_irqrestore(shost->host_lock, flags);
520} 530}
521 531
532static inline int scsi_device_is_busy(struct scsi_device *sdev)
533{
534 if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
535 return 1;
536
537 return 0;
538}
539
540static inline int scsi_target_is_busy(struct scsi_target *starget)
541{
542 return ((starget->can_queue > 0 &&
543 starget->target_busy >= starget->can_queue) ||
544 starget->target_blocked);
545}
546
547static inline int scsi_host_is_busy(struct Scsi_Host *shost)
548{
549 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
550 shost->host_blocked || shost->host_self_blocked)
551 return 1;
552
553 return 0;
554}
555
522/* 556/*
523 * Function: scsi_run_queue() 557 * Function: scsi_run_queue()
524 * 558 *
@@ -533,7 +567,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
533 */ 567 */
534static void scsi_run_queue(struct request_queue *q) 568static void scsi_run_queue(struct request_queue *q)
535{ 569{
536 struct scsi_device *sdev = q->queuedata; 570 struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
537 struct Scsi_Host *shost = sdev->host; 571 struct Scsi_Host *shost = sdev->host;
538 unsigned long flags; 572 unsigned long flags;
539 573
@@ -541,11 +575,7 @@ static void scsi_run_queue(struct request_queue *q)
541 scsi_single_lun_run(sdev); 575 scsi_single_lun_run(sdev);
542 576
543 spin_lock_irqsave(shost->host_lock, flags); 577 spin_lock_irqsave(shost->host_lock, flags);
544 while (!list_empty(&shost->starved_list) && 578 while (!list_empty(&shost->starved_list) && !scsi_host_is_busy(shost)) {
545 !shost->host_blocked && !shost->host_self_blocked &&
546 !((shost->can_queue > 0) &&
547 (shost->host_busy >= shost->can_queue))) {
548
549 int flagset; 579 int flagset;
550 580
551 /* 581 /*
@@ -560,6 +590,21 @@ static void scsi_run_queue(struct request_queue *q)
560 */ 590 */
561 sdev = list_entry(shost->starved_list.next, 591 sdev = list_entry(shost->starved_list.next,
562 struct scsi_device, starved_entry); 592 struct scsi_device, starved_entry);
593 /*
594 * The *queue_ready functions can add a device back onto the
595 * starved list's tail, so we must check for a infinite loop.
596 */
597 if (sdev == starved_head)
598 break;
599 if (!starved_head)
600 starved_head = sdev;
601
602 if (scsi_target_is_busy(scsi_target(sdev))) {
603 list_move_tail(&sdev->starved_entry,
604 &shost->starved_list);
605 continue;
606 }
607
563 list_del_init(&sdev->starved_entry); 608 list_del_init(&sdev->starved_entry);
564 spin_unlock(shost->host_lock); 609 spin_unlock(shost->host_lock);
565 610
@@ -575,13 +620,6 @@ static void scsi_run_queue(struct request_queue *q)
575 spin_unlock(sdev->request_queue->queue_lock); 620 spin_unlock(sdev->request_queue->queue_lock);
576 621
577 spin_lock(shost->host_lock); 622 spin_lock(shost->host_lock);
578 if (unlikely(!list_empty(&sdev->starved_entry)))
579 /*
580 * sdev lost a race, and was put back on the
581 * starved list. This is unlikely but without this
582 * in theory we could loop forever.
583 */
584 break;
585 } 623 }
586 spin_unlock_irqrestore(shost->host_lock, flags); 624 spin_unlock_irqrestore(shost->host_lock, flags);
587 625
@@ -681,7 +719,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
681 leftover = req->data_len; 719 leftover = req->data_len;
682 720
683 /* kill remainder if no retrys */ 721 /* kill remainder if no retrys */
684 if (error && blk_noretry_request(req)) 722 if (error && scsi_noretry_cmd(cmd))
685 blk_end_request(req, error, leftover); 723 blk_end_request(req, error, leftover);
686 else { 724 else {
687 if (requeue) { 725 if (requeue) {
@@ -1181,7 +1219,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1181 1219
1182 cmd->transfersize = req->data_len; 1220 cmd->transfersize = req->data_len;
1183 cmd->allowed = req->retries; 1221 cmd->allowed = req->retries;
1184 cmd->timeout_per_command = req->timeout;
1185 return BLKPREP_OK; 1222 return BLKPREP_OK;
1186} 1223}
1187EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1224EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1251,6 +1288,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1251 break; 1288 break;
1252 case SDEV_QUIESCE: 1289 case SDEV_QUIESCE:
1253 case SDEV_BLOCK: 1290 case SDEV_BLOCK:
1291 case SDEV_CREATED_BLOCK:
1254 /* 1292 /*
1255 * If the devices is blocked we defer normal commands. 1293 * If the devices is blocked we defer normal commands.
1256 */ 1294 */
@@ -1323,8 +1361,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
1323static inline int scsi_dev_queue_ready(struct request_queue *q, 1361static inline int scsi_dev_queue_ready(struct request_queue *q,
1324 struct scsi_device *sdev) 1362 struct scsi_device *sdev)
1325{ 1363{
1326 if (sdev->device_busy >= sdev->queue_depth)
1327 return 0;
1328 if (sdev->device_busy == 0 && sdev->device_blocked) { 1364 if (sdev->device_busy == 0 && sdev->device_blocked) {
1329 /* 1365 /*
1330 * unblock after device_blocked iterates to zero 1366 * unblock after device_blocked iterates to zero
@@ -1338,12 +1374,58 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
1338 return 0; 1374 return 0;
1339 } 1375 }
1340 } 1376 }
1341 if (sdev->device_blocked) 1377 if (scsi_device_is_busy(sdev))
1342 return 0; 1378 return 0;
1343 1379
1344 return 1; 1380 return 1;
1345} 1381}
1346 1382
1383
1384/*
1385 * scsi_target_queue_ready: checks if there we can send commands to target
1386 * @sdev: scsi device on starget to check.
1387 *
1388 * Called with the host lock held.
1389 */
1390static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1391 struct scsi_device *sdev)
1392{
1393 struct scsi_target *starget = scsi_target(sdev);
1394
1395 if (starget->single_lun) {
1396 if (starget->starget_sdev_user &&
1397 starget->starget_sdev_user != sdev)
1398 return 0;
1399 starget->starget_sdev_user = sdev;
1400 }
1401
1402 if (starget->target_busy == 0 && starget->target_blocked) {
1403 /*
1404 * unblock after target_blocked iterates to zero
1405 */
1406 if (--starget->target_blocked == 0) {
1407 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1408 "unblocking target at zero depth\n"));
1409 } else {
1410 blk_plug_device(sdev->request_queue);
1411 return 0;
1412 }
1413 }
1414
1415 if (scsi_target_is_busy(starget)) {
1416 if (list_empty(&sdev->starved_entry)) {
1417 list_add_tail(&sdev->starved_entry,
1418 &shost->starved_list);
1419 return 0;
1420 }
1421 }
1422
1423 /* We're OK to process the command, so we can't be starved */
1424 if (!list_empty(&sdev->starved_entry))
1425 list_del_init(&sdev->starved_entry);
1426 return 1;
1427}
1428
1347/* 1429/*
1348 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1430 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1349 * return 0. We must end up running the queue again whenever 0 is 1431 * return 0. We must end up running the queue again whenever 0 is
@@ -1369,8 +1451,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1369 return 0; 1451 return 0;
1370 } 1452 }
1371 } 1453 }
1372 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 1454 if (scsi_host_is_busy(shost)) {
1373 shost->host_blocked || shost->host_self_blocked) {
1374 if (list_empty(&sdev->starved_entry)) 1455 if (list_empty(&sdev->starved_entry))
1375 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1456 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1376 return 0; 1457 return 0;
@@ -1384,12 +1465,44 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1384} 1465}
1385 1466
1386/* 1467/*
1468 * Busy state exporting function for request stacking drivers.
1469 *
1470 * For efficiency, no lock is taken to check the busy state of
1471 * shost/starget/sdev, since the returned value is not guaranteed and
1472 * may be changed after request stacking drivers call the function,
1473 * regardless of taking lock or not.
1474 *
1475 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1476 * (e.g. !sdev), scsi needs to return 'not busy'.
1477 * Otherwise, request stacking drivers may hold requests forever.
1478 */
1479static int scsi_lld_busy(struct request_queue *q)
1480{
1481 struct scsi_device *sdev = q->queuedata;
1482 struct Scsi_Host *shost;
1483 struct scsi_target *starget;
1484
1485 if (!sdev)
1486 return 0;
1487
1488 shost = sdev->host;
1489 starget = scsi_target(sdev);
1490
1491 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1492 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1493 return 1;
1494
1495 return 0;
1496}
1497
1498/*
1387 * Kill a request for a dead device 1499 * Kill a request for a dead device
1388 */ 1500 */
1389static void scsi_kill_request(struct request *req, struct request_queue *q) 1501static void scsi_kill_request(struct request *req, struct request_queue *q)
1390{ 1502{
1391 struct scsi_cmnd *cmd = req->special; 1503 struct scsi_cmnd *cmd = req->special;
1392 struct scsi_device *sdev = cmd->device; 1504 struct scsi_device *sdev = cmd->device;
1505 struct scsi_target *starget = scsi_target(sdev);
1393 struct Scsi_Host *shost = sdev->host; 1506 struct Scsi_Host *shost = sdev->host;
1394 1507
1395 blkdev_dequeue_request(req); 1508 blkdev_dequeue_request(req);
@@ -1413,20 +1526,30 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1413 spin_unlock(sdev->request_queue->queue_lock); 1526 spin_unlock(sdev->request_queue->queue_lock);
1414 spin_lock(shost->host_lock); 1527 spin_lock(shost->host_lock);
1415 shost->host_busy++; 1528 shost->host_busy++;
1529 starget->target_busy++;
1416 spin_unlock(shost->host_lock); 1530 spin_unlock(shost->host_lock);
1417 spin_lock(sdev->request_queue->queue_lock); 1531 spin_lock(sdev->request_queue->queue_lock);
1418 1532
1419 __scsi_done(cmd); 1533 blk_complete_request(req);
1420} 1534}
1421 1535
1422static void scsi_softirq_done(struct request *rq) 1536static void scsi_softirq_done(struct request *rq)
1423{ 1537{
1424 struct scsi_cmnd *cmd = rq->completion_data; 1538 struct scsi_cmnd *cmd = rq->special;
1425 unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; 1539 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1426 int disposition; 1540 int disposition;
1427 1541
1428 INIT_LIST_HEAD(&cmd->eh_entry); 1542 INIT_LIST_HEAD(&cmd->eh_entry);
1429 1543
1544 /*
1545 * Set the serial numbers back to zero
1546 */
1547 cmd->serial_number = 0;
1548
1549 atomic_inc(&cmd->device->iodone_cnt);
1550 if (cmd->result)
1551 atomic_inc(&cmd->device->ioerr_cnt);
1552
1430 disposition = scsi_decide_disposition(cmd); 1553 disposition = scsi_decide_disposition(cmd);
1431 if (disposition != SUCCESS && 1554 if (disposition != SUCCESS &&
1432 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1555 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1541,14 +1664,13 @@ static void scsi_request_fn(struct request_queue *q)
1541 goto not_ready; 1664 goto not_ready;
1542 } 1665 }
1543 1666
1667 if (!scsi_target_queue_ready(shost, sdev))
1668 goto not_ready;
1669
1544 if (!scsi_host_queue_ready(q, shost, sdev)) 1670 if (!scsi_host_queue_ready(q, shost, sdev))
1545 goto not_ready; 1671 goto not_ready;
1546 if (scsi_target(sdev)->single_lun) { 1672
1547 if (scsi_target(sdev)->starget_sdev_user && 1673 scsi_target(sdev)->target_busy++;
1548 scsi_target(sdev)->starget_sdev_user != sdev)
1549 goto not_ready;
1550 scsi_target(sdev)->starget_sdev_user = sdev;
1551 }
1552 shost->host_busy++; 1674 shost->host_busy++;
1553 1675
1554 /* 1676 /*
@@ -1675,6 +1797,8 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1675 1797
1676 blk_queue_prep_rq(q, scsi_prep_fn); 1798 blk_queue_prep_rq(q, scsi_prep_fn);
1677 blk_queue_softirq_done(q, scsi_softirq_done); 1799 blk_queue_softirq_done(q, scsi_softirq_done);
1800 blk_queue_rq_timed_out(q, scsi_times_out);
1801 blk_queue_lld_busy(q, scsi_lld_busy);
1678 return q; 1802 return q;
1679} 1803}
1680 1804
@@ -2023,22 +2147,21 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2023 do { 2147 do {
2024 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2148 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2025 timeout, retries); 2149 timeout, retries);
2026 } while ((driver_byte(result) & DRIVER_SENSE) && 2150 if (sdev->removable && scsi_sense_valid(sshdr) &&
2027 sshdr && sshdr->sense_key == UNIT_ATTENTION && 2151 sshdr->sense_key == UNIT_ATTENTION)
2028 --retries); 2152 sdev->changed = 1;
2153 } while (scsi_sense_valid(sshdr) &&
2154 sshdr->sense_key == UNIT_ATTENTION && --retries);
2029 2155
2030 if (!sshdr) 2156 if (!sshdr)
2031 /* could not allocate sense buffer, so can't process it */ 2157 /* could not allocate sense buffer, so can't process it */
2032 return result; 2158 return result;
2033 2159
2034 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { 2160 if (sdev->removable && scsi_sense_valid(sshdr) &&
2035 2161 (sshdr->sense_key == UNIT_ATTENTION ||
2036 if ((scsi_sense_valid(sshdr)) && 2162 sshdr->sense_key == NOT_READY)) {
2037 ((sshdr->sense_key == UNIT_ATTENTION) || 2163 sdev->changed = 1;
2038 (sshdr->sense_key == NOT_READY))) { 2164 result = 0;
2039 sdev->changed = 1;
2040 result = 0;
2041 }
2042 } 2165 }
2043 if (!sshdr_external) 2166 if (!sshdr_external)
2044 kfree(sshdr); 2167 kfree(sshdr);
@@ -2064,10 +2187,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2064 2187
2065 switch (state) { 2188 switch (state) {
2066 case SDEV_CREATED: 2189 case SDEV_CREATED:
2067 /* There are no legal states that come back to 2190 switch (oldstate) {
2068 * created. This is the manually initialised start 2191 case SDEV_CREATED_BLOCK:
2069 * state */ 2192 break;
2070 goto illegal; 2193 default:
2194 goto illegal;
2195 }
2196 break;
2071 2197
2072 case SDEV_RUNNING: 2198 case SDEV_RUNNING:
2073 switch (oldstate) { 2199 switch (oldstate) {
@@ -2105,8 +2231,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2105 2231
2106 case SDEV_BLOCK: 2232 case SDEV_BLOCK:
2107 switch (oldstate) { 2233 switch (oldstate) {
2108 case SDEV_CREATED:
2109 case SDEV_RUNNING: 2234 case SDEV_RUNNING:
2235 case SDEV_CREATED_BLOCK:
2236 break;
2237 default:
2238 goto illegal;
2239 }
2240 break;
2241
2242 case SDEV_CREATED_BLOCK:
2243 switch (oldstate) {
2244 case SDEV_CREATED:
2110 break; 2245 break;
2111 default: 2246 default:
2112 goto illegal; 2247 goto illegal;
@@ -2394,8 +2529,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
2394 int err = 0; 2529 int err = 0;
2395 2530
2396 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2531 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2397 if (err) 2532 if (err) {
2398 return err; 2533 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2534
2535 if (err)
2536 return err;
2537 }
2399 2538
2400 /* 2539 /*
2401 * The device has transitioned to SDEV_BLOCK. Stop the 2540 * The device has transitioned to SDEV_BLOCK. Stop the
@@ -2438,8 +2577,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
2438 * and goose the device queue if successful. 2577 * and goose the device queue if successful.
2439 */ 2578 */
2440 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2579 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2441 if (err) 2580 if (err) {
2442 return err; 2581 err = scsi_device_set_state(sdev, SDEV_CREATED);
2582
2583 if (err)
2584 return err;
2585 }
2443 2586
2444 spin_lock_irqsave(q->queue_lock, flags); 2587 spin_lock_irqsave(q->queue_lock, flags);
2445 blk_start_queue(q); 2588 blk_start_queue(q);
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a22662..723fdecd91bd 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/delay.h>
24#include <net/sock.h> 25#include <net/sock.h>
25#include <net/netlink.h> 26#include <net/netlink.h>
26 27
@@ -30,6 +31,39 @@
30struct sock *scsi_nl_sock = NULL; 31struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock); 32EXPORT_SYMBOL_GPL(scsi_nl_sock);
32 33
34static DEFINE_SPINLOCK(scsi_nl_lock);
35static struct list_head scsi_nl_drivers;
36
37static u32 scsi_nl_state;
38#define STATE_EHANDLER_BSY 0x00000001
39
40struct scsi_nl_transport {
41 int (*msg_handler)(struct sk_buff *);
42 void (*event_handler)(struct notifier_block *, unsigned long, void *);
43 unsigned int refcnt;
44 int flags;
45};
46
47/* flags values (bit flags) */
48#define HANDLER_DELETING 0x1
49
50static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
51 { {NULL, }, };
52
53
54struct scsi_nl_drvr {
55 struct list_head next;
56 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
57 u32 len, u32 pid);
58 void (*devt_handler)(struct notifier_block *nb,
59 unsigned long event, void *notify_ptr);
60 struct scsi_host_template *hostt;
61 u64 vendor_id;
62 unsigned int refcnt;
63 int flags;
64};
65
66
33 67
34/** 68/**
35 * scsi_nl_rcv_msg - Receive message handler. 69 * scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
45{ 79{
46 struct nlmsghdr *nlh; 80 struct nlmsghdr *nlh;
47 struct scsi_nl_hdr *hdr; 81 struct scsi_nl_hdr *hdr;
48 uint32_t rlen; 82 unsigned long flags;
49 int err; 83 u32 rlen;
84 int err, tport;
50 85
51 while (skb->len >= NLMSG_SPACE(0)) { 86 while (skb->len >= NLMSG_SPACE(0)) {
52 err = 0; 87 err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
65 100
66 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { 101 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
67 err = -EBADMSG; 102 err = -EBADMSG;
68 return; 103 goto next_msg;
69 } 104 }
70 105
71 hdr = NLMSG_DATA(nlh); 106 hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 118 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 119 printk(KERN_WARNING "%s: discarding partial message\n",
85 __func__); 120 __func__);
86 return; 121 goto next_msg;
87 } 122 }
88 123
89 /* 124 /*
90 * We currently don't support anyone sending us a message 125 * Deliver message to the appropriate transport
91 */ 126 */
127 spin_lock_irqsave(&scsi_nl_lock, flags);
128
129 tport = hdr->transport;
130 if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
131 !(transports[tport].flags & HANDLER_DELETING) &&
132 (transports[tport].msg_handler)) {
133 transports[tport].refcnt++;
134 spin_unlock_irqrestore(&scsi_nl_lock, flags);
135 err = transports[tport].msg_handler(skb);
136 spin_lock_irqsave(&scsi_nl_lock, flags);
137 transports[tport].refcnt--;
138 } else
139 err = -ENOENT;
140
141 spin_unlock_irqrestore(&scsi_nl_lock, flags);
92 142
93next_msg: 143next_msg:
94 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 144 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
110scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) 160scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
111{ 161{
112 struct netlink_notify *n = ptr; 162 struct netlink_notify *n = ptr;
163 struct scsi_nl_drvr *driver;
164 unsigned long flags;
165 int tport;
113 166
114 if (n->protocol != NETLINK_SCSITRANSPORT) 167 if (n->protocol != NETLINK_SCSITRANSPORT)
115 return NOTIFY_DONE; 168 return NOTIFY_DONE;
116 169
170 spin_lock_irqsave(&scsi_nl_lock, flags);
171 scsi_nl_state |= STATE_EHANDLER_BSY;
172
173 /*
174 * Pass event on to any transports that may be listening
175 */
176 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
177 if (!(transports[tport].flags & HANDLER_DELETING) &&
178 (transports[tport].event_handler)) {
179 spin_unlock_irqrestore(&scsi_nl_lock, flags);
180 transports[tport].event_handler(this, event, ptr);
181 spin_lock_irqsave(&scsi_nl_lock, flags);
182 }
183 }
184
117 /* 185 /*
118 * Currently, we are not tracking PID's, etc. There is nothing 186 * Pass event on to any drivers that may be listening
119 * to handle.
120 */ 187 */
188 list_for_each_entry(driver, &scsi_nl_drivers, next) {
189 if (!(driver->flags & HANDLER_DELETING) &&
190 (driver->devt_handler)) {
191 spin_unlock_irqrestore(&scsi_nl_lock, flags);
192 driver->devt_handler(this, event, ptr);
193 spin_lock_irqsave(&scsi_nl_lock, flags);
194 }
195 }
196
197 scsi_nl_state &= ~STATE_EHANDLER_BSY;
198 spin_unlock_irqrestore(&scsi_nl_lock, flags);
121 199
122 return NOTIFY_DONE; 200 return NOTIFY_DONE;
123} 201}
@@ -127,8 +205,279 @@ static struct notifier_block scsi_netlink_notifier = {
127}; 205};
128 206
129 207
208/*
209 * GENERIC SCSI transport receive and event handlers
210 */
211
212/**
213 * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
214 * @skb: socket receive buffer
215 **/
216static int
217scsi_generic_msg_handler(struct sk_buff *skb)
218{
219 struct nlmsghdr *nlh = nlmsg_hdr(skb);
220 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
221 struct scsi_nl_drvr *driver;
222 struct Scsi_Host *shost;
223 unsigned long flags;
224 int err = 0, match, pid;
225
226 pid = NETLINK_CREDS(skb)->pid;
227
228 switch (snlh->msgtype) {
229 case SCSI_NL_SHOST_VENDOR:
230 {
231 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
232
233 /* Locate the driver that corresponds to the message */
234 spin_lock_irqsave(&scsi_nl_lock, flags);
235 match = 0;
236 list_for_each_entry(driver, &scsi_nl_drivers, next) {
237 if (driver->vendor_id == msg->vendor_id) {
238 match = 1;
239 break;
240 }
241 }
242
243 if ((!match) || (!driver->dmsg_handler)) {
244 spin_unlock_irqrestore(&scsi_nl_lock, flags);
245 err = -ESRCH;
246 goto rcv_exit;
247 }
248
249 if (driver->flags & HANDLER_DELETING) {
250 spin_unlock_irqrestore(&scsi_nl_lock, flags);
251 err = -ESHUTDOWN;
252 goto rcv_exit;
253 }
254
255 driver->refcnt++;
256 spin_unlock_irqrestore(&scsi_nl_lock, flags);
257
258
259 /* if successful, scsi_host_lookup takes a shost reference */
260 shost = scsi_host_lookup(msg->host_no);
261 if (!shost) {
262 err = -ENODEV;
263 goto driver_exit;
264 }
265
266 /* is this host owned by the vendor ? */
267 if (shost->hostt != driver->hostt) {
268 err = -EINVAL;
269 goto vendormsg_put;
270 }
271
272 /* pass message on to the driver */
273 err = driver->dmsg_handler(shost, (void *)&msg[1],
274 msg->vmsg_datalen, pid);
275
276vendormsg_put:
277 /* release reference by scsi_host_lookup */
278 scsi_host_put(shost);
279
280driver_exit:
281 /* release our own reference on the registration object */
282 spin_lock_irqsave(&scsi_nl_lock, flags);
283 driver->refcnt--;
284 spin_unlock_irqrestore(&scsi_nl_lock, flags);
285 break;
286 }
287
288 default:
289 err = -EBADR;
290 break;
291 }
292
293rcv_exit:
294 if (err)
295 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
296 __func__, snlh->msgtype, err);
297 return err;
298}
299
300
301/**
302 * scsi_nl_add_transport -
303 * Registers message and event handlers for a transport. Enables
304 * receipt of netlink messages and events to a transport.
305 *
306 * @tport: transport registering handlers
307 * @msg_handler: receive message handler callback
308 * @event_handler: receive event handler callback
309 **/
310int
311scsi_nl_add_transport(u8 tport,
312 int (*msg_handler)(struct sk_buff *),
313 void (*event_handler)(struct notifier_block *, unsigned long, void *))
314{
315 unsigned long flags;
316 int err = 0;
317
318 if (tport >= SCSI_NL_MAX_TRANSPORTS)
319 return -EINVAL;
320
321 spin_lock_irqsave(&scsi_nl_lock, flags);
322
323 if (scsi_nl_state & STATE_EHANDLER_BSY) {
324 spin_unlock_irqrestore(&scsi_nl_lock, flags);
325 msleep(1);
326 spin_lock_irqsave(&scsi_nl_lock, flags);
327 }
328
329 if (transports[tport].msg_handler || transports[tport].event_handler) {
330 err = -EALREADY;
331 goto register_out;
332 }
333
334 transports[tport].msg_handler = msg_handler;
335 transports[tport].event_handler = event_handler;
336 transports[tport].flags = 0;
337 transports[tport].refcnt = 0;
338
339register_out:
340 spin_unlock_irqrestore(&scsi_nl_lock, flags);
341
342 return err;
343}
344EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
345
346
130/** 347/**
131 * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface 348 * scsi_nl_remove_transport -
349 * Disable transport receiption of messages and events
350 *
351 * @tport: transport deregistering handlers
352 *
353 **/
354void
355scsi_nl_remove_transport(u8 tport)
356{
357 unsigned long flags;
358
359 spin_lock_irqsave(&scsi_nl_lock, flags);
360 if (scsi_nl_state & STATE_EHANDLER_BSY) {
361 spin_unlock_irqrestore(&scsi_nl_lock, flags);
362 msleep(1);
363 spin_lock_irqsave(&scsi_nl_lock, flags);
364 }
365
366 if (tport < SCSI_NL_MAX_TRANSPORTS) {
367 transports[tport].flags |= HANDLER_DELETING;
368
369 while (transports[tport].refcnt != 0) {
370 spin_unlock_irqrestore(&scsi_nl_lock, flags);
371 schedule_timeout_uninterruptible(HZ/4);
372 spin_lock_irqsave(&scsi_nl_lock, flags);
373 }
374 transports[tport].msg_handler = NULL;
375 transports[tport].event_handler = NULL;
376 transports[tport].flags = 0;
377 }
378
379 spin_unlock_irqrestore(&scsi_nl_lock, flags);
380
381 return;
382}
383EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
384
385
386/**
387 * scsi_nl_add_driver -
388 * A driver is registering its interfaces for SCSI netlink messages
389 *
390 * @vendor_id: A unique identification value for the driver.
391 * @hostt: address of the driver's host template. Used
392 * to verify an shost is bound to the driver
393 * @nlmsg_handler: receive message handler callback
394 * @nlevt_handler: receive event handler callback
395 *
396 * Returns:
397 * 0 on Success
398 * error result otherwise
399 **/
400int
401scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
402 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
403 u32 len, u32 pid),
404 void (*nlevt_handler)(struct notifier_block *nb,
405 unsigned long event, void *notify_ptr))
406{
407 struct scsi_nl_drvr *driver;
408 unsigned long flags;
409
410 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
411 if (unlikely(!driver)) {
412 printk(KERN_ERR "%s: allocation failure\n", __func__);
413 return -ENOMEM;
414 }
415
416 driver->dmsg_handler = nlmsg_handler;
417 driver->devt_handler = nlevt_handler;
418 driver->hostt = hostt;
419 driver->vendor_id = vendor_id;
420
421 spin_lock_irqsave(&scsi_nl_lock, flags);
422 if (scsi_nl_state & STATE_EHANDLER_BSY) {
423 spin_unlock_irqrestore(&scsi_nl_lock, flags);
424 msleep(1);
425 spin_lock_irqsave(&scsi_nl_lock, flags);
426 }
427 list_add_tail(&driver->next, &scsi_nl_drivers);
428 spin_unlock_irqrestore(&scsi_nl_lock, flags);
429
430 return 0;
431}
432EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
433
434
435/**
436 * scsi_nl_remove_driver -
437 * An driver is unregistering with the SCSI netlink messages
438 *
439 * @vendor_id: The unique identification value for the driver.
440 **/
441void
442scsi_nl_remove_driver(u64 vendor_id)
443{
444 struct scsi_nl_drvr *driver;
445 unsigned long flags;
446
447 spin_lock_irqsave(&scsi_nl_lock, flags);
448 if (scsi_nl_state & STATE_EHANDLER_BSY) {
449 spin_unlock_irqrestore(&scsi_nl_lock, flags);
450 msleep(1);
451 spin_lock_irqsave(&scsi_nl_lock, flags);
452 }
453
454 list_for_each_entry(driver, &scsi_nl_drivers, next) {
455 if (driver->vendor_id == vendor_id) {
456 driver->flags |= HANDLER_DELETING;
457 while (driver->refcnt != 0) {
458 spin_unlock_irqrestore(&scsi_nl_lock, flags);
459 schedule_timeout_uninterruptible(HZ/4);
460 spin_lock_irqsave(&scsi_nl_lock, flags);
461 }
462 list_del(&driver->next);
463 kfree(driver);
464 spin_unlock_irqrestore(&scsi_nl_lock, flags);
465 return;
466 }
467 }
468
469 spin_unlock_irqrestore(&scsi_nl_lock, flags);
470
471 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
472 __func__, (unsigned long long)vendor_id);
473 return;
474}
475EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
476
477
478/**
479 * scsi_netlink_init - Called by SCSI subsystem to intialize
480 * the SCSI transport netlink interface
132 * 481 *
133 **/ 482 **/
134void 483void
@@ -136,6 +485,8 @@ scsi_netlink_init(void)
136{ 485{
137 int error; 486 int error;
138 487
488 INIT_LIST_HEAD(&scsi_nl_drivers);
489
139 error = netlink_register_notifier(&scsi_netlink_notifier); 490 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 491 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 492 printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +501,15 @@ scsi_netlink_init(void)
150 printk(KERN_ERR "%s: register of recieve handler failed\n", 501 printk(KERN_ERR "%s: register of recieve handler failed\n",
151 __func__); 502 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 503 netlink_unregister_notifier(&scsi_netlink_notifier);
504 return;
153 } 505 }
154 506
507 /* Register the entry points for the generic SCSI transport */
508 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
509 scsi_generic_msg_handler, NULL);
510 if (error)
511 printk(KERN_ERR "%s: register of GENERIC transport handler"
512 " failed - %d\n", __func__, error);
155 return; 513 return;
156} 514}
157 515
@@ -163,6 +521,8 @@ scsi_netlink_init(void)
163void 521void
164scsi_netlink_exit(void) 522scsi_netlink_exit(void)
165{ 523{
524 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
525
166 if (scsi_nl_sock) { 526 if (scsi_nl_sock) {
167 netlink_kernel_release(scsi_nl_sock); 527 netlink_kernel_release(scsi_nl_sock);
168 netlink_unregister_notifier(&scsi_netlink_notifier); 528 netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +532,147 @@ scsi_netlink_exit(void)
172} 532}
173 533
174 534
535/*
536 * Exported Interfaces
537 */
538
539/**
540 * scsi_nl_send_transport_msg -
541 * Generic function to send a single message from a SCSI transport to
542 * a single process
543 *
544 * @pid: receiving pid
545 * @hdr: message payload
546 *
547 **/
548void
549scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
550{
551 struct sk_buff *skb;
552 struct nlmsghdr *nlh;
553 const char *fn;
554 char *datab;
555 u32 len, skblen;
556 int err;
557
558 if (!scsi_nl_sock) {
559 err = -ENOENT;
560 fn = "netlink socket";
561 goto msg_fail;
562 }
563
564 len = NLMSG_SPACE(hdr->msglen);
565 skblen = NLMSG_SPACE(len);
566
567 skb = alloc_skb(skblen, GFP_KERNEL);
568 if (!skb) {
569 err = -ENOBUFS;
570 fn = "alloc_skb";
571 goto msg_fail;
572 }
573
574 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
575 if (!nlh) {
576 err = -ENOBUFS;
577 fn = "nlmsg_put";
578 goto msg_fail_skb;
579 }
580 datab = NLMSG_DATA(nlh);
581 memcpy(datab, hdr, hdr->msglen);
582
583 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
584 if (err < 0) {
585 fn = "nlmsg_unicast";
586 /* nlmsg_unicast already kfree_skb'd */
587 goto msg_fail;
588 }
589
590 return;
591
592msg_fail_skb:
593 kfree_skb(skb);
594msg_fail:
595 printk(KERN_WARNING
596 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
597 "msglen %d: %s : err %d\n",
598 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
599 fn, err);
600 return;
601}
602EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
603
604
605/**
606 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
607 * to a specific process id.
608 *
609 * @pid: process id of the receiver
610 * @host_no: host # sending the message
611 * @vendor_id: unique identifier for the driver's vendor
612 * @data_len: amount, in bytes, of vendor unique payload data
613 * @data_buf: pointer to vendor unique data buffer
614 *
615 * Returns:
616 * 0 on succesful return
617 * otherwise, failing error code
618 *
619 * Notes:
620 * This routine assumes no locks are held on entry.
621 */
622int
623scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
624 char *data_buf, u32 data_len)
625{
626 struct sk_buff *skb;
627 struct nlmsghdr *nlh;
628 struct scsi_nl_host_vendor_msg *msg;
629 u32 len, skblen;
630 int err;
631
632 if (!scsi_nl_sock) {
633 err = -ENOENT;
634 goto send_vendor_fail;
635 }
636
637 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
638 skblen = NLMSG_SPACE(len);
639
640 skb = alloc_skb(skblen, GFP_KERNEL);
641 if (!skb) {
642 err = -ENOBUFS;
643 goto send_vendor_fail;
644 }
645
646 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
647 skblen - sizeof(*nlh), 0);
648 if (!nlh) {
649 err = -ENOBUFS;
650 goto send_vendor_fail_skb;
651 }
652 msg = NLMSG_DATA(nlh);
653
654 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
655 SCSI_NL_SHOST_VENDOR, len);
656 msg->vendor_id = vendor_id;
657 msg->host_no = host_no;
658 msg->vmsg_datalen = data_len; /* bytes */
659 memcpy(&msg[1], data_buf, data_len);
660
661 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
662 if (err)
663 /* nlmsg_multicast already kfree_skb'd */
664 goto send_vendor_fail;
665
666 return 0;
667
668send_vendor_fail_skb:
669 kfree_skb(skb);
670send_vendor_fail:
671 printk(KERN_WARNING
672 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
673 __func__, host_no, err);
674 return err;
675}
676EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
677
678
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f7511204..e1850904ff73 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
4#include <linux/device.h> 4#include <linux/device.h>
5 5
6struct request_queue; 6struct request_queue;
7struct request;
7struct scsi_cmnd; 8struct scsi_cmnd;
8struct scsi_device; 9struct scsi_device;
9struct scsi_host_template; 10struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
27extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); 28extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
28extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 29extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 30extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
30extern void __scsi_done(struct scsi_cmnd *cmd);
31#ifdef CONFIG_SCSI_LOGGING 31#ifdef CONFIG_SCSI_LOGGING
32void scsi_log_send(struct scsi_cmnd *cmd); 32void scsi_log_send(struct scsi_cmnd *cmd);
33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
49extern void scsi_exit_devinfo(void); 49extern void scsi_exit_devinfo(void);
50 50
51/* scsi_error.c */ 51/* scsi_error.c */
52extern void scsi_add_timer(struct scsi_cmnd *, int, 52extern enum blk_eh_timer_return scsi_times_out(struct request *req);
53 void (*)(struct scsi_cmnd *));
54extern int scsi_delete_timer(struct scsi_cmnd *);
55extern void scsi_times_out(struct scsi_cmnd *cmd);
56extern int scsi_error_handler(void *host); 53extern int scsi_error_handler(void *host);
57extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 54extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
58extern void scsi_eh_wakeup(struct Scsi_Host *shost); 55extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -62,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
62 struct list_head *done_q); 59 struct list_head *done_q);
63int scsi_eh_get_sense(struct list_head *work_q, 60int scsi_eh_get_sense(struct list_head *work_q,
64 struct list_head *done_q); 61 struct list_head *done_q);
62int scsi_noretry_cmd(struct scsi_cmnd *scmd);
65 63
66/* scsi_lib.c */ 64/* scsi_lib.c */
67extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 65extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf9..82f7b2dd08a2 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
259 int error = -ENXIO; 259 int error = -ENXIO;
260 260
261 shost = scsi_host_lookup(host); 261 shost = scsi_host_lookup(host);
262 if (IS_ERR(shost)) 262 if (!shost)
263 return PTR_ERR(shost); 263 return error;
264 264
265 if (shost->transportt->user_scan) 265 if (shost->transportt->user_scan)
266 error = shost->transportt->user_scan(shost, channel, id, lun); 266 error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
287 int error = -ENXIO; 287 int error = -ENXIO;
288 288
289 shost = scsi_host_lookup(host); 289 shost = scsi_host_lookup(host);
290 if (IS_ERR(shost)) 290 if (!shost)
291 return PTR_ERR(shost); 291 return error;
292 sdev = scsi_device_lookup(shost, channel, id, lun); 292 sdev = scsi_device_lookup(shost, channel, id, lun);
293 if (sdev) { 293 if (sdev) {
294 scsi_remove_device(sdev); 294 scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 34d0de6cd511..b14dc02c3ded 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
419 dev->type = &scsi_target_type; 419 dev->type = &scsi_target_type;
420 starget->id = id; 420 starget->id = id;
421 starget->channel = channel; 421 starget->channel = channel;
422 starget->can_queue = 0;
422 INIT_LIST_HEAD(&starget->siblings); 423 INIT_LIST_HEAD(&starget->siblings);
423 INIT_LIST_HEAD(&starget->devices); 424 INIT_LIST_HEAD(&starget->devices);
424 starget->state = STARGET_CREATED; 425 starget->state = STARGET_CREATED;
@@ -730,6 +731,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 731static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
731 int *bflags, int async) 732 int *bflags, int async)
732{ 733{
734 int ret;
735
733 /* 736 /*
734 * XXX do not save the inquiry, since it can change underneath us, 737 * XXX do not save the inquiry, since it can change underneath us,
735 * save just vendor/model/rev. 738 * save just vendor/model/rev.
@@ -885,7 +888,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
885 888
886 /* set the device running here so that slave configure 889 /* set the device running here so that slave configure
887 * may do I/O */ 890 * may do I/O */
888 scsi_device_set_state(sdev, SDEV_RUNNING); 891 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
892 if (ret) {
893 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
894
895 if (ret) {
896 sdev_printk(KERN_ERR, sdev,
897 "in wrong state %s to complete scan\n",
898 scsi_device_state_name(sdev->sdev_state));
899 return SCSI_SCAN_NO_RESPONSE;
900 }
901 }
889 902
890 if (*bflags & BLIST_MS_192_BYTES_FOR_3F) 903 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
891 sdev->use_192_bytes_for_3f = 1; 904 sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +912,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
899 transport_configure_device(&sdev->sdev_gendev); 912 transport_configure_device(&sdev->sdev_gendev);
900 913
901 if (sdev->host->hostt->slave_configure) { 914 if (sdev->host->hostt->slave_configure) {
902 int ret = sdev->host->hostt->slave_configure(sdev); 915 ret = sdev->host->hostt->slave_configure(sdev);
903 if (ret) { 916 if (ret) {
904 /* 917 /*
905 * if LLDD reports slave not present, don't clutter 918 * if LLDD reports slave not present, don't clutter
@@ -994,7 +1007,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
994 */ 1007 */
995 sdev = scsi_device_lookup_by_target(starget, lun); 1008 sdev = scsi_device_lookup_by_target(starget, lun);
996 if (sdev) { 1009 if (sdev) {
997 if (rescan || sdev->sdev_state != SDEV_CREATED) { 1010 if (rescan || !scsi_device_created(sdev)) {
998 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1011 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
999 "scsi scan: device exists on %s\n", 1012 "scsi scan: device exists on %s\n",
1000 sdev->sdev_gendev.bus_id)); 1013 sdev->sdev_gendev.bus_id));
@@ -1467,7 +1480,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1467 kfree(lun_data); 1480 kfree(lun_data);
1468 out: 1481 out:
1469 scsi_device_put(sdev); 1482 scsi_device_put(sdev);
1470 if (sdev->sdev_state == SDEV_CREATED) 1483 if (scsi_device_created(sdev))
1471 /* 1484 /*
1472 * the sdev we used didn't appear in the report luns scan 1485 * the sdev we used didn't appear in the report luns scan
1473 */ 1486 */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be5..93c28f30bbd7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
34 { SDEV_QUIESCE, "quiesce" }, 34 { SDEV_QUIESCE, "quiesce" },
35 { SDEV_OFFLINE, "offline" }, 35 { SDEV_OFFLINE, "offline" },
36 { SDEV_BLOCK, "blocked" }, 36 { SDEV_BLOCK, "blocked" },
37 { SDEV_CREATED_BLOCK, "created-blocked" },
37}; 38};
38 39
39const char *scsi_device_state_name(enum scsi_device_state state) 40const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
560sdev_rd_attr (model, "%.16s\n"); 561sdev_rd_attr (model, "%.16s\n");
561sdev_rd_attr (rev, "%.4s\n"); 562sdev_rd_attr (rev, "%.4s\n");
562 563
564/*
565 * TODO: can we make these symlinks to the block layer ones?
566 */
563static ssize_t 567static ssize_t
564sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 568sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
565{ 569{
566 struct scsi_device *sdev; 570 struct scsi_device *sdev;
567 sdev = to_scsi_device(dev); 571 sdev = to_scsi_device(dev);
568 return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); 572 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
569} 573}
570 574
571static ssize_t 575static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
576 int timeout; 580 int timeout;
577 sdev = to_scsi_device(dev); 581 sdev = to_scsi_device(dev);
578 sscanf (buf, "%d\n", &timeout); 582 sscanf (buf, "%d\n", &timeout);
579 sdev->timeout = timeout * HZ; 583 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
580 return count; 584 return count;
581} 585}
582static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 586static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..48ba413f7f6a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
362 int err; 362 int err;
363 363
364 dprintk("%lx %u\n", uaddr, len); 364 dprintk("%lx %u\n", uaddr, len);
365 err = blk_rq_map_user(q, rq, (void *)uaddr, len); 365 err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
366 if (err) { 366 if (err) {
367 /* 367 /*
368 * TODO: need to fixup sg_tablesize, max_segment_size, 368 * TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
460 460
461 /* TODO: replace with a O(1) alg */ 461 /* TODO: replace with a O(1) alg */
462 shost = scsi_host_lookup(host_no); 462 shost = scsi_host_lookup(host_no);
463 if (IS_ERR(shost)) { 463 if (!shost) {
464 printk(KERN_ERR "Could not find host no %d\n", host_no); 464 printk(KERN_ERR "Could not find host no %d\n", host_no);
465 return -EINVAL; 465 return -EINVAL;
466 } 466 }
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); 550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
551 551
552 shost = scsi_host_lookup(host_no); 552 shost = scsi_host_lookup(host_no);
553 if (IS_ERR(shost)) { 553 if (!shost) {
554 printk(KERN_ERR "Could not find host no %d\n", host_no); 554 printk(KERN_ERR "Could not find host no %d\n", host_no);
555 return err; 555 return err;
556 } 556 }
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); 603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
604 604
605 shost = scsi_host_lookup(host_no); 605 shost = scsi_host_lookup(host_no);
606 if (IS_ERR(shost)) { 606 if (!shost) {
607 printk(KERN_ERR "Could not find host no %d\n", host_no); 607 printk(KERN_ERR "Could not find host no %d\n", host_no);
608 return err; 608 return err;
609 } 609 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb84..1e71abf0607a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
40 40
41static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 41static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
42static void fc_vport_sched_delete(struct work_struct *work); 42static void fc_vport_sched_delete(struct work_struct *work);
43 43static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44/*
45 * This is a temporary carrier for creating a vport. It will eventually
46 * be replaced by a real message definition for sgio or netlink.
47 *
48 * fc_vport_identifiers: This set of data contains all elements
49 * to uniquely identify and instantiate a FC virtual port.
50 *
51 * Notes:
52 * symbolic_name: The driver is to append the symbolic_name string data
53 * to the symbolic_node_name data that it generates by default.
54 * the resulting combination should then be registered with the switch.
55 * It is expected that things like Xen may stuff a VM title into
56 * this field.
57 */
58struct fc_vport_identifiers {
59 u64 node_name;
60 u64 port_name;
61 u32 roles;
62 bool disable;
63 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
64 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
65};
66
67static int fc_vport_create(struct Scsi_Host *shost, int channel,
68 struct device *pdev, struct fc_vport_identifiers *ids, 44 struct device *pdev, struct fc_vport_identifiers *ids,
69 struct fc_vport **vport); 45 struct fc_vport **vport);
70 46
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1760 vid.disable = false; /* always enabled */ 1736 vid.disable = false; /* always enabled */
1761 1737
1762 /* we only allow support on Channel 0 !!! */ 1738 /* we only allow support on Channel 0 !!! */
1763 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport); 1739 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1764 return stat ? stat : count; 1740 return stat ? stat : count;
1765} 1741}
1766static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, 1742static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
1950 * Notes: 1926 * Notes:
1951 * This routine assumes no locks are held on entry. 1927 * This routine assumes no locks are held on entry.
1952 */ 1928 */
1953static enum scsi_eh_timer_return 1929static enum blk_eh_timer_return
1954fc_timed_out(struct scsi_cmnd *scmd) 1930fc_timed_out(struct scsi_cmnd *scmd)
1955{ 1931{
1956 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 1932 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1957 1933
1958 if (rport->port_state == FC_PORTSTATE_BLOCKED) 1934 if (rport->port_state == FC_PORTSTATE_BLOCKED)
1959 return EH_RESET_TIMER; 1935 return BLK_EH_RESET_TIMER;
1960 1936
1961 return EH_NOT_HANDLED; 1937 return BLK_EH_NOT_HANDLED;
1962} 1938}
1963 1939
1964/* 1940/*
@@ -2157,8 +2133,7 @@ fc_attach_transport(struct fc_function_template *ft)
2157 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); 2133 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2158 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); 2134 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
2159 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); 2135 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2160 if (ft->terminate_rport_io) 2136 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2161 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2162 2137
2163 BUG_ON(count > FC_RPORT_NUM_ATTRS); 2138 BUG_ON(count > FC_RPORT_NUM_ATTRS);
2164 2139
@@ -2352,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost)
2352} 2327}
2353EXPORT_SYMBOL(fc_remove_host); 2328EXPORT_SYMBOL(fc_remove_host);
2354 2329
2330static void fc_terminate_rport_io(struct fc_rport *rport)
2331{
2332 struct Scsi_Host *shost = rport_to_shost(rport);
2333 struct fc_internal *i = to_fc_internal(shost->transportt);
2334
2335 /* Involve the LLDD if possible to terminate all io on the rport. */
2336 if (i->f->terminate_rport_io)
2337 i->f->terminate_rport_io(rport);
2338
2339 /*
2340 * must unblock to flush queued IO. The caller will have set
2341 * the port_state or flags, so that fc_remote_port_chkready will
2342 * fail IO.
2343 */
2344 scsi_target_unblock(&rport->dev);
2345}
2355 2346
2356/** 2347/**
2357 * fc_starget_delete - called to delete the scsi decendents of an rport 2348 * fc_starget_delete - called to delete the scsi decendents of an rport
@@ -2364,13 +2355,8 @@ fc_starget_delete(struct work_struct *work)
2364{ 2355{
2365 struct fc_rport *rport = 2356 struct fc_rport *rport =
2366 container_of(work, struct fc_rport, stgt_delete_work); 2357 container_of(work, struct fc_rport, stgt_delete_work);
2367 struct Scsi_Host *shost = rport_to_shost(rport);
2368 struct fc_internal *i = to_fc_internal(shost->transportt);
2369
2370 /* Involve the LLDD if possible to terminate all io on the rport. */
2371 if (i->f->terminate_rport_io)
2372 i->f->terminate_rport_io(rport);
2373 2358
2359 fc_terminate_rport_io(rport);
2374 scsi_remove_target(&rport->dev); 2360 scsi_remove_target(&rport->dev);
2375} 2361}
2376 2362
@@ -2396,10 +2382,7 @@ fc_rport_final_delete(struct work_struct *work)
2396 if (rport->flags & FC_RPORT_SCAN_PENDING) 2382 if (rport->flags & FC_RPORT_SCAN_PENDING)
2397 scsi_flush_work(shost); 2383 scsi_flush_work(shost);
2398 2384
2399 /* involve the LLDD to terminate all pending i/o */ 2385 fc_terminate_rport_io(rport);
2400 if (i->f->terminate_rport_io)
2401 i->f->terminate_rport_io(rport);
2402
2403 /* 2386 /*
2404 * Cancel any outstanding timers. These should really exist 2387 * Cancel any outstanding timers. These should really exist
2405 * only when rmmod'ing the LLDD and we're asking for 2388 * only when rmmod'ing the LLDD and we're asking for
@@ -2663,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2663 2646
2664 spin_lock_irqsave(shost->host_lock, flags); 2647 spin_lock_irqsave(shost->host_lock, flags);
2665 2648
2666 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2649 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2650 FC_RPORT_DEVLOSS_PENDING);
2667 2651
2668 /* if target, initiate a scan */ 2652 /* if target, initiate a scan */
2669 if (rport->scsi_target_id != -1) { 2653 if (rport->scsi_target_id != -1) {
@@ -2726,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2726 rport->port_id = ids->port_id; 2710 rport->port_id = ids->port_id;
2727 rport->roles = ids->roles; 2711 rport->roles = ids->roles;
2728 rport->port_state = FC_PORTSTATE_ONLINE; 2712 rport->port_state = FC_PORTSTATE_ONLINE;
2713 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2729 2714
2730 if (fci->f->dd_fcrport_size) 2715 if (fci->f->dd_fcrport_size)
2731 memset(rport->dd_data, 0, 2716 memset(rport->dd_data, 0,
@@ -2808,7 +2793,6 @@ void
2808fc_remote_port_delete(struct fc_rport *rport) 2793fc_remote_port_delete(struct fc_rport *rport)
2809{ 2794{
2810 struct Scsi_Host *shost = rport_to_shost(rport); 2795 struct Scsi_Host *shost = rport_to_shost(rport);
2811 struct fc_internal *i = to_fc_internal(shost->transportt);
2812 int timeout = rport->dev_loss_tmo; 2796 int timeout = rport->dev_loss_tmo;
2813 unsigned long flags; 2797 unsigned long flags;
2814 2798
@@ -2854,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *rport)
2854 2838
2855 /* see if we need to kill io faster than waiting for device loss */ 2839 /* see if we need to kill io faster than waiting for device loss */
2856 if ((rport->fast_io_fail_tmo != -1) && 2840 if ((rport->fast_io_fail_tmo != -1) &&
2857 (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io)) 2841 (rport->fast_io_fail_tmo < timeout))
2858 fc_queue_devloss_work(shost, &rport->fail_io_work, 2842 fc_queue_devloss_work(shost, &rport->fail_io_work,
2859 rport->fast_io_fail_tmo * HZ); 2843 rport->fast_io_fail_tmo * HZ);
2860 2844
@@ -2930,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2930 fc_flush_devloss(shost); 2914 fc_flush_devloss(shost);
2931 2915
2932 spin_lock_irqsave(shost->host_lock, flags); 2916 spin_lock_irqsave(shost->host_lock, flags);
2933 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2917 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2918 FC_RPORT_DEVLOSS_PENDING);
2934 spin_unlock_irqrestore(shost->host_lock, flags); 2919 spin_unlock_irqrestore(shost->host_lock, flags);
2935 2920
2936 /* ensure any stgt delete functions are done */ 2921 /* ensure any stgt delete functions are done */
@@ -3025,6 +3010,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
3025 rport->supported_classes = FC_COS_UNSPECIFIED; 3010 rport->supported_classes = FC_COS_UNSPECIFIED;
3026 rport->roles = FC_PORT_ROLE_UNKNOWN; 3011 rport->roles = FC_PORT_ROLE_UNKNOWN;
3027 rport->port_state = FC_PORTSTATE_NOTPRESENT; 3012 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3013 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3028 3014
3029 /* remove the identifiers that aren't used in the consisting binding */ 3015 /* remove the identifiers that aren't used in the consisting binding */
3030 switch (fc_host->tgtid_bind_type) { 3016 switch (fc_host->tgtid_bind_type) {
@@ -3067,13 +3053,12 @@ fc_timeout_fail_rport_io(struct work_struct *work)
3067{ 3053{
3068 struct fc_rport *rport = 3054 struct fc_rport *rport =
3069 container_of(work, struct fc_rport, fail_io_work.work); 3055 container_of(work, struct fc_rport, fail_io_work.work);
3070 struct Scsi_Host *shost = rport_to_shost(rport);
3071 struct fc_internal *i = to_fc_internal(shost->transportt);
3072 3056
3073 if (rport->port_state != FC_PORTSTATE_BLOCKED) 3057 if (rport->port_state != FC_PORTSTATE_BLOCKED)
3074 return; 3058 return;
3075 3059
3076 i->f->terminate_rport_io(rport); 3060 rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3061 fc_terminate_rport_io(rport);
3077} 3062}
3078 3063
3079/** 3064/**
@@ -3103,7 +3088,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3103 3088
3104 3089
3105/** 3090/**
3106 * fc_vport_create - allocates and creates a FC virtual port. 3091 * fc_vport_setup - allocates and creates a FC virtual port.
3107 * @shost: scsi host the virtual port is connected to. 3092 * @shost: scsi host the virtual port is connected to.
3108 * @channel: Channel on shost port connected to. 3093 * @channel: Channel on shost port connected to.
3109 * @pdev: parent device for vport 3094 * @pdev: parent device for vport
@@ -3118,7 +3103,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3118 * This routine assumes no locks are held on entry. 3103 * This routine assumes no locks are held on entry.
3119 */ 3104 */
3120static int 3105static int
3121fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, 3106fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3122 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3107 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3123{ 3108{
3124 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3109 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3216,28 @@ delete_vport:
3231 return error; 3216 return error;
3232} 3217}
3233 3218
3219/**
3220 * fc_vport_create - Admin App or LLDD requests creation of a vport
3221 * @shost: scsi host the virtual port is connected to.
3222 * @channel: channel on shost port connected to.
3223 * @ids: The world wide names, FC4 port roles, etc for
3224 * the virtual port.
3225 *
3226 * Notes:
3227 * This routine assumes no locks are held on entry.
3228 */
3229struct fc_vport *
3230fc_vport_create(struct Scsi_Host *shost, int channel,
3231 struct fc_vport_identifiers *ids)
3232{
3233 int stat;
3234 struct fc_vport *vport;
3235
3236 stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3237 ids, &vport);
3238 return stat ? NULL : vport;
3239}
3240EXPORT_SYMBOL(fc_vport_create);
3234 3241
3235/** 3242/**
3236 * fc_vport_terminate - Admin App or LLDD requests termination of a vport 3243 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164f..4a803ebaf508 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -138,7 +138,7 @@ static ssize_t
138show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) 138show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
139{ 139{
140 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); 140 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
141 return sprintf(buf, "%u\n", ep->id); 141 return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
142} 142}
143static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); 143static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
144 144
@@ -156,7 +156,7 @@ static struct attribute_group iscsi_endpoint_group = {
156static int iscsi_match_epid(struct device *dev, void *data) 156static int iscsi_match_epid(struct device *dev, void *data)
157{ 157{
158 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); 158 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
159 unsigned int *epid = (unsigned int *) data; 159 uint64_t *epid = (uint64_t *) data;
160 160
161 return *epid == ep->id; 161 return *epid == ep->id;
162} 162}
@@ -166,7 +166,7 @@ iscsi_create_endpoint(int dd_size)
166{ 166{
167 struct device *dev; 167 struct device *dev;
168 struct iscsi_endpoint *ep; 168 struct iscsi_endpoint *ep;
169 unsigned int id; 169 uint64_t id;
170 int err; 170 int err;
171 171
172 for (id = 1; id < ISCSI_MAX_EPID; id++) { 172 for (id = 1; id < ISCSI_MAX_EPID; id++) {
@@ -187,7 +187,8 @@ iscsi_create_endpoint(int dd_size)
187 187
188 ep->id = id; 188 ep->id = id;
189 ep->dev.class = &iscsi_endpoint_class; 189 ep->dev.class = &iscsi_endpoint_class;
190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id); 190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
191 (unsigned long long) id);
191 err = device_register(&ep->dev); 192 err = device_register(&ep->dev);
192 if (err) 193 if (err)
193 goto free_ep; 194 goto free_ep;
@@ -374,10 +375,10 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
374 err = 0; 375 err = 0;
375 break; 376 break;
376 case ISCSI_SESSION_FAILED: 377 case ISCSI_SESSION_FAILED:
377 err = DID_IMM_RETRY << 16; 378 err = DID_TRANSPORT_DISRUPTED << 16;
378 break; 379 break;
379 case ISCSI_SESSION_FREE: 380 case ISCSI_SESSION_FREE:
380 err = DID_NO_CONNECT << 16; 381 err = DID_TRANSPORT_FAILFAST << 16;
381 break; 382 break;
382 default: 383 default:
383 err = DID_NO_CONNECT << 16; 384 err = DID_NO_CONNECT << 16;
@@ -1010,7 +1011,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
1010 1011
1011 skb = alloc_skb(len, GFP_ATOMIC); 1012 skb = alloc_skb(len, GFP_ATOMIC);
1012 if (!skb) { 1013 if (!skb) {
1013 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 1014 iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
1014 iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " 1015 iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
1015 "control PDU: OOM\n"); 1016 "control PDU: OOM\n");
1016 return -ENOMEM; 1017 return -ENOMEM;
@@ -1031,7 +1032,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
1031} 1032}
1032EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 1033EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
1033 1034
1034void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 1035void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1035{ 1036{
1036 struct nlmsghdr *nlh; 1037 struct nlmsghdr *nlh;
1037 struct sk_buff *skb; 1038 struct sk_buff *skb;
@@ -1063,7 +1064,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
1063 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1064 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1064 error); 1065 error);
1065} 1066}
1066EXPORT_SYMBOL_GPL(iscsi_conn_error); 1067EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1067 1068
1068static int 1069static int
1069iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1070iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
@@ -1361,7 +1362,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
1361 return -EINVAL; 1362 return -EINVAL;
1362 1363
1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); 1364 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
1364 if (IS_ERR(shost)) { 1365 if (!shost) {
1365 printk(KERN_ERR "target discovery could not find host no %u\n", 1366 printk(KERN_ERR "target discovery could not find host no %u\n",
1366 ev->u.tgt_dscvr.host_no); 1367 ev->u.tgt_dscvr.host_no);
1367 return -ENODEV; 1368 return -ENODEV;
@@ -1387,7 +1388,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1387 return -ENOSYS; 1388 return -ENOSYS;
1388 1389
1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no); 1390 shost = scsi_host_lookup(ev->u.set_host_param.host_no);
1390 if (IS_ERR(shost)) { 1391 if (!shost) {
1391 printk(KERN_ERR "set_host_param could not find host no %u\n", 1392 printk(KERN_ERR "set_host_param could not find host no %u\n",
1392 ev->u.set_host_param.host_no); 1393 ev->u.set_host_param.host_no);
1393 return -ENODEV; 1394 return -ENODEV;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index b29360ed0bdc..7c2d28924d2a 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -109,7 +109,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
109 for(i = 0; i < DV_RETRIES; i++) { 109 for(i = 0; i < DV_RETRIES; i++) {
110 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, 110 result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
111 sense, DV_TIMEOUT, /* retries */ 1, 111 sense, DV_TIMEOUT, /* retries */ 1,
112 REQ_FAILFAST); 112 REQ_FAILFAST_DEV |
113 REQ_FAILFAST_TRANSPORT |
114 REQ_FAILFAST_DRIVER);
113 if (result & DRIVER_SENSE) { 115 if (result & DRIVER_SENSE) {
114 struct scsi_sense_hdr sshdr_tmp; 116 struct scsi_sense_hdr sshdr_tmp;
115 if (!sshdr) 117 if (!sshdr)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..c9e1242eaf25 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
47#include <linux/blkpg.h> 47#include <linux/blkpg.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/string_helpers.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51 52
52#include <scsi/scsi.h> 53#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 87MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 88MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
88 89
90#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
91#define SD_MINORS 16
92#else
93#define SD_MINORS 0
94#endif
95
89static int sd_revalidate_disk(struct gendisk *); 96static int sd_revalidate_disk(struct gendisk *);
90static int sd_probe(struct device *); 97static int sd_probe(struct device *);
91static int sd_remove(struct device *); 98static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
159 sd_print_sense_hdr(sdkp, &sshdr); 166 sd_print_sense_hdr(sdkp, &sshdr);
160 return -EINVAL; 167 return -EINVAL;
161 } 168 }
162 sd_revalidate_disk(sdkp->disk); 169 revalidate_disk(sdkp->disk);
163 return count; 170 return count;
164} 171}
165 172
@@ -377,8 +384,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
377 sector_t block = rq->sector; 384 sector_t block = rq->sector;
378 sector_t threshold; 385 sector_t threshold;
379 unsigned int this_count = rq->nr_sectors; 386 unsigned int this_count = rq->nr_sectors;
380 unsigned int timeout = sdp->timeout; 387 int ret, host_dif;
381 int ret;
382 388
383 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 389 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
384 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 390 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
@@ -509,7 +515,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
509 rq->nr_sectors)); 515 rq->nr_sectors));
510 516
511 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 517 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
512 if (scsi_host_dif_capable(sdp->host, sdkp->protection_type)) 518 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
519 if (host_dif)
513 SCpnt->cmnd[1] = 1 << 5; 520 SCpnt->cmnd[1] = 1 << 5;
514 else 521 else
515 SCpnt->cmnd[1] = 0; 522 SCpnt->cmnd[1] = 0;
@@ -567,8 +574,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
567 SCpnt->sdb.length = this_count * sdp->sector_size; 574 SCpnt->sdb.length = this_count * sdp->sector_size;
568 575
569 /* If DIF or DIX is enabled, tell HBA how to handle request */ 576 /* If DIF or DIX is enabled, tell HBA how to handle request */
570 if (sdkp->protection_type || scsi_prot_sg_count(SCpnt)) 577 if (host_dif || scsi_prot_sg_count(SCpnt))
571 sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt)); 578 sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt),
579 sdkp->protection_type);
572 580
573 /* 581 /*
574 * We shouldn't disconnect in the middle of a sector, so with a dumb 582 * We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -578,7 +586,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
578 SCpnt->transfersize = sdp->sector_size; 586 SCpnt->transfersize = sdp->sector_size;
579 SCpnt->underflow = this_count << 9; 587 SCpnt->underflow = this_count << 9;
580 SCpnt->allowed = SD_MAX_RETRIES; 588 SCpnt->allowed = SD_MAX_RETRIES;
581 SCpnt->timeout_per_command = timeout;
582 589
583 /* 590 /*
584 * This indicates that the command is ready from our end to be 591 * This indicates that the command is ready from our end to be
@@ -602,17 +609,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
602 * In the latter case @inode and @filp carry an abridged amount 609 * In the latter case @inode and @filp carry an abridged amount
603 * of information as noted above. 610 * of information as noted above.
604 **/ 611 **/
605static int sd_open(struct inode *inode, struct file *filp) 612static int sd_open(struct block_device *bdev, fmode_t mode)
606{ 613{
607 struct gendisk *disk = inode->i_bdev->bd_disk; 614 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
608 struct scsi_disk *sdkp;
609 struct scsi_device *sdev; 615 struct scsi_device *sdev;
610 int retval; 616 int retval;
611 617
612 if (!(sdkp = scsi_disk_get(disk))) 618 if (!sdkp)
613 return -ENXIO; 619 return -ENXIO;
614 620
615
616 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); 621 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
617 622
618 sdev = sdkp->device; 623 sdev = sdkp->device;
@@ -626,14 +631,13 @@ static int sd_open(struct inode *inode, struct file *filp)
626 goto error_out; 631 goto error_out;
627 632
628 if (sdev->removable || sdkp->write_prot) 633 if (sdev->removable || sdkp->write_prot)
629 check_disk_change(inode->i_bdev); 634 check_disk_change(bdev);
630 635
631 /* 636 /*
632 * If the drive is empty, just let the open fail. 637 * If the drive is empty, just let the open fail.
633 */ 638 */
634 retval = -ENOMEDIUM; 639 retval = -ENOMEDIUM;
635 if (sdev->removable && !sdkp->media_present && 640 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
636 !(filp->f_flags & O_NDELAY))
637 goto error_out; 641 goto error_out;
638 642
639 /* 643 /*
@@ -641,7 +645,7 @@ static int sd_open(struct inode *inode, struct file *filp)
641 * if the user expects to be able to write to the thing. 645 * if the user expects to be able to write to the thing.
642 */ 646 */
643 retval = -EROFS; 647 retval = -EROFS;
644 if (sdkp->write_prot && (filp->f_mode & FMODE_WRITE)) 648 if (sdkp->write_prot && (mode & FMODE_WRITE))
645 goto error_out; 649 goto error_out;
646 650
647 /* 651 /*
@@ -677,9 +681,8 @@ error_out:
677 * Note: may block (uninterruptible) if error recovery is underway 681 * Note: may block (uninterruptible) if error recovery is underway
678 * on this disk. 682 * on this disk.
679 **/ 683 **/
680static int sd_release(struct inode *inode, struct file *filp) 684static int sd_release(struct gendisk *disk, fmode_t mode)
681{ 685{
682 struct gendisk *disk = inode->i_bdev->bd_disk;
683 struct scsi_disk *sdkp = scsi_disk(disk); 686 struct scsi_disk *sdkp = scsi_disk(disk);
684 struct scsi_device *sdev = sdkp->device; 687 struct scsi_device *sdev = sdkp->device;
685 688
@@ -736,10 +739,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
736 * Note: most ioctls are forward onto the block subsystem or further 739 * Note: most ioctls are forward onto the block subsystem or further
737 * down in the scsi subsystem. 740 * down in the scsi subsystem.
738 **/ 741 **/
739static int sd_ioctl(struct inode * inode, struct file * filp, 742static int sd_ioctl(struct block_device *bdev, fmode_t mode,
740 unsigned int cmd, unsigned long arg) 743 unsigned int cmd, unsigned long arg)
741{ 744{
742 struct block_device *bdev = inode->i_bdev;
743 struct gendisk *disk = bdev->bd_disk; 745 struct gendisk *disk = bdev->bd_disk;
744 struct scsi_device *sdp = scsi_disk(disk)->device; 746 struct scsi_device *sdp = scsi_disk(disk)->device;
745 void __user *p = (void __user *)arg; 747 void __user *p = (void __user *)arg;
@@ -754,7 +756,8 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
754 * may try and take the device offline, in which case all further 756 * may try and take the device offline, in which case all further
755 * access to the device is prohibited. 757 * access to the device is prohibited.
756 */ 758 */
757 error = scsi_nonblockable_ioctl(sdp, cmd, p, filp); 759 error = scsi_nonblockable_ioctl(sdp, cmd, p,
760 (mode & FMODE_NDELAY_NOW) != 0);
758 if (!scsi_block_when_processing_errors(sdp) || !error) 761 if (!scsi_block_when_processing_errors(sdp) || !error)
759 return error; 762 return error;
760 763
@@ -768,7 +771,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
768 case SCSI_IOCTL_GET_BUS_NUMBER: 771 case SCSI_IOCTL_GET_BUS_NUMBER:
769 return scsi_ioctl(sdp, cmd, p); 772 return scsi_ioctl(sdp, cmd, p);
770 default: 773 default:
771 error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p); 774 error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
772 if (error != -ENOTTY) 775 if (error != -ENOTTY)
773 return error; 776 return error;
774 } 777 }
@@ -910,7 +913,7 @@ static void sd_rescan(struct device *dev)
910 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 913 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
911 914
912 if (sdkp) { 915 if (sdkp) {
913 sd_revalidate_disk(sdkp->disk); 916 revalidate_disk(sdkp->disk);
914 scsi_disk_put(sdkp); 917 scsi_disk_put(sdkp);
915 } 918 }
916} 919}
@@ -921,11 +924,10 @@ static void sd_rescan(struct device *dev)
921 * This gets directly called from VFS. When the ioctl 924 * This gets directly called from VFS. When the ioctl
922 * is not recognized we go back to the other translation paths. 925 * is not recognized we go back to the other translation paths.
923 */ 926 */
924static long sd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 927static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
928 unsigned int cmd, unsigned long arg)
925{ 929{
926 struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev; 930 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
927 struct gendisk *disk = bdev->bd_disk;
928 struct scsi_device *sdev = scsi_disk(disk)->device;
929 931
930 /* 932 /*
931 * If we are in the middle of error recovery, don't let anyone 933 * If we are in the middle of error recovery, don't let anyone
@@ -955,7 +957,7 @@ static struct block_device_operations sd_fops = {
955 .owner = THIS_MODULE, 957 .owner = THIS_MODULE,
956 .open = sd_open, 958 .open = sd_open,
957 .release = sd_release, 959 .release = sd_release,
958 .ioctl = sd_ioctl, 960 .locked_ioctl = sd_ioctl,
959 .getgeo = sd_getgeo, 961 .getgeo = sd_getgeo,
960#ifdef CONFIG_COMPAT 962#ifdef CONFIG_COMPAT
961 .compat_ioctl = sd_compat_ioctl, 963 .compat_ioctl = sd_compat_ioctl,
@@ -1047,7 +1049,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1047 good_bytes = sd_completed_bytes(SCpnt); 1049 good_bytes = sd_completed_bytes(SCpnt);
1048 break; 1050 break;
1049 case RECOVERED_ERROR: 1051 case RECOVERED_ERROR:
1050 case NO_SENSE:
1051 /* Inform the user, but make sure that it's not treated 1052 /* Inform the user, but make sure that it's not treated
1052 * as a hard error. 1053 * as a hard error.
1053 */ 1054 */
@@ -1056,6 +1057,15 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1056 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1057 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1057 good_bytes = scsi_bufflen(SCpnt); 1058 good_bytes = scsi_bufflen(SCpnt);
1058 break; 1059 break;
1060 case NO_SENSE:
1061 /* This indicates a false check condition, so ignore it. An
1062 * unknown amount of data was transferred so treat it as an
1063 * error.
1064 */
1065 scsi_print_sense("sd", SCpnt);
1066 SCpnt->result = 0;
1067 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1068 break;
1059 case ABORTED_COMMAND: 1069 case ABORTED_COMMAND:
1060 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ 1070 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
1061 scsi_print_result(SCpnt); 1071 scsi_print_result(SCpnt);
@@ -1069,15 +1079,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1069 scsi_print_sense("sd", SCpnt); 1079 scsi_print_sense("sd", SCpnt);
1070 good_bytes = sd_completed_bytes(SCpnt); 1080 good_bytes = sd_completed_bytes(SCpnt);
1071 } 1081 }
1072 if (!scsi_device_protection(SCpnt->device) &&
1073 SCpnt->device->use_10_for_rw &&
1074 (SCpnt->cmnd[0] == READ_10 ||
1075 SCpnt->cmnd[0] == WRITE_10))
1076 SCpnt->device->use_10_for_rw = 0;
1077 if (SCpnt->device->use_10_for_ms &&
1078 (SCpnt->cmnd[0] == MODE_SENSE_10 ||
1079 SCpnt->cmnd[0] == MODE_SELECT_10))
1080 SCpnt->device->use_10_for_ms = 0;
1081 break; 1082 break;
1082 default: 1083 default:
1083 break; 1084 break;
@@ -1247,14 +1248,12 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1247 else 1248 else
1248 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 1249 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1249 1250
1251 sdkp->protection_type = type;
1252
1250 switch (type) { 1253 switch (type) {
1251 case SD_DIF_TYPE0_PROTECTION: 1254 case SD_DIF_TYPE0_PROTECTION:
1252 sdkp->protection_type = 0;
1253 break;
1254
1255 case SD_DIF_TYPE1_PROTECTION: 1255 case SD_DIF_TYPE1_PROTECTION:
1256 case SD_DIF_TYPE3_PROTECTION: 1256 case SD_DIF_TYPE3_PROTECTION:
1257 sdkp->protection_type = type;
1258 break; 1257 break;
1259 1258
1260 case SD_DIF_TYPE2_PROTECTION: 1259 case SD_DIF_TYPE2_PROTECTION:
@@ -1272,7 +1271,6 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1272 return; 1271 return;
1273 1272
1274disable: 1273disable:
1275 sdkp->protection_type = 0;
1276 sdkp->capacity = 0; 1274 sdkp->capacity = 0;
1277} 1275}
1278 1276
@@ -1429,27 +1427,21 @@ got_data:
1429 */ 1427 */
1430 sector_size = 512; 1428 sector_size = 512;
1431 } 1429 }
1430 blk_queue_hardsect_size(sdp->request_queue, sector_size);
1431
1432 { 1432 {
1433 /* 1433 char cap_str_2[10], cap_str_10[10];
1434 * The msdos fs needs to know the hardware sector size 1434 u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
1435 * So I have created this table. See ll_rw_blk.c
1436 * Jacques Gelinas (Jacques@solucorp.qc.ca)
1437 */
1438 int hard_sector = sector_size;
1439 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1440 struct request_queue *queue = sdp->request_queue;
1441 sector_t mb = sz;
1442 1435
1443 blk_queue_hardsect_size(queue, hard_sector); 1436 string_get_size(sz, STRING_UNITS_2, cap_str_2,
1444 /* avoid 64-bit division on 32-bit platforms */ 1437 sizeof(cap_str_2));
1445 sector_div(sz, 625); 1438 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1446 mb -= sz - 974; 1439 sizeof(cap_str_10));
1447 sector_div(mb, 1950);
1448 1440
1449 sd_printk(KERN_NOTICE, sdkp, 1441 sd_printk(KERN_NOTICE, sdkp,
1450 "%llu %d-byte hardware sectors (%llu MB)\n", 1442 "%llu %d-byte hardware sectors: (%s/%s)\n",
1451 (unsigned long long)sdkp->capacity, 1443 (unsigned long long)sdkp->capacity,
1452 hard_sector, (unsigned long long)mb); 1444 sector_size, cap_str_10, cap_str_2);
1453 } 1445 }
1454 1446
1455 /* Rescale capacity to 512-byte units */ 1447 /* Rescale capacity to 512-byte units */
@@ -1764,6 +1756,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
1764} 1756}
1765 1757
1766/** 1758/**
1759 * sd_format_disk_name - format disk name
1760 * @prefix: name prefix - ie. "sd" for SCSI disks
1761 * @index: index of the disk to format name for
1762 * @buf: output buffer
1763 * @buflen: length of the output buffer
1764 *
1765 * SCSI disk names starts at sda. The 26th device is sdz and the
1766 * 27th is sdaa. The last one for two lettered suffix is sdzz
1767 * which is followed by sdaaa.
1768 *
1769 * This is basically 26 base counting with one extra 'nil' entry
1770 * at the beggining from the second digit on and can be
1771 * determined using similar method as 26 base conversion with the
1772 * index shifted -1 after each digit is computed.
1773 *
1774 * CONTEXT:
1775 * Don't care.
1776 *
1777 * RETURNS:
1778 * 0 on success, -errno on failure.
1779 */
1780static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
1781{
1782 const int base = 'z' - 'a' + 1;
1783 char *begin = buf + strlen(prefix);
1784 char *end = buf + buflen;
1785 char *p;
1786 int unit;
1787
1788 p = end - 1;
1789 *p = '\0';
1790 unit = base;
1791 do {
1792 if (p == begin)
1793 return -EINVAL;
1794 *--p = 'a' + (index % unit);
1795 index = (index / unit) - 1;
1796 } while (index >= 0);
1797
1798 memmove(begin, p, end - p);
1799 memcpy(buf, prefix, strlen(prefix));
1800
1801 return 0;
1802}
1803
1804/**
1767 * sd_probe - called during driver initialization and whenever a 1805 * sd_probe - called during driver initialization and whenever a
1768 * new scsi device is attached to the system. It is called once 1806 * new scsi device is attached to the system. It is called once
1769 * for each scsi device (not just disks) present. 1807 * for each scsi device (not just disks) present.
@@ -1801,7 +1839,7 @@ static int sd_probe(struct device *dev)
1801 if (!sdkp) 1839 if (!sdkp)
1802 goto out; 1840 goto out;
1803 1841
1804 gd = alloc_disk(16); 1842 gd = alloc_disk(SD_MINORS);
1805 if (!gd) 1843 if (!gd)
1806 goto out_free; 1844 goto out_free;
1807 1845
@@ -1815,8 +1853,8 @@ static int sd_probe(struct device *dev)
1815 if (error) 1853 if (error)
1816 goto out_put; 1854 goto out_put;
1817 1855
1818 error = -EBUSY; 1856 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
1819 if (index >= SD_MAX_DISKS) 1857 if (error)
1820 goto out_free_index; 1858 goto out_free_index;
1821 1859
1822 sdkp->device = sdp; 1860 sdkp->device = sdp;
@@ -1826,11 +1864,12 @@ static int sd_probe(struct device *dev)
1826 sdkp->openers = 0; 1864 sdkp->openers = 0;
1827 sdkp->previous_state = 1; 1865 sdkp->previous_state = 1;
1828 1866
1829 if (!sdp->timeout) { 1867 if (!sdp->request_queue->rq_timeout) {
1830 if (sdp->type != TYPE_MOD) 1868 if (sdp->type != TYPE_MOD)
1831 sdp->timeout = SD_TIMEOUT; 1869 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1832 else 1870 else
1833 sdp->timeout = SD_MOD_TIMEOUT; 1871 blk_queue_rq_timeout(sdp->request_queue,
1872 SD_MOD_TIMEOUT);
1834 } 1873 }
1835 1874
1836 device_initialize(&sdkp->dev); 1875 device_initialize(&sdkp->dev);
@@ -1843,24 +1882,12 @@ static int sd_probe(struct device *dev)
1843 1882
1844 get_device(&sdp->sdev_gendev); 1883 get_device(&sdp->sdev_gendev);
1845 1884
1846 gd->major = sd_major((index & 0xf0) >> 4); 1885 if (index < SD_MAX_DISKS) {
1847 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1886 gd->major = sd_major((index & 0xf0) >> 4);
1848 gd->minors = 16; 1887 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
1849 gd->fops = &sd_fops; 1888 gd->minors = SD_MINORS;
1850
1851 if (index < 26) {
1852 sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
1853 } else if (index < (26 + 1) * 26) {
1854 sprintf(gd->disk_name, "sd%c%c",
1855 'a' + index / 26 - 1,'a' + index % 26);
1856 } else {
1857 const unsigned int m1 = (index / 26 - 1) / 26 - 1;
1858 const unsigned int m2 = (index / 26 - 1) % 26;
1859 const unsigned int m3 = index % 26;
1860 sprintf(gd->disk_name, "sd%c%c%c",
1861 'a' + m1, 'a' + m2, 'a' + m3);
1862 } 1889 }
1863 1890 gd->fops = &sd_fops;
1864 gd->private_data = &sdkp->driver; 1891 gd->private_data = &sdkp->driver;
1865 gd->queue = sdkp->device->request_queue; 1892 gd->queue = sdkp->device->request_queue;
1866 1893
@@ -1869,7 +1896,7 @@ static int sd_probe(struct device *dev)
1869 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1896 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1870 1897
1871 gd->driverfs_dev = &sdp->sdev_gendev; 1898 gd->driverfs_dev = &sdp->sdev_gendev;
1872 gd->flags = GENHD_FL_DRIVERFS; 1899 gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
1873 if (sdp->removable) 1900 if (sdp->removable)
1874 gd->flags |= GENHD_FL_REMOVABLE; 1901 gd->flags |= GENHD_FL_REMOVABLE;
1875 1902
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 95b9f06534d5..75638e7d3f66 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -97,19 +97,28 @@ struct sd_dif_tuple {
97 __be32 ref_tag; /* Target LBA or indirect LBA */ 97 __be32 ref_tag; /* Target LBA or indirect LBA */
98}; 98};
99 99
100#if defined(CONFIG_BLK_DEV_INTEGRITY) 100#ifdef CONFIG_BLK_DEV_INTEGRITY
101 101
102extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int); 102extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int);
103extern void sd_dif_config_host(struct scsi_disk *); 103extern void sd_dif_config_host(struct scsi_disk *);
104extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int); 104extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
105extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); 105extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
106 106
107#else /* CONFIG_BLK_DEV_INTEGRITY */ 107#else /* CONFIG_BLK_DEV_INTEGRITY */
108 108
109#define sd_dif_op(a, b, c) do { } while (0) 109static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c)
110#define sd_dif_config_host(a) do { } while (0) 110{
111#define sd_dif_prepare(a, b, c) (0) 111}
112#define sd_dif_complete(a, b) (0) 112static inline void sd_dif_config_host(struct scsi_disk *disk)
113{
114}
115static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a)
116{
117 return 0;
118}
119static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
120{
121}
113 122
114#endif /* CONFIG_BLK_DEV_INTEGRITY */ 123#endif /* CONFIG_BLK_DEV_INTEGRITY */
115 124
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4d17f3d35aac..3ebb1f289490 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -311,25 +311,26 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
311 struct scsi_device *sdp = sdkp->device; 311 struct scsi_device *sdp = sdkp->device;
312 struct gendisk *disk = sdkp->disk; 312 struct gendisk *disk = sdkp->disk;
313 u8 type = sdkp->protection_type; 313 u8 type = sdkp->protection_type;
314 int dif, dix;
314 315
315 /* If this HBA doesn't support DIX, resort to normal I/O or DIF */ 316 dif = scsi_host_dif_capable(sdp->host, type);
316 if (scsi_host_dix_capable(sdp->host, type) == 0) { 317 dix = scsi_host_dix_capable(sdp->host, type);
317 318
318 if (type == SD_DIF_TYPE0_PROTECTION) 319 if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
319 return; 320 dif = 0; dix = 1;
320 321 }
321 if (scsi_host_dif_capable(sdp->host, type) == 0) {
322 sd_printk(KERN_INFO, sdkp, "Type %d protection " \
323 "unsupported by HBA. Disabling DIF.\n", type);
324 sdkp->protection_type = 0;
325 return;
326 }
327 322
328 sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n", 323 if (type) {
329 type); 324 if (dif)
325 sd_printk(KERN_NOTICE, sdkp,
326 "Enabling DIF Type %d protection\n", type);
327 else
328 sd_printk(KERN_NOTICE, sdkp,
329 "Disabling DIF Type %d protection\n", type);
330 }
330 331
332 if (!dix)
331 return; 333 return;
332 }
333 334
334 /* Enable DMA of protection information */ 335 /* Enable DMA of protection information */
335 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) 336 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
@@ -343,17 +344,17 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
343 else 344 else
344 blk_integrity_register(disk, &dif_type1_integrity_crc); 345 blk_integrity_register(disk, &dif_type1_integrity_crc);
345 346
346 sd_printk(KERN_INFO, sdkp, 347 sd_printk(KERN_NOTICE, sdkp,
347 "Enabling %s integrity protection\n", disk->integrity->name); 348 "Enabling DIX %s protection\n", disk->integrity->name);
348 349
349 /* Signal to block layer that we support sector tagging */ 350 /* Signal to block layer that we support sector tagging */
350 if (type && sdkp->ATO) { 351 if (dif && type && sdkp->ATO) {
351 if (type == SD_DIF_TYPE3_PROTECTION) 352 if (type == SD_DIF_TYPE3_PROTECTION)
352 disk->integrity->tag_size = sizeof(u16) + sizeof(u32); 353 disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
353 else 354 else
354 disk->integrity->tag_size = sizeof(u16); 355 disk->integrity->tag_size = sizeof(u16);
355 356
356 sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n", 357 sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
357 disk->integrity->tag_size); 358 disk->integrity->tag_size);
358 } 359 }
359} 360}
@@ -361,7 +362,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
361/* 362/*
362 * DIF DMA operation magic decoder ring. 363 * DIF DMA operation magic decoder ring.
363 */ 364 */
364void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix) 365void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type)
365{ 366{
366 int csum_convert, prot_op; 367 int csum_convert, prot_op;
367 368
@@ -406,7 +407,8 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
406 } 407 }
407 408
408 scsi_set_prot_op(scmd, prot_op); 409 scsi_set_prot_op(scmd, prot_op);
409 scsi_set_prot_type(scmd, dif); 410 if (dif)
411 scsi_set_prot_type(scmd, type);
410} 412}
411 413
412/* 414/*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650a..5103855242ae 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
47#include <linux/seq_file.h> 47#include <linux/seq_file.h>
48#include <linux/blkdev.h> 48#include <linux/blkdev.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/scatterlist.h>
51#include <linux/blktrace_api.h> 50#include <linux/blktrace_api.h>
52#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
53 52
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
69#endif 68#endif
70 69
71#define SG_ALLOW_DIO_DEF 0 70#define SG_ALLOW_DIO_DEF 0
72#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
73 71
74#define SG_MAX_DEVS 32768 72#define SG_MAX_DEVS 32768
75 73
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
118 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 116 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
119 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ 117 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
120 unsigned bufflen; /* Size of (aggregate) data buffer */ 118 unsigned bufflen; /* Size of (aggregate) data buffer */
121 unsigned b_malloc_len; /* actual len malloc'ed in buffer */ 119 struct page **pages;
122 struct scatterlist *buffer;/* scatter list */ 120 int page_order;
123 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ 121 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
124 unsigned char cmd_opcode; /* first byte of command */ 122 unsigned char cmd_opcode; /* first byte of command */
125} Sg_scatter_hold; 123} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 char orphan; /* 1 -> drop on sight, 0 -> normal */ 135 char orphan; /* 1 -> drop on sight, 0 -> normal */
138 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 136 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
139 volatile char done; /* 0->before bh, 1->before read, 2->read */ 137 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 struct request *rq;
139 struct bio *bio;
140} Sg_request; 140} Sg_request;
141 141
142typedef struct sg_fd { /* holds the state of a file descriptor */ 142typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
175 175
176static int sg_fasync(int fd, struct file *filp, int mode); 176static int sg_fasync(int fd, struct file *filp, int mode);
177/* tasklet or soft irq callback */ 177/* tasklet or soft irq callback */
178static void sg_cmd_done(void *data, char *sense, int result, int resid); 178static void sg_rq_end_io(struct request *rq, int uptodate);
179static int sg_start_req(Sg_request * srp); 179static int sg_start_req(Sg_request *srp, unsigned char *cmd);
180static void sg_finish_rem_req(Sg_request * srp); 180static void sg_finish_rem_req(Sg_request * srp);
181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
188 int read_only, Sg_request **o_srp); 188 int read_only, Sg_request **o_srp);
189static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
190 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
192 int wr_xf, int *countp, unsigned char __user **up);
193static int sg_write_xfer(Sg_request * srp);
194static int sg_read_xfer(Sg_request * srp);
195static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
196static void sg_remove_scat(Sg_scatter_hold * schp); 192static void sg_remove_scat(Sg_scatter_hold * schp);
197static void sg_build_reserve(Sg_fd * sfp, int req_size); 193static void sg_build_reserve(Sg_fd * sfp, int req_size);
198static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
199static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
200static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
201static void sg_page_free(struct page *page, int size);
202static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
203static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
204static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206static Sg_request *sg_add_request(Sg_fd * sfp); 200static Sg_request *sg_add_request(Sg_fd * sfp);
207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208static int sg_res_in_use(Sg_fd * sfp); 202static int sg_res_in_use(Sg_fd * sfp);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 203static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 204#ifdef CONFIG_SCSI_PROC_FS
212static int sg_last_dev(void); 205static int sg_last_dev(void);
@@ -334,7 +327,6 @@ sg_release(struct inode *inode, struct file *filp)
334 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 327 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
335 return -ENXIO; 328 return -ENXIO;
336 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 329 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
337 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
338 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ 330 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
339 if (!sdp->detached) { 331 if (!sdp->detached) {
340 scsi_device_put(sdp->device); 332 scsi_device_put(sdp->device);
@@ -529,8 +521,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
529 err = -EFAULT; 521 err = -EFAULT;
530 goto err_out; 522 goto err_out;
531 } 523 }
532 err = sg_read_xfer(srp); 524err_out:
533 err_out:
534 sg_finish_rem_req(srp); 525 sg_finish_rem_req(srp);
535 return (0 == err) ? count : err; 526 return (0 == err) ? count : err;
536} 527}
@@ -612,7 +603,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
612 else 603 else
613 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 604 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
614 hp->dxfer_len = mxsize; 605 hp->dxfer_len = mxsize;
615 hp->dxferp = (char __user *)buf + cmd_size; 606 if (hp->dxfer_direction == SG_DXFER_TO_DEV)
607 hp->dxferp = (char __user *)buf + cmd_size;
608 else
609 hp->dxferp = NULL;
616 hp->sbp = NULL; 610 hp->sbp = NULL;
617 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 611 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
618 hp->flags = input_size; /* structure abuse ... */ 612 hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +726,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
732 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 726 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
733 (int) cmnd[0], (int) hp->cmd_len)); 727 (int) cmnd[0], (int) hp->cmd_len));
734 728
735 if ((k = sg_start_req(srp))) { 729 k = sg_start_req(srp, cmnd);
730 if (k) {
736 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 731 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
737 sg_finish_rem_req(srp); 732 sg_finish_rem_req(srp);
738 return k; /* probably out of space --> ENOMEM */ 733 return k; /* probably out of space --> ENOMEM */
739 } 734 }
740 if ((k = sg_write_xfer(srp))) {
741 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
742 sg_finish_rem_req(srp);
743 return k;
744 }
745 if (sdp->detached) { 735 if (sdp->detached) {
746 sg_finish_rem_req(srp); 736 sg_finish_rem_req(srp);
747 return -ENODEV; 737 return -ENODEV;
@@ -763,20 +753,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
763 break; 753 break;
764 } 754 }
765 hp->duration = jiffies_to_msecs(jiffies); 755 hp->duration = jiffies_to_msecs(jiffies);
766/* Now send everything of to mid-level. The next time we hear about this 756
767 packet is when sg_cmd_done() is called (i.e. a callback). */ 757 srp->rq->timeout = timeout;
768 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 758 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
769 hp->dxfer_len, srp->data.k_use_sg, timeout, 759 srp->rq, 1, sg_rq_end_io);
770 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 760 return 0;
771 GFP_ATOMIC)) {
772 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
773 /*
774 * most likely out of mem, but could also be a bad map
775 */
776 sg_finish_rem_req(srp);
777 return -ENOMEM;
778 } else
779 return 0;
780} 761}
781 762
782static int 763static int
@@ -1077,7 +1058,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1077 if (sg_allow_access(filp, &opcode)) 1058 if (sg_allow_access(filp, &opcode))
1078 return -EPERM; 1059 return -EPERM;
1079 } 1060 }
1080 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1061 return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
1081 case SG_SET_DEBUG: 1062 case SG_SET_DEBUG:
1082 result = get_user(val, ip); 1063 result = get_user(val, ip);
1083 if (result) 1064 if (result)
@@ -1192,8 +1173,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 Sg_fd *sfp; 1173 Sg_fd *sfp;
1193 unsigned long offset, len, sa; 1174 unsigned long offset, len, sa;
1194 Sg_scatter_hold *rsv_schp; 1175 Sg_scatter_hold *rsv_schp;
1195 struct scatterlist *sg; 1176 int k, length;
1196 int k;
1197 1177
1198 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1178 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1199 return VM_FAULT_SIGBUS; 1179 return VM_FAULT_SIGBUS;
@@ -1203,15 +1183,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1203 return VM_FAULT_SIGBUS; 1183 return VM_FAULT_SIGBUS;
1204 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1184 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1205 offset, rsv_schp->k_use_sg)); 1185 offset, rsv_schp->k_use_sg));
1206 sg = rsv_schp->buffer;
1207 sa = vma->vm_start; 1186 sa = vma->vm_start;
1208 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1187 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1209 ++k, sg = sg_next(sg)) { 1188 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1210 len = vma->vm_end - sa; 1189 len = vma->vm_end - sa;
1211 len = (len < sg->length) ? len : sg->length; 1190 len = (len < length) ? len : length;
1212 if (offset < len) { 1191 if (offset < len) {
1213 struct page *page; 1192 struct page *page = nth_page(rsv_schp->pages[k],
1214 page = virt_to_page(page_address(sg_page(sg)) + offset); 1193 offset >> PAGE_SHIFT);
1215 get_page(page); /* increment page count */ 1194 get_page(page); /* increment page count */
1216 vmf->page = page; 1195 vmf->page = page;
1217 return 0; /* success */ 1196 return 0; /* success */
@@ -1233,8 +1212,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1233 Sg_fd *sfp; 1212 Sg_fd *sfp;
1234 unsigned long req_sz, len, sa; 1213 unsigned long req_sz, len, sa;
1235 Sg_scatter_hold *rsv_schp; 1214 Sg_scatter_hold *rsv_schp;
1236 int k; 1215 int k, length;
1237 struct scatterlist *sg;
1238 1216
1239 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1217 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1240 return -ENXIO; 1218 return -ENXIO;
@@ -1248,11 +1226,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1248 return -ENOMEM; /* cannot map more than reserved buffer */ 1226 return -ENOMEM; /* cannot map more than reserved buffer */
1249 1227
1250 sa = vma->vm_start; 1228 sa = vma->vm_start;
1251 sg = rsv_schp->buffer; 1229 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1252 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1230 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1253 ++k, sg = sg_next(sg)) {
1254 len = vma->vm_end - sa; 1231 len = vma->vm_end - sa;
1255 len = (len < sg->length) ? len : sg->length; 1232 len = (len < length) ? len : length;
1256 sa += len; 1233 sa += len;
1257 } 1234 }
1258 1235
@@ -1263,16 +1240,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1263 return 0; 1240 return 0;
1264} 1241}
1265 1242
1266/* This function is a "bottom half" handler that is called by the 1243/*
1267 * mid level when a command is completed (or has failed). */ 1244 * This function is a "bottom half" handler that is called by the mid
1268static void 1245 * level when a command is completed (or has failed).
1269sg_cmd_done(void *data, char *sense, int result, int resid) 1246 */
1247static void sg_rq_end_io(struct request *rq, int uptodate)
1270{ 1248{
1271 Sg_request *srp = data; 1249 struct sg_request *srp = rq->end_io_data;
1272 Sg_device *sdp = NULL; 1250 Sg_device *sdp = NULL;
1273 Sg_fd *sfp; 1251 Sg_fd *sfp;
1274 unsigned long iflags; 1252 unsigned long iflags;
1275 unsigned int ms; 1253 unsigned int ms;
1254 char *sense;
1255 int result, resid;
1276 1256
1277 if (NULL == srp) { 1257 if (NULL == srp) {
1278 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1258 printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1266,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1286 return; 1266 return;
1287 } 1267 }
1288 1268
1269 sense = rq->sense;
1270 result = rq->errors;
1271 resid = rq->data_len;
1289 1272
1290 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1273 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1291 sdp->disk->disk_name, srp->header.pack_id, result)); 1274 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1296,7 +1279,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1296 if (0 != result) { 1279 if (0 != result) {
1297 struct scsi_sense_hdr sshdr; 1280 struct scsi_sense_hdr sshdr;
1298 1281
1299 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1300 srp->header.status = 0xff & result; 1282 srp->header.status = 0xff & result;
1301 srp->header.masked_status = status_byte(result); 1283 srp->header.masked_status = status_byte(result);
1302 srp->header.msg_status = msg_byte(result); 1284 srp->header.msg_status = msg_byte(result);
@@ -1467,12 +1449,10 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1467 if (sg_sysfs_valid) { 1449 if (sg_sysfs_valid) {
1468 struct device *sg_class_member; 1450 struct device *sg_class_member;
1469 1451
1470 sg_class_member = device_create_drvdata(sg_sysfs_class, 1452 sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
1471 cl_dev->parent, 1453 MKDEV(SCSI_GENERIC_MAJOR,
1472 MKDEV(SCSI_GENERIC_MAJOR, 1454 sdp->index),
1473 sdp->index), 1455 sdp, "%s", disk->disk_name);
1474 sdp,
1475 "%s", disk->disk_name);
1476 if (IS_ERR(sg_class_member)) { 1456 if (IS_ERR(sg_class_member)) {
1477 printk(KERN_ERR "sg_add: " 1457 printk(KERN_ERR "sg_add: "
1478 "device_create failed\n"); 1458 "device_create failed\n");
@@ -1634,37 +1614,79 @@ exit_sg(void)
1634 idr_destroy(&sg_index_idr); 1614 idr_destroy(&sg_index_idr);
1635} 1615}
1636 1616
1637static int 1617static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1638sg_start_req(Sg_request * srp)
1639{ 1618{
1640 int res; 1619 int res;
1620 struct request *rq;
1641 Sg_fd *sfp = srp->parentfp; 1621 Sg_fd *sfp = srp->parentfp;
1642 sg_io_hdr_t *hp = &srp->header; 1622 sg_io_hdr_t *hp = &srp->header;
1643 int dxfer_len = (int) hp->dxfer_len; 1623 int dxfer_len = (int) hp->dxfer_len;
1644 int dxfer_dir = hp->dxfer_direction; 1624 int dxfer_dir = hp->dxfer_direction;
1625 unsigned int iov_count = hp->iovec_count;
1645 Sg_scatter_hold *req_schp = &srp->data; 1626 Sg_scatter_hold *req_schp = &srp->data;
1646 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1627 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1628 struct request_queue *q = sfp->parentdp->device->request_queue;
1629 struct rq_map_data *md, map_data;
1630 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1631
1632 SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1633 dxfer_len));
1634
1635 rq = blk_get_request(q, rw, GFP_ATOMIC);
1636 if (!rq)
1637 return -ENOMEM;
1638
1639 memcpy(rq->cmd, cmd, hp->cmd_len);
1640
1641 rq->cmd_len = hp->cmd_len;
1642 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1643
1644 srp->rq = rq;
1645 rq->end_io_data = srp;
1646 rq->sense = srp->sense_b;
1647 rq->retries = SG_DEFAULT_RETRIES;
1647 1648
1648 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1649 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1649 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1650 return 0; 1650 return 0;
1651 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1651
1652 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1652 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1653 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1653 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1654 res = sg_build_direct(srp, sfp, dxfer_len); 1654 !sfp->parentdp->device->host->unchecked_isa_dma &&
1655 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1655 blk_rq_aligned(q, hp->dxferp, dxfer_len))
1656 return res; 1656 md = NULL;
1657 } 1657 else
1658 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1658 md = &map_data;
1659 sg_link_reserve(sfp, srp, dxfer_len); 1659
1660 else { 1660 if (md) {
1661 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1661 if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1662 if (res) { 1662 sg_link_reserve(sfp, srp, dxfer_len);
1663 sg_remove_scat(req_schp); 1663 else {
1664 return res; 1664 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1665 if (res)
1666 return res;
1665 } 1667 }
1668
1669 md->pages = req_schp->pages;
1670 md->page_order = req_schp->page_order;
1671 md->nr_entries = req_schp->k_use_sg;
1666 } 1672 }
1667 return 0; 1673
1674 if (iov_count)
1675 res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
1676 hp->dxfer_len, GFP_ATOMIC);
1677 else
1678 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1679 hp->dxfer_len, GFP_ATOMIC);
1680
1681 if (!res) {
1682 srp->bio = rq->bio;
1683
1684 if (!md) {
1685 req_schp->dio_in_use = 1;
1686 hp->info |= SG_INFO_DIRECT_IO;
1687 }
1688 }
1689 return res;
1668} 1690}
1669 1691
1670static void 1692static void
@@ -1678,186 +1700,37 @@ sg_finish_rem_req(Sg_request * srp)
1678 sg_unlink_reserve(sfp, srp); 1700 sg_unlink_reserve(sfp, srp);
1679 else 1701 else
1680 sg_remove_scat(req_schp); 1702 sg_remove_scat(req_schp);
1703
1704 if (srp->rq) {
1705 if (srp->bio)
1706 blk_rq_unmap_user(srp->bio);
1707
1708 blk_put_request(srp->rq);
1709 }
1710
1681 sg_remove_request(sfp, srp); 1711 sg_remove_request(sfp, srp);
1682} 1712}
1683 1713
1684static int 1714static int
1685sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1715sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1686{ 1716{
1687 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1717 int sg_bufflen = tablesize * sizeof(struct page *);
1688 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1718 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1689 1719
1690 /* 1720 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1691 * TODO: test without low_dma, we should not need it since 1721 if (!schp->pages)
1692 * the block layer will bounce the buffer for us
1693 *
1694 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1695 */
1696 if (sfp->low_dma)
1697 gfp_flags |= GFP_DMA;
1698 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1699 if (!schp->buffer)
1700 return -ENOMEM; 1722 return -ENOMEM;
1701 sg_init_table(schp->buffer, tablesize);
1702 schp->sglist_len = sg_bufflen; 1723 schp->sglist_len = sg_bufflen;
1703 return tablesize; /* number of scat_gath elements allocated */ 1724 return tablesize; /* number of scat_gath elements allocated */
1704} 1725}
1705 1726
1706#ifdef SG_ALLOW_DIO_CODE
1707/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1708 /* TODO: hopefully we can use the generic block layer code */
1709
1710/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1711 - mapping of all pages not successful
1712 (i.e., either completely successful or fails)
1713*/
1714static int
1715st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1716 unsigned long uaddr, size_t count, int rw)
1717{
1718 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1719 unsigned long start = uaddr >> PAGE_SHIFT;
1720 const int nr_pages = end - start;
1721 int res, i, j;
1722 struct page **pages;
1723
1724 /* User attempted Overflow! */
1725 if ((uaddr + count) < uaddr)
1726 return -EINVAL;
1727
1728 /* Too big */
1729 if (nr_pages > max_pages)
1730 return -ENOMEM;
1731
1732 /* Hmm? */
1733 if (count == 0)
1734 return 0;
1735
1736 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1737 return -ENOMEM;
1738
1739 /* Try to fault in all of the necessary pages */
1740 down_read(&current->mm->mmap_sem);
1741 /* rw==READ means read from drive, write into memory area */
1742 res = get_user_pages(
1743 current,
1744 current->mm,
1745 uaddr,
1746 nr_pages,
1747 rw == READ,
1748 0, /* don't force */
1749 pages,
1750 NULL);
1751 up_read(&current->mm->mmap_sem);
1752
1753 /* Errors and no page mapped should return here */
1754 if (res < nr_pages)
1755 goto out_unmap;
1756
1757 for (i=0; i < nr_pages; i++) {
1758 /* FIXME: flush superflous for rw==READ,
1759 * probably wrong function for rw==WRITE
1760 */
1761 flush_dcache_page(pages[i]);
1762 /* ?? Is locking needed? I don't think so */
1763 /* if (!trylock_page(pages[i]))
1764 goto out_unlock; */
1765 }
1766
1767 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1768 if (nr_pages > 1) {
1769 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1770 count -= sgl[0].length;
1771 for (i=1; i < nr_pages ; i++)
1772 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1773 }
1774 else {
1775 sgl[0].length = count;
1776 }
1777
1778 kfree(pages);
1779 return nr_pages;
1780
1781 out_unmap:
1782 if (res > 0) {
1783 for (j=0; j < res; j++)
1784 page_cache_release(pages[j]);
1785 res = 0;
1786 }
1787 kfree(pages);
1788 return res;
1789}
1790
1791
1792/* And unmap them... */
1793static int
1794st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1795 int dirtied)
1796{
1797 int i;
1798
1799 for (i=0; i < nr_pages; i++) {
1800 struct page *page = sg_page(&sgl[i]);
1801
1802 if (dirtied)
1803 SetPageDirty(page);
1804 /* unlock_page(page); */
1805 /* FIXME: cache flush missing for rw==READ
1806 * FIXME: call the correct reference counting function
1807 */
1808 page_cache_release(page);
1809 }
1810
1811 return 0;
1812}
1813
1814/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1815#endif
1816
1817
1818/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1819static int
1820sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1821{
1822#ifdef SG_ALLOW_DIO_CODE
1823 sg_io_hdr_t *hp = &srp->header;
1824 Sg_scatter_hold *schp = &srp->data;
1825 int sg_tablesize = sfp->parentdp->sg_tablesize;
1826 int mx_sc_elems, res;
1827 struct scsi_device *sdev = sfp->parentdp->device;
1828
1829 if (((unsigned long)hp->dxferp &
1830 queue_dma_alignment(sdev->request_queue)) != 0)
1831 return 1;
1832
1833 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1834 if (mx_sc_elems <= 0) {
1835 return 1;
1836 }
1837 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1838 (unsigned long)hp->dxferp, dxfer_len,
1839 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1840 if (res <= 0) {
1841 sg_remove_scat(schp);
1842 return 1;
1843 }
1844 schp->k_use_sg = res;
1845 schp->dio_in_use = 1;
1846 hp->info |= SG_INFO_DIRECT_IO;
1847 return 0;
1848#else
1849 return 1;
1850#endif
1851}
1852
1853static int 1727static int
1854sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1728sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1855{ 1729{
1856 struct scatterlist *sg; 1730 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1857 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1858 int sg_tablesize = sfp->parentdp->sg_tablesize; 1731 int sg_tablesize = sfp->parentdp->sg_tablesize;
1859 int blk_size = buff_size; 1732 int blk_size = buff_size, order;
1860 struct page *p = NULL; 1733 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1861 1734
1862 if (blk_size < 0) 1735 if (blk_size < 0)
1863 return -EFAULT; 1736 return -EFAULT;
@@ -1881,15 +1754,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1881 } else 1754 } else
1882 scatter_elem_sz_prev = num; 1755 scatter_elem_sz_prev = num;
1883 } 1756 }
1884 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1757
1885 (rem_sz > 0) && (k < mx_sc_elems); 1758 if (sfp->low_dma)
1886 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1759 gfp_mask |= GFP_DMA;
1887 1760
1761 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1762 gfp_mask |= __GFP_ZERO;
1763
1764 order = get_order(num);
1765retry:
1766 ret_sz = 1 << (PAGE_SHIFT + order);
1767
1768 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1769 k++, rem_sz -= ret_sz) {
1770
1888 num = (rem_sz > scatter_elem_sz_prev) ? 1771 num = (rem_sz > scatter_elem_sz_prev) ?
1889 scatter_elem_sz_prev : rem_sz; 1772 scatter_elem_sz_prev : rem_sz;
1890 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1773
1891 if (!p) 1774 schp->pages[k] = alloc_pages(gfp_mask, order);
1892 return -ENOMEM; 1775 if (!schp->pages[k])
1776 goto out;
1893 1777
1894 if (num == scatter_elem_sz_prev) { 1778 if (num == scatter_elem_sz_prev) {
1895 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1779 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1781,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1897 scatter_elem_sz_prev = ret_sz; 1781 scatter_elem_sz_prev = ret_sz;
1898 } 1782 }
1899 } 1783 }
1900 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1901 1784
1902 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1785 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1903 "ret_sz=%d\n", k, num, ret_sz)); 1786 "ret_sz=%d\n", k, num, ret_sz));
1904 } /* end of for loop */ 1787 } /* end of for loop */
1905 1788
1789 schp->page_order = order;
1906 schp->k_use_sg = k; 1790 schp->k_use_sg = k;
1907 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1791 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1908 "rem_sz=%d\n", k, rem_sz)); 1792 "rem_sz=%d\n", k, rem_sz));
@@ -1910,223 +1794,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1910 schp->bufflen = blk_size; 1794 schp->bufflen = blk_size;
1911 if (rem_sz > 0) /* must have failed */ 1795 if (rem_sz > 0) /* must have failed */
1912 return -ENOMEM; 1796 return -ENOMEM;
1913
1914 return 0; 1797 return 0;
1915} 1798out:
1916 1799 for (i = 0; i < k; i++)
1917static int 1800 __free_pages(schp->pages[k], order);
1918sg_write_xfer(Sg_request * srp)
1919{
1920 sg_io_hdr_t *hp = &srp->header;
1921 Sg_scatter_hold *schp = &srp->data;
1922 struct scatterlist *sg = schp->buffer;
1923 int num_xfer = 0;
1924 int j, k, onum, usglen, ksglen, res;
1925 int iovec_count = (int) hp->iovec_count;
1926 int dxfer_dir = hp->dxfer_direction;
1927 unsigned char *p;
1928 unsigned char __user *up;
1929 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1930
1931 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1932 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1933 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1934 if (schp->bufflen < num_xfer)
1935 num_xfer = schp->bufflen;
1936 }
1937 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1938 (new_interface
1939 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1940 return 0;
1941
1942 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1943 num_xfer, iovec_count, schp->k_use_sg));
1944 if (iovec_count) {
1945 onum = iovec_count;
1946 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1947 return -EFAULT;
1948 } else
1949 onum = 1;
1950
1951 ksglen = sg->length;
1952 p = page_address(sg_page(sg));
1953 for (j = 0, k = 0; j < onum; ++j) {
1954 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1955 if (res)
1956 return res;
1957
1958 for (; p; sg = sg_next(sg), ksglen = sg->length,
1959 p = page_address(sg_page(sg))) {
1960 if (usglen <= 0)
1961 break;
1962 if (ksglen > usglen) {
1963 if (usglen >= num_xfer) {
1964 if (__copy_from_user(p, up, num_xfer))
1965 return -EFAULT;
1966 return 0;
1967 }
1968 if (__copy_from_user(p, up, usglen))
1969 return -EFAULT;
1970 p += usglen;
1971 ksglen -= usglen;
1972 break;
1973 } else {
1974 if (ksglen >= num_xfer) {
1975 if (__copy_from_user(p, up, num_xfer))
1976 return -EFAULT;
1977 return 0;
1978 }
1979 if (__copy_from_user(p, up, ksglen))
1980 return -EFAULT;
1981 up += ksglen;
1982 usglen -= ksglen;
1983 }
1984 ++k;
1985 if (k >= schp->k_use_sg)
1986 return 0;
1987 }
1988 }
1989
1990 return 0;
1991}
1992 1801
1993static int 1802 if (--order >= 0)
1994sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1803 goto retry;
1995 int wr_xf, int *countp, unsigned char __user **up)
1996{
1997 int num_xfer = (int) hp->dxfer_len;
1998 unsigned char __user *p = hp->dxferp;
1999 int count;
2000 1804
2001 if (0 == sg_num) { 1805 return -ENOMEM;
2002 if (wr_xf && ('\0' == hp->interface_id))
2003 count = (int) hp->flags; /* holds "old" input_size */
2004 else
2005 count = num_xfer;
2006 } else {
2007 sg_iovec_t iovec;
2008 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2009 return -EFAULT;
2010 p = iovec.iov_base;
2011 count = (int) iovec.iov_len;
2012 }
2013 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2014 return -EFAULT;
2015 if (up)
2016 *up = p;
2017 if (countp)
2018 *countp = count;
2019 return 0;
2020} 1806}
2021 1807
2022static void 1808static void
2023sg_remove_scat(Sg_scatter_hold * schp) 1809sg_remove_scat(Sg_scatter_hold * schp)
2024{ 1810{
2025 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1811 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2026 if (schp->buffer && (schp->sglist_len > 0)) { 1812 if (schp->pages && schp->sglist_len > 0) {
2027 struct scatterlist *sg = schp->buffer; 1813 if (!schp->dio_in_use) {
2028
2029 if (schp->dio_in_use) {
2030#ifdef SG_ALLOW_DIO_CODE
2031 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2032#endif
2033 } else {
2034 int k; 1814 int k;
2035 1815
2036 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 1816 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2037 ++k, sg = sg_next(sg)) {
2038 SCSI_LOG_TIMEOUT(5, printk( 1817 SCSI_LOG_TIMEOUT(5, printk(
2039 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1818 "sg_remove_scat: k=%d, pg=0x%p\n",
2040 k, sg_page(sg), sg->length)); 1819 k, schp->pages[k]));
2041 sg_page_free(sg_page(sg), sg->length); 1820 __free_pages(schp->pages[k], schp->page_order);
2042 } 1821 }
2043 }
2044 kfree(schp->buffer);
2045 }
2046 memset(schp, 0, sizeof (*schp));
2047}
2048 1822
2049static int 1823 kfree(schp->pages);
2050sg_read_xfer(Sg_request * srp)
2051{
2052 sg_io_hdr_t *hp = &srp->header;
2053 Sg_scatter_hold *schp = &srp->data;
2054 struct scatterlist *sg = schp->buffer;
2055 int num_xfer = 0;
2056 int j, k, onum, usglen, ksglen, res;
2057 int iovec_count = (int) hp->iovec_count;
2058 int dxfer_dir = hp->dxfer_direction;
2059 unsigned char *p;
2060 unsigned char __user *up;
2061 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2062
2063 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2064 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2065 num_xfer = hp->dxfer_len;
2066 if (schp->bufflen < num_xfer)
2067 num_xfer = schp->bufflen;
2068 }
2069 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2070 (new_interface
2071 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2072 return 0;
2073
2074 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2075 num_xfer, iovec_count, schp->k_use_sg));
2076 if (iovec_count) {
2077 onum = iovec_count;
2078 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2079 return -EFAULT;
2080 } else
2081 onum = 1;
2082
2083 p = page_address(sg_page(sg));
2084 ksglen = sg->length;
2085 for (j = 0, k = 0; j < onum; ++j) {
2086 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2087 if (res)
2088 return res;
2089
2090 for (; p; sg = sg_next(sg), ksglen = sg->length,
2091 p = page_address(sg_page(sg))) {
2092 if (usglen <= 0)
2093 break;
2094 if (ksglen > usglen) {
2095 if (usglen >= num_xfer) {
2096 if (__copy_to_user(up, p, num_xfer))
2097 return -EFAULT;
2098 return 0;
2099 }
2100 if (__copy_to_user(up, p, usglen))
2101 return -EFAULT;
2102 p += usglen;
2103 ksglen -= usglen;
2104 break;
2105 } else {
2106 if (ksglen >= num_xfer) {
2107 if (__copy_to_user(up, p, num_xfer))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 if (__copy_to_user(up, p, ksglen))
2112 return -EFAULT;
2113 up += ksglen;
2114 usglen -= ksglen;
2115 }
2116 ++k;
2117 if (k >= schp->k_use_sg)
2118 return 0;
2119 } 1824 }
2120 } 1825 }
2121 1826 memset(schp, 0, sizeof (*schp));
2122 return 0;
2123} 1827}
2124 1828
2125static int 1829static int
2126sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 1830sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2127{ 1831{
2128 Sg_scatter_hold *schp = &srp->data; 1832 Sg_scatter_hold *schp = &srp->data;
2129 struct scatterlist *sg = schp->buffer;
2130 int k, num; 1833 int k, num;
2131 1834
2132 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 1835 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2134,15 +1837,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2134 if ((!outp) || (num_read_xfer <= 0)) 1837 if ((!outp) || (num_read_xfer <= 0))
2135 return 0; 1838 return 0;
2136 1839
2137 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 1840 num = 1 << (PAGE_SHIFT + schp->page_order);
2138 num = sg->length; 1841 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2139 if (num > num_read_xfer) { 1842 if (num > num_read_xfer) {
2140 if (__copy_to_user(outp, page_address(sg_page(sg)), 1843 if (__copy_to_user(outp, page_address(schp->pages[k]),
2141 num_read_xfer)) 1844 num_read_xfer))
2142 return -EFAULT; 1845 return -EFAULT;
2143 break; 1846 break;
2144 } else { 1847 } else {
2145 if (__copy_to_user(outp, page_address(sg_page(sg)), 1848 if (__copy_to_user(outp, page_address(schp->pages[k]),
2146 num)) 1849 num))
2147 return -EFAULT; 1850 return -EFAULT;
2148 num_read_xfer -= num; 1851 num_read_xfer -= num;
@@ -2177,24 +1880,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2177{ 1880{
2178 Sg_scatter_hold *req_schp = &srp->data; 1881 Sg_scatter_hold *req_schp = &srp->data;
2179 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1882 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2180 struct scatterlist *sg = rsv_schp->buffer;
2181 int k, num, rem; 1883 int k, num, rem;
2182 1884
2183 srp->res_used = 1; 1885 srp->res_used = 1;
2184 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 1886 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2185 rem = size; 1887 rem = size;
2186 1888
2187 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 1889 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2188 num = sg->length; 1890 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2189 if (rem <= num) { 1891 if (rem <= num) {
2190 sfp->save_scat_len = num;
2191 sg->length = rem;
2192 req_schp->k_use_sg = k + 1; 1892 req_schp->k_use_sg = k + 1;
2193 req_schp->sglist_len = rsv_schp->sglist_len; 1893 req_schp->sglist_len = rsv_schp->sglist_len;
2194 req_schp->buffer = rsv_schp->buffer; 1894 req_schp->pages = rsv_schp->pages;
2195 1895
2196 req_schp->bufflen = size; 1896 req_schp->bufflen = size;
2197 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 1897 req_schp->page_order = rsv_schp->page_order;
2198 break; 1898 break;
2199 } else 1899 } else
2200 rem -= num; 1900 rem -= num;
@@ -2208,22 +1908,13 @@ static void
2208sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 1908sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2209{ 1909{
2210 Sg_scatter_hold *req_schp = &srp->data; 1910 Sg_scatter_hold *req_schp = &srp->data;
2211 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2212 1911
2213 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 1912 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2214 (int) req_schp->k_use_sg)); 1913 (int) req_schp->k_use_sg));
2215 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2216 struct scatterlist *sg = rsv_schp->buffer;
2217
2218 if (sfp->save_scat_len > 0)
2219 (sg + (req_schp->k_use_sg - 1))->length =
2220 (unsigned) sfp->save_scat_len;
2221 else
2222 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2223 }
2224 req_schp->k_use_sg = 0; 1914 req_schp->k_use_sg = 0;
2225 req_schp->bufflen = 0; 1915 req_schp->bufflen = 0;
2226 req_schp->buffer = NULL; 1916 req_schp->pages = NULL;
1917 req_schp->page_order = 0;
2227 req_schp->sglist_len = 0; 1918 req_schp->sglist_len = 0;
2228 sfp->save_scat_len = 0; 1919 sfp->save_scat_len = 0;
2229 srp->res_used = 0; 1920 srp->res_used = 0;
@@ -2481,53 +2172,6 @@ sg_res_in_use(Sg_fd * sfp)
2481 return srp ? 1 : 0; 2172 return srp ? 1 : 0;
2482} 2173}
2483 2174
2484/* The size fetched (value output via retSzp) set when non-NULL return */
2485static struct page *
2486sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2487{
2488 struct page *resp = NULL;
2489 gfp_t page_mask;
2490 int order, a_size;
2491 int resSz;
2492
2493 if ((rqSz <= 0) || (NULL == retSzp))
2494 return resp;
2495
2496 if (lowDma)
2497 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2498 else
2499 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2500
2501 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2502 order++, a_size <<= 1) ;
2503 resSz = a_size; /* rounded up if necessary */
2504 resp = alloc_pages(page_mask, order);
2505 while ((!resp) && order) {
2506 --order;
2507 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2508 resp = alloc_pages(page_mask, order); /* try half */
2509 resSz = a_size;
2510 }
2511 if (resp) {
2512 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2513 memset(page_address(resp), 0, resSz);
2514 *retSzp = resSz;
2515 }
2516 return resp;
2517}
2518
2519static void
2520sg_page_free(struct page *page, int size)
2521{
2522 int order, a_size;
2523
2524 if (!page)
2525 return;
2526 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2527 order++, a_size <<= 1) ;
2528 __free_pages(page, order);
2529}
2530
2531#ifdef CONFIG_SCSI_PROC_FS 2175#ifdef CONFIG_SCSI_PROC_FS
2532static int 2176static int
2533sg_idr_max_id(int id, void *p, void *data) 2177sg_idr_max_id(int id, void *p, void *data)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..62b6633e3a97 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
331 331
332static int sr_prep_fn(struct request_queue *q, struct request *rq) 332static int sr_prep_fn(struct request_queue *q, struct request *rq)
333{ 333{
334 int block=0, this_count, s_size, timeout = SR_TIMEOUT; 334 int block = 0, this_count, s_size;
335 struct scsi_cd *cd; 335 struct scsi_cd *cd;
336 struct scsi_cmnd *SCpnt; 336 struct scsi_cmnd *SCpnt;
337 struct scsi_device *sdp = q->queuedata; 337 struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
461 SCpnt->transfersize = cd->device->sector_size; 461 SCpnt->transfersize = cd->device->sector_size;
462 SCpnt->underflow = this_count << 9; 462 SCpnt->underflow = this_count << 9;
463 SCpnt->allowed = MAX_RETRIES; 463 SCpnt->allowed = MAX_RETRIES;
464 SCpnt->timeout_per_command = timeout;
465 464
466 /* 465 /*
467 * This indicates that the command is ready from our end to be 466 * This indicates that the command is ready from our end to be
@@ -472,38 +471,31 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
472 return scsi_prep_return(q, rq, ret); 471 return scsi_prep_return(q, rq, ret);
473} 472}
474 473
475static int sr_block_open(struct inode *inode, struct file *file) 474static int sr_block_open(struct block_device *bdev, fmode_t mode)
476{ 475{
477 struct gendisk *disk = inode->i_bdev->bd_disk; 476 struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk);
478 struct scsi_cd *cd; 477 int ret = -ENXIO;
479 int ret = 0;
480
481 if(!(cd = scsi_cd_get(disk)))
482 return -ENXIO;
483
484 if((ret = cdrom_open(&cd->cdi, inode, file)) != 0)
485 scsi_cd_put(cd);
486 478
479 if (cd) {
480 ret = cdrom_open(&cd->cdi, bdev, mode);
481 if (ret)
482 scsi_cd_put(cd);
483 }
487 return ret; 484 return ret;
488} 485}
489 486
490static int sr_block_release(struct inode *inode, struct file *file) 487static int sr_block_release(struct gendisk *disk, fmode_t mode)
491{ 488{
492 int ret; 489 struct scsi_cd *cd = scsi_cd(disk);
493 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); 490 cdrom_release(&cd->cdi, mode);
494 ret = cdrom_release(&cd->cdi, file);
495 if(ret)
496 return ret;
497
498 scsi_cd_put(cd); 491 scsi_cd_put(cd);
499
500 return 0; 492 return 0;
501} 493}
502 494
503static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, 495static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
504 unsigned long arg) 496 unsigned long arg)
505{ 497{
506 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); 498 struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
507 struct scsi_device *sdev = cd->device; 499 struct scsi_device *sdev = cd->device;
508 void __user *argp = (void __user *)arg; 500 void __user *argp = (void __user *)arg;
509 int ret; 501 int ret;
@@ -518,7 +510,7 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
518 return scsi_ioctl(sdev, cmd, argp); 510 return scsi_ioctl(sdev, cmd, argp);
519 } 511 }
520 512
521 ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); 513 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
522 if (ret != -ENOSYS) 514 if (ret != -ENOSYS)
523 return ret; 515 return ret;
524 516
@@ -528,7 +520,8 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
528 * case fall through to scsi_ioctl, which will return ENDOEV again 520 * case fall through to scsi_ioctl, which will return ENDOEV again
529 * if it doesn't recognise the ioctl 521 * if it doesn't recognise the ioctl
530 */ 522 */
531 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL); 523 ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
524 (mode & FMODE_NDELAY_NOW) != 0);
532 if (ret != -ENODEV) 525 if (ret != -ENODEV)
533 return ret; 526 return ret;
534 return scsi_ioctl(sdev, cmd, argp); 527 return scsi_ioctl(sdev, cmd, argp);
@@ -545,7 +538,7 @@ static struct block_device_operations sr_bdops =
545 .owner = THIS_MODULE, 538 .owner = THIS_MODULE,
546 .open = sr_block_open, 539 .open = sr_block_open,
547 .release = sr_block_release, 540 .release = sr_block_release,
548 .ioctl = sr_block_ioctl, 541 .locked_ioctl = sr_block_ioctl,
549 .media_changed = sr_block_media_changed, 542 .media_changed = sr_block_media_changed,
550 /* 543 /*
551 * No compat_ioctl for now because sr_block_ioctl never 544 * No compat_ioctl for now because sr_block_ioctl never
@@ -620,6 +613,8 @@ static int sr_probe(struct device *dev)
620 disk->fops = &sr_bdops; 613 disk->fops = &sr_bdops;
621 disk->flags = GENHD_FL_CD; 614 disk->flags = GENHD_FL_CD;
622 615
616 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
617
623 cd->device = sdev; 618 cd->device = sdev;
624 cd->disk = disk; 619 cd->disk = disk;
625 cd->driver = &sr_template; 620 cd->driver = &sr_template;
@@ -878,7 +873,7 @@ static void sr_kref_release(struct kref *kref)
878 struct gendisk *disk = cd->disk; 873 struct gendisk *disk = cd->disk;
879 874
880 spin_lock(&sr_index_lock); 875 spin_lock(&sr_index_lock);
881 clear_bit(disk->first_minor, sr_index_bits); 876 clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
882 spin_unlock(&sr_index_lock); 877 spin_unlock(&sr_index_lock);
883 878
884 unregister_cdrom(&cd->cdi); 879 unregister_cdrom(&cd->cdi);
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 4eb3da996b36..4ad3e017213f 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -223,9 +223,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
223 no_multi = 1; 223 no_multi = 1;
224 break; 224 break;
225 } 225 }
226 min = BCD2BIN(buffer[15]); 226 min = bcd2bin(buffer[15]);
227 sec = BCD2BIN(buffer[16]); 227 sec = bcd2bin(buffer[16]);
228 frame = BCD2BIN(buffer[17]); 228 frame = bcd2bin(buffer[17]);
229 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; 229 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
230 break; 230 break;
231 } 231 }
@@ -252,9 +252,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
252 } 252 }
253 if (rc != 0) 253 if (rc != 0)
254 break; 254 break;
255 min = BCD2BIN(buffer[1]); 255 min = bcd2bin(buffer[1]);
256 sec = BCD2BIN(buffer[2]); 256 sec = bcd2bin(buffer[2]);
257 frame = BCD2BIN(buffer[3]); 257 frame = bcd2bin(buffer[3]);
258 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; 258 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
259 if (sector) 259 if (sector)
260 sector -= CD_MSF_OFFSET; 260 sector -= CD_MSF_OFFSET;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2bb53e3d941..c959bdc55f4f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3263,7 +3263,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3263 * may try and take the device offline, in which case all further 3263 * may try and take the device offline, in which case all further
3264 * access to the device is prohibited. 3264 * access to the device is prohibited.
3265 */ 3265 */
3266 retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, file); 3266 retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p,
3267 file->f_flags & O_NDELAY);
3267 if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV) 3268 if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV)
3268 goto out; 3269 goto out;
3269 retval = 0; 3270 retval = 0;
@@ -3567,8 +3568,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3567 !capable(CAP_SYS_RAWIO)) 3568 !capable(CAP_SYS_RAWIO))
3568 i = -EPERM; 3569 i = -EPERM;
3569 else 3570 else
3570 i = scsi_cmd_ioctl(file, STp->disk->queue, 3571 i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
3571 STp->disk, cmd_in, p); 3572 file->f_mode, cmd_in, p);
3572 if (i != -ENOTTY) 3573 if (i != -ENOTTY)
3573 return i; 3574 return i;
3574 break; 3575 break;
@@ -4428,13 +4429,10 @@ static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
4428 snprintf(name, 10, "%s%s%s", rew ? "n" : "", 4429 snprintf(name, 10, "%s%s%s", rew ? "n" : "",
4429 STp->disk->disk_name, st_formats[i]); 4430 STp->disk->disk_name, st_formats[i]);
4430 st_class_member = 4431 st_class_member =
4431 device_create_drvdata(st_sysfs_class, 4432 device_create(st_sysfs_class, &STp->device->sdev_gendev,
4432 &STp->device->sdev_gendev, 4433 MKDEV(SCSI_TAPE_MAJOR,
4433 MKDEV(SCSI_TAPE_MAJOR, 4434 TAPE_MINOR(dev_num, mode, rew)),
4434 TAPE_MINOR(dev_num, 4435 &STp->modes[mode], "%s", name);
4435 mode, rew)),
4436 &STp->modes[mode],
4437 "%s", name);
4438 if (IS_ERR(st_class_member)) { 4436 if (IS_ERR(st_class_member)) {
4439 printk(KERN_WARNING "st%d: device_create failed\n", 4437 printk(KERN_WARNING "st%d: device_create failed\n",
4440 dev_num); 4438 dev_num);
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 7514b3a0390e..34a99620e5bd 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -213,7 +213,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
213 esp->ops = &sun3x_esp_ops; 213 esp->ops = &sun3x_esp_ops;
214 214
215 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 215 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
216 if (!res && !res->start) 216 if (!res || !res->start)
217 goto fail_unlink; 217 goto fail_unlink;
218 218
219 esp->regs = ioremap_nocache(res->start, 0x20); 219 esp->regs = ioremap_nocache(res->start, 0x20);
@@ -221,7 +221,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
221 goto fail_unmap_regs; 221 goto fail_unmap_regs;
222 222
223 res = platform_get_resource(dev, IORESOURCE_MEM, 1); 223 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
224 if (!res && !res->start) 224 if (!res || !res->start)
225 goto fail_unmap_regs; 225 goto fail_unmap_regs;
226 226
227 esp->dma_regs = ioremap_nocache(res->start, 0x10); 227 esp->dma_regs = ioremap_nocache(res->start, 0x10);
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index f9cf70151366..3d73aad4bc82 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -1,6 +1,6 @@
1/* sun_esp.c: ESP front-end for Sparc SBUS systems. 1/* sun_esp.c: ESP front-end for Sparc SBUS systems.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -9,60 +9,70 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/dma-mapping.h>
13#include <linux/of.h>
14#include <linux/of_device.h>
12 15
13#include <asm/irq.h> 16#include <asm/irq.h>
14#include <asm/io.h> 17#include <asm/io.h>
15#include <asm/dma.h> 18#include <asm/dma.h>
16 19
17#include <asm/sbus.h>
18
19#include <scsi/scsi_host.h> 20#include <scsi/scsi_host.h>
20 21
21#include "esp_scsi.h" 22#include "esp_scsi.h"
22 23
23#define DRV_MODULE_NAME "sun_esp" 24#define DRV_MODULE_NAME "sun_esp"
24#define PFX DRV_MODULE_NAME ": " 25#define PFX DRV_MODULE_NAME ": "
25#define DRV_VERSION "1.000" 26#define DRV_VERSION "1.100"
26#define DRV_MODULE_RELDATE "April 19, 2007" 27#define DRV_MODULE_RELDATE "August 27, 2008"
27 28
28#define dma_read32(REG) \ 29#define dma_read32(REG) \
29 sbus_readl(esp->dma_regs + (REG)) 30 sbus_readl(esp->dma_regs + (REG))
30#define dma_write32(VAL, REG) \ 31#define dma_write32(VAL, REG) \
31 sbus_writel((VAL), esp->dma_regs + (REG)) 32 sbus_writel((VAL), esp->dma_regs + (REG))
32 33
33static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) 34/* DVMA chip revisions */
34{ 35enum dvma_rev {
35 struct sbus_dev *sdev = esp->dev; 36 dvmarev0,
36 struct sbus_dma *dma; 37 dvmaesc1,
38 dvmarev1,
39 dvmarev2,
40 dvmarev3,
41 dvmarevplus,
42 dvmahme
43};
37 44
38 if (dma_sdev != NULL) { 45static int __devinit esp_sbus_setup_dma(struct esp *esp,
39 for_each_dvma(dma) { 46 struct of_device *dma_of)
40 if (dma->sdev == dma_sdev) 47{
41 break; 48 esp->dma = dma_of;
42 }
43 } else {
44 for_each_dvma(dma) {
45 if (dma->sdev == NULL)
46 break;
47 49
48 /* If bus + slot are the same and it has the 50 esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
49 * correct OBP name, it's ours. 51 resource_size(&dma_of->resource[0]),
50 */ 52 "espdma");
51 if (sdev->bus == dma->sdev->bus && 53 if (!esp->dma_regs)
52 sdev->slot == dma->sdev->slot && 54 return -ENOMEM;
53 (!strcmp(dma->sdev->prom_name, "dma") ||
54 !strcmp(dma->sdev->prom_name, "espdma")))
55 break;
56 }
57 }
58 55
59 if (dma == NULL) { 56 switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
60 printk(KERN_ERR PFX "[%s] Cannot find dma.\n", 57 case DMA_VERS0:
61 sdev->ofdev.node->full_name); 58 esp->dmarev = dvmarev0;
62 return -ENODEV; 59 break;
60 case DMA_ESCV1:
61 esp->dmarev = dvmaesc1;
62 break;
63 case DMA_VERS1:
64 esp->dmarev = dvmarev1;
65 break;
66 case DMA_VERS2:
67 esp->dmarev = dvmarev2;
68 break;
69 case DMA_VERHME:
70 esp->dmarev = dvmahme;
71 break;
72 case DMA_VERSPLUS:
73 esp->dmarev = dvmarevplus;
74 break;
63 } 75 }
64 esp->dma = dma;
65 esp->dma_regs = dma->regs;
66 76
67 return 0; 77 return 0;
68 78
@@ -70,18 +80,18 @@ static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sde
70 80
71static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) 81static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
72{ 82{
73 struct sbus_dev *sdev = esp->dev; 83 struct of_device *op = esp->dev;
74 struct resource *res; 84 struct resource *res;
75 85
76 /* On HME, two reg sets exist, first is DVMA, 86 /* On HME, two reg sets exist, first is DVMA,
77 * second is ESP registers. 87 * second is ESP registers.
78 */ 88 */
79 if (hme) 89 if (hme)
80 res = &sdev->resource[1]; 90 res = &op->resource[1];
81 else 91 else
82 res = &sdev->resource[0]; 92 res = &op->resource[0];
83 93
84 esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); 94 esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
85 if (!esp->regs) 95 if (!esp->regs)
86 return -ENOMEM; 96 return -ENOMEM;
87 97
@@ -90,10 +100,11 @@ static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
90 100
91static int __devinit esp_sbus_map_command_block(struct esp *esp) 101static int __devinit esp_sbus_map_command_block(struct esp *esp)
92{ 102{
93 struct sbus_dev *sdev = esp->dev; 103 struct of_device *op = esp->dev;
94 104
95 esp->command_block = sbus_alloc_consistent(sdev, 16, 105 esp->command_block = dma_alloc_coherent(&op->dev, 16,
96 &esp->command_block_dma); 106 &esp->command_block_dma,
107 GFP_ATOMIC);
97 if (!esp->command_block) 108 if (!esp->command_block)
98 return -ENOMEM; 109 return -ENOMEM;
99 return 0; 110 return 0;
@@ -102,17 +113,18 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp)
102static int __devinit esp_sbus_register_irq(struct esp *esp) 113static int __devinit esp_sbus_register_irq(struct esp *esp)
103{ 114{
104 struct Scsi_Host *host = esp->host; 115 struct Scsi_Host *host = esp->host;
105 struct sbus_dev *sdev = esp->dev; 116 struct of_device *op = esp->dev;
106 117
107 host->irq = sdev->irqs[0]; 118 host->irq = op->irqs[0];
108 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); 119 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
109} 120}
110 121
111static void __devinit esp_get_scsi_id(struct esp *esp) 122static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
112{ 123{
113 struct sbus_dev *sdev = esp->dev; 124 struct of_device *op = esp->dev;
114 struct device_node *dp = sdev->ofdev.node; 125 struct device_node *dp;
115 126
127 dp = op->node;
116 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); 128 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
117 if (esp->scsi_id != 0xff) 129 if (esp->scsi_id != 0xff)
118 goto done; 130 goto done;
@@ -121,13 +133,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp)
121 if (esp->scsi_id != 0xff) 133 if (esp->scsi_id != 0xff)
122 goto done; 134 goto done;
123 135
124 if (!sdev->bus) { 136 esp->scsi_id = of_getintprop_default(espdma->node,
125 /* SUN4 */
126 esp->scsi_id = 7;
127 goto done;
128 }
129
130 esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
131 "scsi-initiator-id", 7); 137 "scsi-initiator-id", 7);
132 138
133done: 139done:
@@ -137,9 +143,10 @@ done:
137 143
138static void __devinit esp_get_differential(struct esp *esp) 144static void __devinit esp_get_differential(struct esp *esp)
139{ 145{
140 struct sbus_dev *sdev = esp->dev; 146 struct of_device *op = esp->dev;
141 struct device_node *dp = sdev->ofdev.node; 147 struct device_node *dp;
142 148
149 dp = op->node;
143 if (of_find_property(dp, "differential", NULL)) 150 if (of_find_property(dp, "differential", NULL))
144 esp->flags |= ESP_FLAG_DIFFERENTIAL; 151 esp->flags |= ESP_FLAG_DIFFERENTIAL;
145 else 152 else
@@ -148,43 +155,36 @@ static void __devinit esp_get_differential(struct esp *esp)
148 155
149static void __devinit esp_get_clock_params(struct esp *esp) 156static void __devinit esp_get_clock_params(struct esp *esp)
150{ 157{
151 struct sbus_dev *sdev = esp->dev; 158 struct of_device *op = esp->dev;
152 struct device_node *dp = sdev->ofdev.node; 159 struct device_node *bus_dp, *dp;
153 struct device_node *bus_dp;
154 int fmhz; 160 int fmhz;
155 161
156 bus_dp = NULL; 162 dp = op->node;
157 if (sdev != NULL && sdev->bus != NULL) 163 bus_dp = dp->parent;
158 bus_dp = sdev->bus->ofdev.node;
159 164
160 fmhz = of_getintprop_default(dp, "clock-frequency", 0); 165 fmhz = of_getintprop_default(dp, "clock-frequency", 0);
161 if (fmhz == 0) 166 if (fmhz == 0)
162 fmhz = (!bus_dp) ? 0 : 167 fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
163 of_getintprop_default(bus_dp, "clock-frequency", 0);
164 168
165 esp->cfreq = fmhz; 169 esp->cfreq = fmhz;
166} 170}
167 171
168static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) 172static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
169{ 173{
170 struct sbus_dev *sdev = esp->dev; 174 struct device_node *dma_dp = dma_of->node;
171 struct device_node *dp = sdev->ofdev.node; 175 struct of_device *op = esp->dev;
172 u8 bursts; 176 struct device_node *dp;
177 u8 bursts, val;
173 178
179 dp = op->node;
174 bursts = of_getintprop_default(dp, "burst-sizes", 0xff); 180 bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
175 if (dma) { 181 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
176 struct device_node *dma_dp = dma->ofdev.node; 182 if (val != 0xff)
177 u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); 183 bursts &= val;
178 if (val != 0xff)
179 bursts &= val;
180 }
181 184
182 if (sdev->bus) { 185 val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
183 u8 val = of_getintprop_default(sdev->bus->ofdev.node, 186 if (val != 0xff)
184 "burst-sizes", 0xff); 187 bursts &= val;
185 if (val != 0xff)
186 bursts &= val;
187 }
188 188
189 if (bursts == 0xff || 189 if (bursts == 0xff ||
190 (bursts & DMA_BURST16) == 0 || 190 (bursts & DMA_BURST16) == 0 ||
@@ -194,9 +194,9 @@ static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
194 esp->bursts = bursts; 194 esp->bursts = bursts;
195} 195}
196 196
197static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) 197static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
198{ 198{
199 esp_get_scsi_id(esp); 199 esp_get_scsi_id(esp, espdma);
200 esp_get_differential(esp); 200 esp_get_differential(esp);
201 esp_get_clock_params(esp); 201 esp_get_clock_params(esp);
202 esp_get_bursts(esp, espdma); 202 esp_get_bursts(esp, espdma);
@@ -215,25 +215,33 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
215static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, 215static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
216 size_t sz, int dir) 216 size_t sz, int dir)
217{ 217{
218 return sbus_map_single(esp->dev, buf, sz, dir); 218 struct of_device *op = esp->dev;
219
220 return dma_map_single(&op->dev, buf, sz, dir);
219} 221}
220 222
221static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, 223static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
222 int num_sg, int dir) 224 int num_sg, int dir)
223{ 225{
224 return sbus_map_sg(esp->dev, sg, num_sg, dir); 226 struct of_device *op = esp->dev;
227
228 return dma_map_sg(&op->dev, sg, num_sg, dir);
225} 229}
226 230
227static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, 231static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
228 size_t sz, int dir) 232 size_t sz, int dir)
229{ 233{
230 sbus_unmap_single(esp->dev, addr, sz, dir); 234 struct of_device *op = esp->dev;
235
236 dma_unmap_single(&op->dev, addr, sz, dir);
231} 237}
232 238
233static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, 239static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
234 int num_sg, int dir) 240 int num_sg, int dir)
235{ 241{
236 sbus_unmap_sg(esp->dev, sg, num_sg, dir); 242 struct of_device *op = esp->dev;
243
244 dma_unmap_sg(&op->dev, sg, num_sg, dir);
237} 245}
238 246
239static int sbus_esp_irq_pending(struct esp *esp) 247static int sbus_esp_irq_pending(struct esp *esp)
@@ -247,24 +255,26 @@ static void sbus_esp_reset_dma(struct esp *esp)
247{ 255{
248 int can_do_burst16, can_do_burst32, can_do_burst64; 256 int can_do_burst16, can_do_burst32, can_do_burst64;
249 int can_do_sbus64, lim; 257 int can_do_sbus64, lim;
258 struct of_device *op;
250 u32 val; 259 u32 val;
251 260
252 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; 261 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
253 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; 262 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
254 can_do_burst64 = 0; 263 can_do_burst64 = 0;
255 can_do_sbus64 = 0; 264 can_do_sbus64 = 0;
256 if (sbus_can_dma_64bit(esp->dev)) 265 op = esp->dev;
266 if (sbus_can_dma_64bit())
257 can_do_sbus64 = 1; 267 can_do_sbus64 = 1;
258 if (sbus_can_burst64(esp->sdev)) 268 if (sbus_can_burst64())
259 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; 269 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
260 270
261 /* Put the DVMA into a known state. */ 271 /* Put the DVMA into a known state. */
262 if (esp->dma->revision != dvmahme) { 272 if (esp->dmarev != dvmahme) {
263 val = dma_read32(DMA_CSR); 273 val = dma_read32(DMA_CSR);
264 dma_write32(val | DMA_RST_SCSI, DMA_CSR); 274 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
265 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 275 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
266 } 276 }
267 switch (esp->dma->revision) { 277 switch (esp->dmarev) {
268 case dvmahme: 278 case dvmahme:
269 dma_write32(DMA_RESET_FAS366, DMA_CSR); 279 dma_write32(DMA_RESET_FAS366, DMA_CSR);
270 dma_write32(DMA_RST_SCSI, DMA_CSR); 280 dma_write32(DMA_RST_SCSI, DMA_CSR);
@@ -282,7 +292,7 @@ static void sbus_esp_reset_dma(struct esp *esp)
282 292
283 if (can_do_sbus64) { 293 if (can_do_sbus64) {
284 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; 294 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
285 sbus_set_sbus64(esp->dev, esp->bursts); 295 sbus_set_sbus64(&op->dev, esp->bursts);
286 } 296 }
287 297
288 lim = 1000; 298 lim = 1000;
@@ -346,14 +356,14 @@ static void sbus_esp_dma_drain(struct esp *esp)
346 u32 csr; 356 u32 csr;
347 int lim; 357 int lim;
348 358
349 if (esp->dma->revision == dvmahme) 359 if (esp->dmarev == dvmahme)
350 return; 360 return;
351 361
352 csr = dma_read32(DMA_CSR); 362 csr = dma_read32(DMA_CSR);
353 if (!(csr & DMA_FIFO_ISDRAIN)) 363 if (!(csr & DMA_FIFO_ISDRAIN))
354 return; 364 return;
355 365
356 if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) 366 if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
357 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); 367 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
358 368
359 lim = 1000; 369 lim = 1000;
@@ -369,7 +379,7 @@ static void sbus_esp_dma_drain(struct esp *esp)
369 379
370static void sbus_esp_dma_invalidate(struct esp *esp) 380static void sbus_esp_dma_invalidate(struct esp *esp)
371{ 381{
372 if (esp->dma->revision == dvmahme) { 382 if (esp->dmarev == dvmahme) {
373 dma_write32(DMA_RST_SCSI, DMA_CSR); 383 dma_write32(DMA_RST_SCSI, DMA_CSR);
374 384
375 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | 385 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
@@ -440,7 +450,7 @@ static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
440 else 450 else
441 csr &= ~DMA_ST_WRITE; 451 csr &= ~DMA_ST_WRITE;
442 dma_write32(csr, DMA_CSR); 452 dma_write32(csr, DMA_CSR);
443 if (esp->dma->revision == dvmaesc1) { 453 if (esp->dmarev == dvmaesc1) {
444 u32 end = PAGE_ALIGN(addr + dma_count + 16U); 454 u32 end = PAGE_ALIGN(addr + dma_count + 16U);
445 dma_write32(end - addr, DMA_COUNT); 455 dma_write32(end - addr, DMA_COUNT);
446 } 456 }
@@ -476,10 +486,8 @@ static const struct esp_driver_ops sbus_esp_ops = {
476 .dma_error = sbus_esp_dma_error, 486 .dma_error = sbus_esp_dma_error,
477}; 487};
478 488
479static int __devinit esp_sbus_probe_one(struct device *dev, 489static int __devinit esp_sbus_probe_one(struct of_device *op,
480 struct sbus_dev *esp_dev, 490 struct of_device *espdma,
481 struct sbus_dev *espdma,
482 struct sbus_bus *sbus,
483 int hme) 491 int hme)
484{ 492{
485 struct scsi_host_template *tpnt = &scsi_esp_template; 493 struct scsi_host_template *tpnt = &scsi_esp_template;
@@ -497,13 +505,13 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
497 esp = shost_priv(host); 505 esp = shost_priv(host);
498 506
499 esp->host = host; 507 esp->host = host;
500 esp->dev = esp_dev; 508 esp->dev = op;
501 esp->ops = &sbus_esp_ops; 509 esp->ops = &sbus_esp_ops;
502 510
503 if (hme) 511 if (hme)
504 esp->flags |= ESP_FLAG_WIDE_CAPABLE; 512 esp->flags |= ESP_FLAG_WIDE_CAPABLE;
505 513
506 err = esp_sbus_find_dma(esp, espdma); 514 err = esp_sbus_setup_dma(esp, espdma);
507 if (err < 0) 515 if (err < 0)
508 goto fail_unlink; 516 goto fail_unlink;
509 517
@@ -525,15 +533,15 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
525 * come up with the reset bit set, so make sure that 533 * come up with the reset bit set, so make sure that
526 * is clear first. 534 * is clear first.
527 */ 535 */
528 if (esp->dma->revision == dvmaesc1) { 536 if (esp->dmarev == dvmaesc1) {
529 u32 val = dma_read32(DMA_CSR); 537 u32 val = dma_read32(DMA_CSR);
530 538
531 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 539 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
532 } 540 }
533 541
534 dev_set_drvdata(&esp_dev->ofdev.dev, esp); 542 dev_set_drvdata(&op->dev, esp);
535 543
536 err = scsi_esp_register(esp, dev); 544 err = scsi_esp_register(esp, &op->dev);
537 if (err) 545 if (err)
538 goto fail_free_irq; 546 goto fail_free_irq;
539 547
@@ -542,41 +550,46 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
542fail_free_irq: 550fail_free_irq:
543 free_irq(host->irq, esp); 551 free_irq(host->irq, esp);
544fail_unmap_command_block: 552fail_unmap_command_block:
545 sbus_free_consistent(esp->dev, 16, 553 dma_free_coherent(&op->dev, 16,
546 esp->command_block, 554 esp->command_block,
547 esp->command_block_dma); 555 esp->command_block_dma);
548fail_unmap_regs: 556fail_unmap_regs:
549 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 557 of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
550fail_unlink: 558fail_unlink:
551 scsi_host_put(host); 559 scsi_host_put(host);
552fail: 560fail:
553 return err; 561 return err;
554} 562}
555 563
556static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) 564static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
557{ 565{
558 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 566 struct device_node *dma_node = NULL;
559 struct device_node *dp = dev->node; 567 struct device_node *dp = op->node;
560 struct sbus_dev *dma_sdev = NULL; 568 struct of_device *dma_of = NULL;
561 int hme = 0; 569 int hme = 0;
562 570
563 if (dp->parent && 571 if (dp->parent &&
564 (!strcmp(dp->parent->name, "espdma") || 572 (!strcmp(dp->parent->name, "espdma") ||
565 !strcmp(dp->parent->name, "dma"))) 573 !strcmp(dp->parent->name, "dma")))
566 dma_sdev = sdev->parent; 574 dma_node = dp->parent;
567 else if (!strcmp(dp->name, "SUNW,fas")) { 575 else if (!strcmp(dp->name, "SUNW,fas")) {
568 dma_sdev = sdev; 576 dma_node = op->node;
569 hme = 1; 577 hme = 1;
570 } 578 }
579 if (dma_node)
580 dma_of = of_find_device_by_node(dma_node);
581 if (!dma_of)
582 return -ENODEV;
571 583
572 return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, 584 return esp_sbus_probe_one(op, dma_of, hme);
573 sdev->bus, hme);
574} 585}
575 586
576static int __devexit esp_sbus_remove(struct of_device *dev) 587static int __devexit esp_sbus_remove(struct of_device *op)
577{ 588{
578 struct esp *esp = dev_get_drvdata(&dev->dev); 589 struct esp *esp = dev_get_drvdata(&op->dev);
590 struct of_device *dma_of = esp->dma;
579 unsigned int irq = esp->host->irq; 591 unsigned int irq = esp->host->irq;
592 bool is_hme;
580 u32 val; 593 u32 val;
581 594
582 scsi_esp_unregister(esp); 595 scsi_esp_unregister(esp);
@@ -586,17 +599,25 @@ static int __devexit esp_sbus_remove(struct of_device *dev)
586 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); 599 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
587 600
588 free_irq(irq, esp); 601 free_irq(irq, esp);
589 sbus_free_consistent(esp->dev, 16, 602
590 esp->command_block, 603 is_hme = (esp->dmarev == dvmahme);
591 esp->command_block_dma); 604
592 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 605 dma_free_coherent(&op->dev, 16,
606 esp->command_block,
607 esp->command_block_dma);
608 of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
609 SBUS_ESP_REG_SIZE);
610 of_iounmap(&dma_of->resource[0], esp->dma_regs,
611 resource_size(&dma_of->resource[0]));
593 612
594 scsi_host_put(esp->host); 613 scsi_host_put(esp->host);
595 614
615 dev_set_drvdata(&op->dev, NULL);
616
596 return 0; 617 return 0;
597} 618}
598 619
599static struct of_device_id esp_match[] = { 620static const struct of_device_id esp_match[] = {
600 { 621 {
601 .name = "SUNW,esp", 622 .name = "SUNW,esp",
602 }, 623 },
@@ -619,7 +640,7 @@ static struct of_platform_driver esp_sbus_driver = {
619 640
620static int __init sunesp_init(void) 641static int __init sunesp_init(void)
621{ 642{
622 return of_register_driver(&esp_sbus_driver, &sbus_bus_type); 643 return of_register_driver(&esp_sbus_driver, &of_bus_type);
623} 644}
624 645
625static void __exit sunesp_exit(void) 646static void __exit sunesp_exit(void)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669b..f4e6cde1fd0d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
519 * Shorten our settle_time if needed for 519 * Shorten our settle_time if needed for
520 * this command not to time out. 520 * this command not to time out.
521 */ 521 */
522 if (np->s.settle_time_valid && cmd->timeout_per_command) { 522 if (np->s.settle_time_valid && cmd->request->timeout) {
523 unsigned long tlimit = jiffies + cmd->timeout_per_command; 523 unsigned long tlimit = jiffies + cmd->request->timeout;
524 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 524 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
525 if (time_after(np->s.settle_time, tlimit)) { 525 if (time_after(np->s.settle_time, tlimit)) {
526 np->s.settle_time = tlimit; 526 np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3f..69ac6e590f1d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
2573static int __init dc390_module_init(void) 2573static int __init dc390_module_init(void)
2574{ 2574{
2575 if (!disable_clustering) 2575 if (!disable_clustering)
2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n" 2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2577 "\twith \"disable_clustering=1\" and report to maintainers\n"); 2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2578 2578
2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) { 2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2580 tmscsim[0] = 7; 2580 tmscsim[0] = 7;