diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-18 19:38:59 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-18 19:38:59 -0400 |
commit | 675e0655c12209ba1f40af0dff7cd76b17a1315c (patch) | |
tree | c29b8ddd6fdbd66161e7150feee566daaebe36d3 | |
parent | d974f09ea4970d0299a8267111312b80adbd20e6 (diff) | |
parent | e7ca7f9fa2cda220ba807620c992ce77c33a32ea (diff) |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"First round of SCSI updates for the 4.6+ merge window.
This batch includes the usual quota of driver updates (bnx2fc, mpt3sas,
hpsa, ncr5380, lpfc, hisi_sas, snic, aacraid, megaraid_sas). There's
also a multiqueue update for scsi_debug, assorted bug fixes and a few
other minor updates (refactor of scsi_sg_pools into generic code, alua
and VPD updates, and struct timeval conversions)"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (138 commits)
mpt3sas: Used "synchronize_irq()"API to synchronize timed-out IO & TMs
mpt3sas: Set maximum transfer length per IO to 4MB for VDs
mpt3sas: Updating mpt3sas driver version to 13.100.00.00
mpt3sas: Fix initial Reference tag field for 4K PI drives.
mpt3sas: Handle active cable exception event
mpt3sas: Update MPI header to 2.00.42
Revert "lpfc: Delete unnecessary checks before the function call mempool_destroy"
eata_pio: missing break statement
hpsa: Fix type ZBC conditional checks
scsi_lib: Decode T10 vendor IDs
scsi_dh_alua: do not fail for unknown VPD identification
scsi_debug: use locally assigned naa
scsi_debug: uuid for lu name
scsi_debug: vpd and mode page work
scsi_debug: add multiple queue support
bfa: fix bfa_fcb_itnim_alloc() error handling
megaraid_sas: Downgrade two success messages to info
cxlflash: Fix to resolve dead-lock during EEH recovery
scsi_debug: rework resp_report_luns
scsi_debug: use pdt constants
...
117 files changed, 4839 insertions, 6481 deletions
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt index 3b80f567f818..fd880150aeea 100644 --- a/Documentation/scsi/g_NCR5380.txt +++ b/Documentation/scsi/g_NCR5380.txt | |||
@@ -23,11 +23,10 @@ supported by the driver. | |||
23 | 23 | ||
24 | If the default configuration does not work for you, you can use the kernel | 24 | If the default configuration does not work for you, you can use the kernel |
25 | command lines (eg using the lilo append command): | 25 | command lines (eg using the lilo append command): |
26 | ncr5380=port,irq,dma | 26 | ncr5380=addr,irq |
27 | ncr53c400=port,irq | 27 | ncr53c400=addr,irq |
28 | or | 28 | ncr53c400a=addr,irq |
29 | ncr5380=base,irq,dma | 29 | dtc3181e=addr,irq |
30 | ncr53c400=base,irq | ||
31 | 30 | ||
32 | The driver does not probe for any addresses or ports other than those in | 31 | The driver does not probe for any addresses or ports other than those in |
33 | the OVERRIDE or given to the kernel as above. | 32 | the OVERRIDE or given to the kernel as above. |
@@ -36,19 +35,17 @@ This driver provides some information on what it has detected in | |||
36 | /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot | 35 | /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot |
37 | time. More info to come in the future. | 36 | time. More info to come in the future. |
38 | 37 | ||
39 | When NCR53c400 support is compiled in, BIOS parameters will be returned by | ||
40 | the driver (the raw 5380 driver does not and I don't plan to fiddle with | ||
41 | it!). | ||
42 | |||
43 | This driver works as a module. | 38 | This driver works as a module. |
44 | When included as a module, parameters can be passed on the insmod/modprobe | 39 | When included as a module, parameters can be passed on the insmod/modprobe |
45 | command line: | 40 | command line: |
46 | ncr_irq=xx the interrupt | 41 | ncr_irq=xx the interrupt |
47 | ncr_addr=xx the port or base address (for port or memory | 42 | ncr_addr=xx the port or base address (for port or memory |
48 | mapped, resp.) | 43 | mapped, resp.) |
49 | ncr_dma=xx the DMA | ||
50 | ncr_5380=1 to set up for a NCR5380 board | 44 | ncr_5380=1 to set up for a NCR5380 board |
51 | ncr_53c400=1 to set up for a NCR53C400 board | 45 | ncr_53c400=1 to set up for a NCR53C400 board |
46 | ncr_53c400a=1 to set up for a NCR53C400A board | ||
47 | dtc_3181e=1 to set up for a Domex Technology Corp 3181E board | ||
48 | hp_c2502=1 to set up for a Hewlett Packard C2502 board | ||
52 | e.g. | 49 | e.g. |
53 | modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 | 50 | modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 |
54 | for a port mapped NCR5380 board or | 51 | for a port mapped NCR5380 board or |
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt index 2bfd6f6d2d3d..1241ac11edb1 100644 --- a/Documentation/scsi/scsi-parameters.txt +++ b/Documentation/scsi/scsi-parameters.txt | |||
@@ -27,13 +27,15 @@ parameters may be changed at runtime by the command | |||
27 | aic79xx= [HW,SCSI] | 27 | aic79xx= [HW,SCSI] |
28 | See Documentation/scsi/aic79xx.txt. | 28 | See Documentation/scsi/aic79xx.txt. |
29 | 29 | ||
30 | atascsi= [HW,SCSI] Atari SCSI | 30 | atascsi= [HW,SCSI] |
31 | See drivers/scsi/atari_scsi.c. | ||
31 | 32 | ||
32 | BusLogic= [HW,SCSI] | 33 | BusLogic= [HW,SCSI] |
33 | See drivers/scsi/BusLogic.c, comment before function | 34 | See drivers/scsi/BusLogic.c, comment before function |
34 | BusLogic_ParseDriverOptions(). | 35 | BusLogic_ParseDriverOptions(). |
35 | 36 | ||
36 | dtc3181e= [HW,SCSI] | 37 | dtc3181e= [HW,SCSI] |
38 | See Documentation/scsi/g_NCR5380.txt. | ||
37 | 39 | ||
38 | eata= [HW,SCSI] | 40 | eata= [HW,SCSI] |
39 | 41 | ||
@@ -51,8 +53,8 @@ parameters may be changed at runtime by the command | |||
51 | ips= [HW,SCSI] Adaptec / IBM ServeRAID controller | 53 | ips= [HW,SCSI] Adaptec / IBM ServeRAID controller |
52 | See header of drivers/scsi/ips.c. | 54 | See header of drivers/scsi/ips.c. |
53 | 55 | ||
54 | mac5380= [HW,SCSI] Format: | 56 | mac5380= [HW,SCSI] |
55 | <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> | 57 | See drivers/scsi/mac_scsi.c. |
56 | 58 | ||
57 | max_luns= [SCSI] Maximum number of LUNs to probe. | 59 | max_luns= [SCSI] Maximum number of LUNs to probe. |
58 | Should be between 1 and 2^32-1. | 60 | Should be between 1 and 2^32-1. |
@@ -65,10 +67,13 @@ parameters may be changed at runtime by the command | |||
65 | See header of drivers/scsi/NCR_D700.c. | 67 | See header of drivers/scsi/NCR_D700.c. |
66 | 68 | ||
67 | ncr5380= [HW,SCSI] | 69 | ncr5380= [HW,SCSI] |
70 | See Documentation/scsi/g_NCR5380.txt. | ||
68 | 71 | ||
69 | ncr53c400= [HW,SCSI] | 72 | ncr53c400= [HW,SCSI] |
73 | See Documentation/scsi/g_NCR5380.txt. | ||
70 | 74 | ||
71 | ncr53c400a= [HW,SCSI] | 75 | ncr53c400a= [HW,SCSI] |
76 | See Documentation/scsi/g_NCR5380.txt. | ||
72 | 77 | ||
73 | ncr53c406a= [HW,SCSI] | 78 | ncr53c406a= [HW,SCSI] |
74 | 79 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 804bc4fd154f..c7dd1a3401e5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -7593,10 +7593,10 @@ M: Michael Schmitz <schmitzmic@gmail.com> | |||
7593 | L: linux-scsi@vger.kernel.org | 7593 | L: linux-scsi@vger.kernel.org |
7594 | S: Maintained | 7594 | S: Maintained |
7595 | F: Documentation/scsi/g_NCR5380.txt | 7595 | F: Documentation/scsi/g_NCR5380.txt |
7596 | F: Documentation/scsi/dtc3x80.txt | ||
7596 | F: drivers/scsi/NCR5380.* | 7597 | F: drivers/scsi/NCR5380.* |
7597 | F: drivers/scsi/arm/cumana_1.c | 7598 | F: drivers/scsi/arm/cumana_1.c |
7598 | F: drivers/scsi/arm/oak.c | 7599 | F: drivers/scsi/arm/oak.c |
7599 | F: drivers/scsi/atari_NCR5380.c | ||
7600 | F: drivers/scsi/atari_scsi.* | 7600 | F: drivers/scsi/atari_scsi.* |
7601 | F: drivers/scsi/dmx3191d.c | 7601 | F: drivers/scsi/dmx3191d.c |
7602 | F: drivers/scsi/dtc.* | 7602 | F: drivers/scsi/dtc.* |
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index d7c732042a4f..188f2f2eb21f 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c | |||
@@ -294,7 +294,7 @@ static int icside_dma_init(struct pata_icside_info *info) | |||
294 | 294 | ||
295 | static struct scsi_host_template pata_icside_sht = { | 295 | static struct scsi_host_template pata_icside_sht = { |
296 | ATA_BASE_SHT(DRV_NAME), | 296 | ATA_BASE_SHT(DRV_NAME), |
297 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 297 | .sg_tablesize = SG_MAX_SEGMENTS, |
298 | .dma_boundary = IOMD_DMA_BOUNDARY, | 298 | .dma_boundary = IOMD_DMA_BOUNDARY, |
299 | }; | 299 | }; |
300 | 300 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index b6bf20496021..369a75e1f44e 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(cmd_sg_entries, | |||
81 | 81 | ||
82 | module_param(indirect_sg_entries, uint, 0444); | 82 | module_param(indirect_sg_entries, uint, 0444); |
83 | MODULE_PARM_DESC(indirect_sg_entries, | 83 | MODULE_PARM_DESC(indirect_sg_entries, |
84 | "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); | 84 | "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); |
85 | 85 | ||
86 | module_param(allow_ext_sg, bool, 0444); | 86 | module_param(allow_ext_sg, bool, 0444); |
87 | MODULE_PARM_DESC(allow_ext_sg, | 87 | MODULE_PARM_DESC(allow_ext_sg, |
@@ -2819,7 +2819,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) | |||
2819 | spin_unlock(&host->target_lock); | 2819 | spin_unlock(&host->target_lock); |
2820 | 2820 | ||
2821 | scsi_scan_target(&target->scsi_host->shost_gendev, | 2821 | scsi_scan_target(&target->scsi_host->shost_gendev, |
2822 | 0, target->scsi_id, SCAN_WILD_CARD, 0); | 2822 | 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); |
2823 | 2823 | ||
2824 | if (srp_connected_ch(target) < target->ch_count || | 2824 | if (srp_connected_ch(target) < target->ch_count || |
2825 | target->qp_in_error) { | 2825 | target->qp_in_error) { |
@@ -3097,7 +3097,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) | |||
3097 | 3097 | ||
3098 | case SRP_OPT_SG_TABLESIZE: | 3098 | case SRP_OPT_SG_TABLESIZE: |
3099 | if (match_int(args, &token) || token < 1 || | 3099 | if (match_int(args, &token) || token < 1 || |
3100 | token > SCSI_MAX_SG_CHAIN_SEGMENTS) { | 3100 | token > SG_MAX_SEGMENTS) { |
3101 | pr_warn("bad max sg_tablesize parameter '%s'\n", | 3101 | pr_warn("bad max sg_tablesize parameter '%s'\n", |
3102 | p); | 3102 | p); |
3103 | goto out; | 3103 | goto out; |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7ebccfa8072a..7ee1667acde4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -2281,7 +2281,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2281 | 2281 | ||
2282 | dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), | 2282 | dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), |
2283 | blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); | 2283 | blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); |
2284 | if (!dma_addr_out) | 2284 | if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out)) |
2285 | goto put_mf; | 2285 | goto put_mf; |
2286 | ioc->add_sge(psge, flagsLength, dma_addr_out); | 2286 | ioc->add_sge(psge, flagsLength, dma_addr_out); |
2287 | psge += ioc->SGE_size; | 2287 | psge += ioc->SGE_size; |
@@ -2296,7 +2296,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2296 | flagsLength |= blk_rq_bytes(rsp) + 4; | 2296 | flagsLength |= blk_rq_bytes(rsp) + 4; |
2297 | dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), | 2297 | dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), |
2298 | blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); | 2298 | blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); |
2299 | if (!dma_addr_in) | 2299 | if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in)) |
2300 | goto unmap; | 2300 | goto unmap; |
2301 | ioc->add_sge(psge, flagsLength, dma_addr_in); | 2301 | ioc->add_sge(psge, flagsLength, dma_addr_in); |
2302 | 2302 | ||
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 613231c16194..031e088edb5e 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
@@ -1150,7 +1150,7 @@ static void mpt_work_wrapper(struct work_struct *work) | |||
1150 | } | 1150 | } |
1151 | shost_printk(KERN_INFO, shost, MYIOC_s_FMT | 1151 | shost_printk(KERN_INFO, shost, MYIOC_s_FMT |
1152 | "Integrated RAID detects new device %d\n", ioc->name, disk); | 1152 | "Integrated RAID detects new device %d\n", ioc->name, disk); |
1153 | scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); | 1153 | scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | 1156 | ||
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 157d3d203ba1..9310a547b89f 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c | |||
@@ -26,7 +26,8 @@ void zfcp_unit_scsi_scan(struct zfcp_unit *unit) | |||
26 | lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); | 26 | lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); |
27 | 27 | ||
28 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) | 28 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) |
29 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); | 29 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, |
30 | SCSI_SCAN_MANUAL); | ||
30 | } | 31 | } |
31 | 32 | ||
32 | static void zfcp_unit_scsi_scan_work(struct work_struct *work) | 33 | static void zfcp_unit_scsi_scan_work(struct work_struct *work) |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e80768f8e579..98e5d51a3346 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -17,6 +17,7 @@ config SCSI | |||
17 | tristate "SCSI device support" | 17 | tristate "SCSI device support" |
18 | depends on BLOCK | 18 | depends on BLOCK |
19 | select SCSI_DMA if HAS_DMA | 19 | select SCSI_DMA if HAS_DMA |
20 | select SG_POOL | ||
20 | ---help--- | 21 | ---help--- |
21 | If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or | 22 | If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or |
22 | any other SCSI device under Linux, say Y and make sure that you know | 23 | any other SCSI device under Linux, say Y and make sure that you know |
@@ -202,12 +203,12 @@ config SCSI_ENCLOSURE | |||
202 | certain enclosure conditions to be reported and is not required. | 203 | certain enclosure conditions to be reported and is not required. |
203 | 204 | ||
204 | config SCSI_CONSTANTS | 205 | config SCSI_CONSTANTS |
205 | bool "Verbose SCSI error reporting (kernel size +=75K)" | 206 | bool "Verbose SCSI error reporting (kernel size += 36K)" |
206 | depends on SCSI | 207 | depends on SCSI |
207 | help | 208 | help |
208 | The error messages regarding your SCSI hardware will be easier to | 209 | The error messages regarding your SCSI hardware will be easier to |
209 | understand if you say Y here; it will enlarge your kernel by about | 210 | understand if you say Y here; it will enlarge your kernel by about |
210 | 75 KB. If in doubt, say Y. | 211 | 36 KB. If in doubt, say Y. |
211 | 212 | ||
212 | config SCSI_LOGGING | 213 | config SCSI_LOGGING |
213 | bool "SCSI logging facility" | 214 | bool "SCSI logging facility" |
@@ -813,17 +814,6 @@ config SCSI_GENERIC_NCR5380_MMIO | |||
813 | To compile this driver as a module, choose M here: the | 814 | To compile this driver as a module, choose M here: the |
814 | module will be called g_NCR5380_mmio. | 815 | module will be called g_NCR5380_mmio. |
815 | 816 | ||
816 | config SCSI_GENERIC_NCR53C400 | ||
817 | bool "Enable NCR53c400 extensions" | ||
818 | depends on SCSI_GENERIC_NCR5380 | ||
819 | help | ||
820 | This enables certain optimizations for the NCR53c400 SCSI cards. | ||
821 | You might as well try it out. Note that this driver will only probe | ||
822 | for the Trantor T130B in its default configuration; you might have | ||
823 | to pass a command line option to the kernel at boot time if it does | ||
824 | not detect your card. See the file | ||
825 | <file:Documentation/scsi/g_NCR5380.txt> for details. | ||
826 | |||
827 | config SCSI_IPS | 817 | config SCSI_IPS |
828 | tristate "IBM ServeRAID support" | 818 | tristate "IBM ServeRAID support" |
829 | depends on PCI && SCSI | 819 | depends on PCI && SCSI |
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 3eff2a69fe08..43908bbb3b23 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -29,29 +29,9 @@ | |||
29 | * Ronald van Cuijlenborg, Alan Cox and others. | 29 | * Ronald van Cuijlenborg, Alan Cox and others. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | /* | 32 | /* Ported to Atari by Roman Hodek and others. */ |
33 | * Further development / testing that should be done : | ||
34 | * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete | ||
35 | * code so that everything does the same thing that's done at the | ||
36 | * end of a pseudo-DMA read operation. | ||
37 | * | ||
38 | * 2. Fix REAL_DMA (interrupt driven, polled works fine) - | ||
39 | * basically, transfer size needs to be reduced by one | ||
40 | * and the last byte read as is done with PSEUDO_DMA. | ||
41 | * | ||
42 | * 4. Test SCSI-II tagged queueing (I have no devices which support | ||
43 | * tagged queueing) | ||
44 | */ | ||
45 | 33 | ||
46 | #ifndef notyet | 34 | /* Adapted for the Sun 3 by Sam Creasey. */ |
47 | #undef REAL_DMA | ||
48 | #endif | ||
49 | |||
50 | #ifdef BOARD_REQUIRES_NO_DELAY | ||
51 | #define io_recovery_delay(x) | ||
52 | #else | ||
53 | #define io_recovery_delay(x) udelay(x) | ||
54 | #endif | ||
55 | 35 | ||
56 | /* | 36 | /* |
57 | * Design | 37 | * Design |
@@ -126,17 +106,10 @@ | |||
126 | * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential | 106 | * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential |
127 | * transceivers. | 107 | * transceivers. |
128 | * | 108 | * |
129 | * DONT_USE_INTR - if defined, never use interrupts, even if we probe or | ||
130 | * override-configure an IRQ. | ||
131 | * | ||
132 | * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. | 109 | * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. |
133 | * | 110 | * |
134 | * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. | 111 | * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. |
135 | * | 112 | * |
136 | * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't | ||
137 | * rely on phase mismatch and EOP interrupts to determine end | ||
138 | * of phase. | ||
139 | * | ||
140 | * These macros MUST be defined : | 113 | * These macros MUST be defined : |
141 | * | 114 | * |
142 | * NCR5380_read(register) - read from the specified register | 115 | * NCR5380_read(register) - read from the specified register |
@@ -147,29 +120,29 @@ | |||
147 | * specific implementation of the NCR5380 | 120 | * specific implementation of the NCR5380 |
148 | * | 121 | * |
149 | * Either real DMA *or* pseudo DMA may be implemented | 122 | * Either real DMA *or* pseudo DMA may be implemented |
150 | * REAL functions : | ||
151 | * NCR5380_REAL_DMA should be defined if real DMA is to be used. | ||
152 | * Note that the DMA setup functions should return the number of bytes | ||
153 | * that they were able to program the controller for. | ||
154 | * | ||
155 | * Also note that generic i386/PC versions of these macros are | ||
156 | * available as NCR5380_i386_dma_write_setup, | ||
157 | * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. | ||
158 | * | 123 | * |
159 | * NCR5380_dma_write_setup(instance, src, count) - initialize | 124 | * NCR5380_dma_write_setup(instance, src, count) - initialize |
160 | * NCR5380_dma_read_setup(instance, dst, count) - initialize | 125 | * NCR5380_dma_read_setup(instance, dst, count) - initialize |
161 | * NCR5380_dma_residual(instance); - residual count | 126 | * NCR5380_dma_residual(instance); - residual count |
162 | * | 127 | * |
163 | * PSEUDO functions : | ||
164 | * NCR5380_pwrite(instance, src, count) | ||
165 | * NCR5380_pread(instance, dst, count); | ||
166 | * | ||
167 | * The generic driver is initialized by calling NCR5380_init(instance), | 128 | * The generic driver is initialized by calling NCR5380_init(instance), |
168 | * after setting the appropriate host specific fields and ID. If the | 129 | * after setting the appropriate host specific fields and ID. If the |
169 | * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, | 130 | * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, |
170 | * possible) function may be used. | 131 | * possible) function may be used. |
171 | */ | 132 | */ |
172 | 133 | ||
134 | #ifndef NCR5380_io_delay | ||
135 | #define NCR5380_io_delay(x) | ||
136 | #endif | ||
137 | |||
138 | #ifndef NCR5380_acquire_dma_irq | ||
139 | #define NCR5380_acquire_dma_irq(x) (1) | ||
140 | #endif | ||
141 | |||
142 | #ifndef NCR5380_release_dma_irq | ||
143 | #define NCR5380_release_dma_irq(x) | ||
144 | #endif | ||
145 | |||
173 | static int do_abort(struct Scsi_Host *); | 146 | static int do_abort(struct Scsi_Host *); |
174 | static void do_reset(struct Scsi_Host *); | 147 | static void do_reset(struct Scsi_Host *); |
175 | 148 | ||
@@ -280,12 +253,20 @@ static struct { | |||
280 | {0, NULL} | 253 | {0, NULL} |
281 | }, | 254 | }, |
282 | basrs[] = { | 255 | basrs[] = { |
256 | {BASR_END_DMA_TRANSFER, "END OF DMA"}, | ||
257 | {BASR_DRQ, "DRQ"}, | ||
258 | {BASR_PARITY_ERROR, "PARITY ERROR"}, | ||
259 | {BASR_IRQ, "IRQ"}, | ||
260 | {BASR_PHASE_MATCH, "PHASE MATCH"}, | ||
261 | {BASR_BUSY_ERROR, "BUSY ERROR"}, | ||
283 | {BASR_ATN, "ATN"}, | 262 | {BASR_ATN, "ATN"}, |
284 | {BASR_ACK, "ACK"}, | 263 | {BASR_ACK, "ACK"}, |
285 | {0, NULL} | 264 | {0, NULL} |
286 | }, | 265 | }, |
287 | icrs[] = { | 266 | icrs[] = { |
288 | {ICR_ASSERT_RST, "ASSERT RST"}, | 267 | {ICR_ASSERT_RST, "ASSERT RST"}, |
268 | {ICR_ARBITRATION_PROGRESS, "ARB. IN PROGRESS"}, | ||
269 | {ICR_ARBITRATION_LOST, "LOST ARB."}, | ||
289 | {ICR_ASSERT_ACK, "ASSERT ACK"}, | 270 | {ICR_ASSERT_ACK, "ASSERT ACK"}, |
290 | {ICR_ASSERT_BSY, "ASSERT BSY"}, | 271 | {ICR_ASSERT_BSY, "ASSERT BSY"}, |
291 | {ICR_ASSERT_SEL, "ASSERT SEL"}, | 272 | {ICR_ASSERT_SEL, "ASSERT SEL"}, |
@@ -294,14 +275,14 @@ icrs[] = { | |||
294 | {0, NULL} | 275 | {0, NULL} |
295 | }, | 276 | }, |
296 | mrs[] = { | 277 | mrs[] = { |
297 | {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, | 278 | {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"}, |
298 | {MR_TARGET, "MODE TARGET"}, | 279 | {MR_TARGET, "TARGET"}, |
299 | {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, | 280 | {MR_ENABLE_PAR_CHECK, "PARITY CHECK"}, |
300 | {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, | 281 | {MR_ENABLE_PAR_INTR, "PARITY INTR"}, |
301 | {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, | 282 | {MR_ENABLE_EOP_INTR, "EOP INTR"}, |
302 | {MR_MONITOR_BSY, "MODE MONITOR BSY"}, | 283 | {MR_MONITOR_BSY, "MONITOR BSY"}, |
303 | {MR_DMA_MODE, "MODE DMA"}, | 284 | {MR_DMA_MODE, "DMA MODE"}, |
304 | {MR_ARBITRATE, "MODE ARBITRATION"}, | 285 | {MR_ARBITRATE, "ARBITRATE"}, |
305 | {0, NULL} | 286 | {0, NULL} |
306 | }; | 287 | }; |
307 | 288 | ||
@@ -322,23 +303,23 @@ static void NCR5380_print(struct Scsi_Host *instance) | |||
322 | icr = NCR5380_read(INITIATOR_COMMAND_REG); | 303 | icr = NCR5380_read(INITIATOR_COMMAND_REG); |
323 | basr = NCR5380_read(BUS_AND_STATUS_REG); | 304 | basr = NCR5380_read(BUS_AND_STATUS_REG); |
324 | 305 | ||
325 | printk("STATUS_REG: %02x ", status); | 306 | printk(KERN_DEBUG "SR = 0x%02x : ", status); |
326 | for (i = 0; signals[i].mask; ++i) | 307 | for (i = 0; signals[i].mask; ++i) |
327 | if (status & signals[i].mask) | 308 | if (status & signals[i].mask) |
328 | printk(",%s", signals[i].name); | 309 | printk(KERN_CONT "%s, ", signals[i].name); |
329 | printk("\nBASR: %02x ", basr); | 310 | printk(KERN_CONT "\nBASR = 0x%02x : ", basr); |
330 | for (i = 0; basrs[i].mask; ++i) | 311 | for (i = 0; basrs[i].mask; ++i) |
331 | if (basr & basrs[i].mask) | 312 | if (basr & basrs[i].mask) |
332 | printk(",%s", basrs[i].name); | 313 | printk(KERN_CONT "%s, ", basrs[i].name); |
333 | printk("\nICR: %02x ", icr); | 314 | printk(KERN_CONT "\nICR = 0x%02x : ", icr); |
334 | for (i = 0; icrs[i].mask; ++i) | 315 | for (i = 0; icrs[i].mask; ++i) |
335 | if (icr & icrs[i].mask) | 316 | if (icr & icrs[i].mask) |
336 | printk(",%s", icrs[i].name); | 317 | printk(KERN_CONT "%s, ", icrs[i].name); |
337 | printk("\nMODE: %02x ", mr); | 318 | printk(KERN_CONT "\nMR = 0x%02x : ", mr); |
338 | for (i = 0; mrs[i].mask; ++i) | 319 | for (i = 0; mrs[i].mask; ++i) |
339 | if (mr & mrs[i].mask) | 320 | if (mr & mrs[i].mask) |
340 | printk(",%s", mrs[i].name); | 321 | printk(KERN_CONT "%s, ", mrs[i].name); |
341 | printk("\n"); | 322 | printk(KERN_CONT "\n"); |
342 | } | 323 | } |
343 | 324 | ||
344 | static struct { | 325 | static struct { |
@@ -477,52 +458,18 @@ static void prepare_info(struct Scsi_Host *instance) | |||
477 | instance->base, instance->irq, | 458 | instance->base, instance->irq, |
478 | instance->can_queue, instance->cmd_per_lun, | 459 | instance->can_queue, instance->cmd_per_lun, |
479 | instance->sg_tablesize, instance->this_id, | 460 | instance->sg_tablesize, instance->this_id, |
480 | hostdata->flags & FLAG_NO_DMA_FIXUP ? "NO_DMA_FIXUP " : "", | 461 | hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", |
481 | hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", | 462 | hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", |
482 | hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", | 463 | hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", |
483 | #ifdef AUTOPROBE_IRQ | ||
484 | "AUTOPROBE_IRQ " | ||
485 | #endif | ||
486 | #ifdef DIFFERENTIAL | 464 | #ifdef DIFFERENTIAL |
487 | "DIFFERENTIAL " | 465 | "DIFFERENTIAL " |
488 | #endif | 466 | #endif |
489 | #ifdef REAL_DMA | ||
490 | "REAL_DMA " | ||
491 | #endif | ||
492 | #ifdef REAL_DMA_POLL | ||
493 | "REAL_DMA_POLL " | ||
494 | #endif | ||
495 | #ifdef PARITY | 467 | #ifdef PARITY |
496 | "PARITY " | 468 | "PARITY " |
497 | #endif | 469 | #endif |
498 | #ifdef PSEUDO_DMA | ||
499 | "PSEUDO_DMA " | ||
500 | #endif | ||
501 | ""); | 470 | ""); |
502 | } | 471 | } |
503 | 472 | ||
504 | #ifdef PSEUDO_DMA | ||
505 | static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, | ||
506 | char *buffer, int length) | ||
507 | { | ||
508 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
509 | |||
510 | hostdata->spin_max_r = 0; | ||
511 | hostdata->spin_max_w = 0; | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static int __maybe_unused NCR5380_show_info(struct seq_file *m, | ||
516 | struct Scsi_Host *instance) | ||
517 | { | ||
518 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
519 | |||
520 | seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n", | ||
521 | hostdata->spin_max_w, hostdata->spin_max_r); | ||
522 | return 0; | ||
523 | } | ||
524 | #endif | ||
525 | |||
526 | /** | 473 | /** |
527 | * NCR5380_init - initialise an NCR5380 | 474 | * NCR5380_init - initialise an NCR5380 |
528 | * @instance: adapter to configure | 475 | * @instance: adapter to configure |
@@ -543,6 +490,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) | |||
543 | int i; | 490 | int i; |
544 | unsigned long deadline; | 491 | unsigned long deadline; |
545 | 492 | ||
493 | instance->max_lun = 7; | ||
494 | |||
546 | hostdata->host = instance; | 495 | hostdata->host = instance; |
547 | hostdata->id_mask = 1 << instance->this_id; | 496 | hostdata->id_mask = 1 << instance->this_id; |
548 | hostdata->id_higher_mask = 0; | 497 | hostdata->id_higher_mask = 0; |
@@ -551,9 +500,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) | |||
551 | hostdata->id_higher_mask |= i; | 500 | hostdata->id_higher_mask |= i; |
552 | for (i = 0; i < 8; ++i) | 501 | for (i = 0; i < 8; ++i) |
553 | hostdata->busy[i] = 0; | 502 | hostdata->busy[i] = 0; |
554 | #ifdef REAL_DMA | 503 | hostdata->dma_len = 0; |
555 | hostdata->dmalen = 0; | 504 | |
556 | #endif | ||
557 | spin_lock_init(&hostdata->lock); | 505 | spin_lock_init(&hostdata->lock); |
558 | hostdata->connected = NULL; | 506 | hostdata->connected = NULL; |
559 | hostdata->sensing = NULL; | 507 | hostdata->sensing = NULL; |
@@ -719,6 +667,9 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, | |||
719 | 667 | ||
720 | cmd->result = 0; | 668 | cmd->result = 0; |
721 | 669 | ||
670 | if (!NCR5380_acquire_dma_irq(instance)) | ||
671 | return SCSI_MLQUEUE_HOST_BUSY; | ||
672 | |||
722 | spin_lock_irqsave(&hostdata->lock, flags); | 673 | spin_lock_irqsave(&hostdata->lock, flags); |
723 | 674 | ||
724 | /* | 675 | /* |
@@ -743,6 +694,19 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, | |||
743 | return 0; | 694 | return 0; |
744 | } | 695 | } |
745 | 696 | ||
697 | static inline void maybe_release_dma_irq(struct Scsi_Host *instance) | ||
698 | { | ||
699 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
700 | |||
701 | /* Caller does the locking needed to set & test these data atomically */ | ||
702 | if (list_empty(&hostdata->disconnected) && | ||
703 | list_empty(&hostdata->unissued) && | ||
704 | list_empty(&hostdata->autosense) && | ||
705 | !hostdata->connected && | ||
706 | !hostdata->selecting) | ||
707 | NCR5380_release_dma_irq(instance); | ||
708 | } | ||
709 | |||
746 | /** | 710 | /** |
747 | * dequeue_next_cmd - dequeue a command for processing | 711 | * dequeue_next_cmd - dequeue a command for processing |
748 | * @instance: the scsi host instance | 712 | * @instance: the scsi host instance |
@@ -844,17 +808,14 @@ static void NCR5380_main(struct work_struct *work) | |||
844 | 808 | ||
845 | if (!NCR5380_select(instance, cmd)) { | 809 | if (!NCR5380_select(instance, cmd)) { |
846 | dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); | 810 | dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); |
811 | maybe_release_dma_irq(instance); | ||
847 | } else { | 812 | } else { |
848 | dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, | 813 | dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, |
849 | "main: select failed, returning %p to queue\n", cmd); | 814 | "main: select failed, returning %p to queue\n", cmd); |
850 | requeue_cmd(instance, cmd); | 815 | requeue_cmd(instance, cmd); |
851 | } | 816 | } |
852 | } | 817 | } |
853 | if (hostdata->connected | 818 | if (hostdata->connected && !hostdata->dma_len) { |
854 | #ifdef REAL_DMA | ||
855 | && !hostdata->dmalen | ||
856 | #endif | ||
857 | ) { | ||
858 | dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); | 819 | dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); |
859 | NCR5380_information_transfer(instance); | 820 | NCR5380_information_transfer(instance); |
860 | done = 0; | 821 | done = 0; |
@@ -865,7 +826,88 @@ static void NCR5380_main(struct work_struct *work) | |||
865 | } while (!done); | 826 | } while (!done); |
866 | } | 827 | } |
867 | 828 | ||
868 | #ifndef DONT_USE_INTR | 829 | /* |
830 | * NCR5380_dma_complete - finish DMA transfer | ||
831 | * @instance: the scsi host instance | ||
832 | * | ||
833 | * Called by the interrupt handler when DMA finishes or a phase | ||
834 | * mismatch occurs (which would end the DMA transfer). | ||
835 | */ | ||
836 | |||
837 | static void NCR5380_dma_complete(struct Scsi_Host *instance) | ||
838 | { | ||
839 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
840 | int transferred; | ||
841 | unsigned char **data; | ||
842 | int *count; | ||
843 | int saved_data = 0, overrun = 0; | ||
844 | unsigned char p; | ||
845 | |||
846 | if (hostdata->read_overruns) { | ||
847 | p = hostdata->connected->SCp.phase; | ||
848 | if (p & SR_IO) { | ||
849 | udelay(10); | ||
850 | if ((NCR5380_read(BUS_AND_STATUS_REG) & | ||
851 | (BASR_PHASE_MATCH | BASR_ACK)) == | ||
852 | (BASR_PHASE_MATCH | BASR_ACK)) { | ||
853 | saved_data = NCR5380_read(INPUT_DATA_REG); | ||
854 | overrun = 1; | ||
855 | dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); | ||
856 | } | ||
857 | } | ||
858 | } | ||
859 | |||
860 | #ifdef CONFIG_SUN3 | ||
861 | if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { | ||
862 | pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", | ||
863 | instance->host_no); | ||
864 | BUG(); | ||
865 | } | ||
866 | |||
867 | if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == | ||
868 | (BASR_PHASE_MATCH | BASR_ACK)) { | ||
869 | pr_err("scsi%d: BASR %02x\n", instance->host_no, | ||
870 | NCR5380_read(BUS_AND_STATUS_REG)); | ||
871 | pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", | ||
872 | instance->host_no); | ||
873 | BUG(); | ||
874 | } | ||
875 | #endif | ||
876 | |||
877 | NCR5380_write(MODE_REG, MR_BASE); | ||
878 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
879 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | ||
880 | |||
881 | transferred = hostdata->dma_len - NCR5380_dma_residual(instance); | ||
882 | hostdata->dma_len = 0; | ||
883 | |||
884 | data = (unsigned char **)&hostdata->connected->SCp.ptr; | ||
885 | count = &hostdata->connected->SCp.this_residual; | ||
886 | *data += transferred; | ||
887 | *count -= transferred; | ||
888 | |||
889 | if (hostdata->read_overruns) { | ||
890 | int cnt, toPIO; | ||
891 | |||
892 | if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { | ||
893 | cnt = toPIO = hostdata->read_overruns; | ||
894 | if (overrun) { | ||
895 | dsprintk(NDEBUG_DMA, instance, | ||
896 | "Got an input overrun, using saved byte\n"); | ||
897 | *(*data)++ = saved_data; | ||
898 | (*count)--; | ||
899 | cnt--; | ||
900 | toPIO--; | ||
901 | } | ||
902 | if (toPIO > 0) { | ||
903 | dsprintk(NDEBUG_DMA, instance, | ||
904 | "Doing %d byte PIO to 0x%p\n", cnt, *data); | ||
905 | NCR5380_transfer_pio(instance, &p, &cnt, data); | ||
906 | *count -= toPIO - cnt; | ||
907 | } | ||
908 | } | ||
909 | } | ||
910 | } | ||
869 | 911 | ||
870 | /** | 912 | /** |
871 | * NCR5380_intr - generic NCR5380 irq handler | 913 | * NCR5380_intr - generic NCR5380 irq handler |
@@ -901,7 +943,7 @@ static void NCR5380_main(struct work_struct *work) | |||
901 | * the Busy Monitor interrupt is enabled together with DMA Mode. | 943 | * the Busy Monitor interrupt is enabled together with DMA Mode. |
902 | */ | 944 | */ |
903 | 945 | ||
904 | static irqreturn_t NCR5380_intr(int irq, void *dev_id) | 946 | static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) |
905 | { | 947 | { |
906 | struct Scsi_Host *instance = dev_id; | 948 | struct Scsi_Host *instance = dev_id; |
907 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 949 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
@@ -919,7 +961,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
919 | dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", | 961 | dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", |
920 | irq, basr, sr, mr); | 962 | irq, basr, sr, mr); |
921 | 963 | ||
922 | #if defined(REAL_DMA) | ||
923 | if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { | 964 | if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { |
924 | /* Probably End of DMA, Phase Mismatch or Loss of BSY. | 965 | /* Probably End of DMA, Phase Mismatch or Loss of BSY. |
925 | * We ack IRQ after clearing Mode Register. Workarounds | 966 | * We ack IRQ after clearing Mode Register. Workarounds |
@@ -928,26 +969,14 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
928 | 969 | ||
929 | dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); | 970 | dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); |
930 | 971 | ||
931 | int transferred; | 972 | if (hostdata->connected) { |
932 | 973 | NCR5380_dma_complete(instance); | |
933 | if (!hostdata->connected) | 974 | queue_work(hostdata->work_q, &hostdata->main_task); |
934 | panic("scsi%d : DMA interrupt with no connected cmd\n", | 975 | } else { |
935 | instance->hostno); | 976 | NCR5380_write(MODE_REG, MR_BASE); |
936 | 977 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | |
937 | transferred = hostdata->dmalen - NCR5380_dma_residual(instance); | 978 | } |
938 | hostdata->connected->SCp.this_residual -= transferred; | 979 | } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && |
939 | hostdata->connected->SCp.ptr += transferred; | ||
940 | hostdata->dmalen = 0; | ||
941 | |||
942 | /* FIXME: we need to poll briefly then defer a workqueue task ! */ | ||
943 | NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ); | ||
944 | |||
945 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
946 | NCR5380_write(MODE_REG, MR_BASE); | ||
947 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | ||
948 | } else | ||
949 | #endif /* REAL_DMA */ | ||
950 | if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && | ||
951 | (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { | 980 | (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { |
952 | /* Probably reselected */ | 981 | /* Probably reselected */ |
953 | NCR5380_write(SELECT_ENABLE_REG, 0); | 982 | NCR5380_write(SELECT_ENABLE_REG, 0); |
@@ -966,10 +995,16 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
966 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 995 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
967 | 996 | ||
968 | dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); | 997 | dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); |
998 | #ifdef SUN3_SCSI_VME | ||
999 | dregs->csr |= CSR_DMA_ENABLE; | ||
1000 | #endif | ||
969 | } | 1001 | } |
970 | handled = 1; | 1002 | handled = 1; |
971 | } else { | 1003 | } else { |
972 | shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); | 1004 | shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); |
1005 | #ifdef SUN3_SCSI_VME | ||
1006 | dregs->csr |= CSR_DMA_ENABLE; | ||
1007 | #endif | ||
973 | } | 1008 | } |
974 | 1009 | ||
975 | spin_unlock_irqrestore(&hostdata->lock, flags); | 1010 | spin_unlock_irqrestore(&hostdata->lock, flags); |
@@ -977,8 +1012,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
977 | return IRQ_RETVAL(handled); | 1012 | return IRQ_RETVAL(handled); |
978 | } | 1013 | } |
979 | 1014 | ||
980 | #endif | ||
981 | |||
982 | /* | 1015 | /* |
983 | * Function : int NCR5380_select(struct Scsi_Host *instance, | 1016 | * Function : int NCR5380_select(struct Scsi_Host *instance, |
984 | * struct scsi_cmnd *cmd) | 1017 | * struct scsi_cmnd *cmd) |
@@ -1217,14 +1250,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, | |||
1217 | * was true but before BSY was false during selection, the information | 1250 | * was true but before BSY was false during selection, the information |
1218 | * transfer phase should be a MESSAGE OUT phase so that we can send the | 1251 | * transfer phase should be a MESSAGE OUT phase so that we can send the |
1219 | * IDENTIFY message. | 1252 | * IDENTIFY message. |
1220 | * | ||
1221 | * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG | ||
1222 | * message (2 bytes) with a tag ID that we increment with every command | ||
1223 | * until it wraps back to 0. | ||
1224 | * | ||
1225 | * XXX - it turns out that there are some broken SCSI-II devices, | ||
1226 | * which claim to support tagged queuing but fail when more than | ||
1227 | * some number of commands are issued at once. | ||
1228 | */ | 1253 | */ |
1229 | 1254 | ||
1230 | /* Wait for start of REQ/ACK handshake */ | 1255 | /* Wait for start of REQ/ACK handshake */ |
@@ -1247,9 +1272,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, | |||
1247 | tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun); | 1272 | tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun); |
1248 | 1273 | ||
1249 | len = 1; | 1274 | len = 1; |
1250 | cmd->tag = 0; | ||
1251 | |||
1252 | /* Send message(s) */ | ||
1253 | data = tmp; | 1275 | data = tmp; |
1254 | phase = PHASE_MSGOUT; | 1276 | phase = PHASE_MSGOUT; |
1255 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 1277 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
@@ -1259,6 +1281,10 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, | |||
1259 | hostdata->connected = cmd; | 1281 | hostdata->connected = cmd; |
1260 | hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; | 1282 | hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; |
1261 | 1283 | ||
1284 | #ifdef SUN3_SCSI_VME | ||
1285 | dregs->csr |= CSR_INTR; | ||
1286 | #endif | ||
1287 | |||
1262 | initialize_SCp(cmd); | 1288 | initialize_SCp(cmd); |
1263 | 1289 | ||
1264 | cmd = NULL; | 1290 | cmd = NULL; |
@@ -1495,7 +1521,6 @@ timeout: | |||
1495 | return -1; | 1521 | return -1; |
1496 | } | 1522 | } |
1497 | 1523 | ||
1498 | #if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL) | ||
1499 | /* | 1524 | /* |
1500 | * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, | 1525 | * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, |
1501 | * unsigned char *phase, int *count, unsigned char **data) | 1526 | * unsigned char *phase, int *count, unsigned char **data) |
@@ -1520,53 +1545,47 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
1520 | unsigned char **data) | 1545 | unsigned char **data) |
1521 | { | 1546 | { |
1522 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 1547 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
1523 | register int c = *count; | 1548 | int c = *count; |
1524 | register unsigned char p = *phase; | 1549 | unsigned char p = *phase; |
1525 | register unsigned char *d = *data; | 1550 | unsigned char *d = *data; |
1526 | unsigned char tmp; | 1551 | unsigned char tmp; |
1527 | int foo; | 1552 | int result = 0; |
1528 | #if defined(REAL_DMA_POLL) | ||
1529 | int cnt, toPIO; | ||
1530 | unsigned char saved_data = 0, overrun = 0, residue; | ||
1531 | #endif | ||
1532 | 1553 | ||
1533 | if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { | 1554 | if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { |
1534 | *phase = tmp; | 1555 | *phase = tmp; |
1535 | return -1; | 1556 | return -1; |
1536 | } | 1557 | } |
1537 | #if defined(REAL_DMA) || defined(REAL_DMA_POLL) | 1558 | |
1559 | hostdata->connected->SCp.phase = p; | ||
1560 | |||
1538 | if (p & SR_IO) { | 1561 | if (p & SR_IO) { |
1539 | if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) | 1562 | if (hostdata->read_overruns) |
1540 | c -= 2; | 1563 | c -= hostdata->read_overruns; |
1564 | else if (hostdata->flags & FLAG_DMA_FIXUP) | ||
1565 | --c; | ||
1541 | } | 1566 | } |
1542 | hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); | ||
1543 | 1567 | ||
1544 | dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", | 1568 | dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", |
1545 | (p & SR_IO) ? "receive" : "send", c, *data); | 1569 | (p & SR_IO) ? "receive" : "send", c, d); |
1570 | |||
1571 | #ifdef CONFIG_SUN3 | ||
1572 | /* send start chain */ | ||
1573 | sun3scsi_dma_start(c, *data); | ||
1546 | #endif | 1574 | #endif |
1547 | 1575 | ||
1548 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); | 1576 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); |
1549 | |||
1550 | #ifdef REAL_DMA | ||
1551 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | | 1577 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | |
1552 | MR_ENABLE_EOP_INTR); | 1578 | MR_ENABLE_EOP_INTR); |
1553 | #elif defined(REAL_DMA_POLL) | ||
1554 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); | ||
1555 | #else | ||
1556 | /* | ||
1557 | * Note : on my sample board, watch-dog timeouts occurred when interrupts | ||
1558 | * were not disabled for the duration of a single DMA transfer, from | ||
1559 | * before the setting of DMA mode to after transfer of the last byte. | ||
1560 | */ | ||
1561 | |||
1562 | if (hostdata->flags & FLAG_NO_DMA_FIXUP) | ||
1563 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | | ||
1564 | MR_ENABLE_EOP_INTR); | ||
1565 | else | ||
1566 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); | ||
1567 | #endif /* def REAL_DMA */ | ||
1568 | 1579 | ||
1569 | dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); | 1580 | if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { |
1581 | /* On the Medusa, it is a must to initialize the DMA before | ||
1582 | * starting the NCR. This is also the cleaner way for the TT. | ||
1583 | */ | ||
1584 | if (p & SR_IO) | ||
1585 | result = NCR5380_dma_recv_setup(instance, d, c); | ||
1586 | else | ||
1587 | result = NCR5380_dma_send_setup(instance, d, c); | ||
1588 | } | ||
1570 | 1589 | ||
1571 | /* | 1590 | /* |
1572 | * On the PAS16 at least I/O recovery delays are not needed here. | 1591 | * On the PAS16 at least I/O recovery delays are not needed here. |
@@ -1574,24 +1593,49 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
1574 | */ | 1593 | */ |
1575 | 1594 | ||
1576 | if (p & SR_IO) { | 1595 | if (p & SR_IO) { |
1577 | io_recovery_delay(1); | 1596 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
1597 | NCR5380_io_delay(1); | ||
1578 | NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); | 1598 | NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); |
1579 | } else { | 1599 | } else { |
1580 | io_recovery_delay(1); | 1600 | NCR5380_io_delay(1); |
1581 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); | 1601 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); |
1582 | io_recovery_delay(1); | 1602 | NCR5380_io_delay(1); |
1583 | NCR5380_write(START_DMA_SEND_REG, 0); | 1603 | NCR5380_write(START_DMA_SEND_REG, 0); |
1584 | io_recovery_delay(1); | 1604 | NCR5380_io_delay(1); |
1585 | } | 1605 | } |
1586 | 1606 | ||
1587 | #if defined(REAL_DMA_POLL) | 1607 | #ifdef CONFIG_SUN3 |
1588 | do { | 1608 | #ifdef SUN3_SCSI_VME |
1589 | tmp = NCR5380_read(BUS_AND_STATUS_REG); | 1609 | dregs->csr |= CSR_DMA_ENABLE; |
1590 | } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER))); | 1610 | #endif |
1611 | sun3_dma_active = 1; | ||
1612 | #endif | ||
1613 | |||
1614 | if (hostdata->flags & FLAG_LATE_DMA_SETUP) { | ||
1615 | /* On the Falcon, the DMA setup must be done after the last | ||
1616 | * NCR access, else the DMA setup gets trashed! | ||
1617 | */ | ||
1618 | if (p & SR_IO) | ||
1619 | result = NCR5380_dma_recv_setup(instance, d, c); | ||
1620 | else | ||
1621 | result = NCR5380_dma_send_setup(instance, d, c); | ||
1622 | } | ||
1623 | |||
1624 | /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ | ||
1625 | if (result < 0) | ||
1626 | return result; | ||
1627 | |||
1628 | /* For real DMA, result is the byte count. DMA interrupt is expected. */ | ||
1629 | if (result > 0) { | ||
1630 | hostdata->dma_len = result; | ||
1631 | return 0; | ||
1632 | } | ||
1633 | |||
1634 | /* The result is zero iff pseudo DMA send/receive was completed. */ | ||
1635 | hostdata->dma_len = c; | ||
1591 | 1636 | ||
1592 | /* | 1637 | /* |
1593 | * At this point, either we've completed DMA, or we have a phase mismatch, | 1638 | * A note regarding the DMA errata workarounds for early NMOS silicon. |
1594 | * or we've unexpectedly lost BUSY (which is a real error). | ||
1595 | * | 1639 | * |
1596 | * For DMA sends, we want to wait until the last byte has been | 1640 | * For DMA sends, we want to wait until the last byte has been |
1597 | * transferred out over the bus before we turn off DMA mode. Alas, there | 1641 | * transferred out over the bus before we turn off DMA mode. Alas, there |
@@ -1618,79 +1662,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
1618 | * properly, or the target switches to MESSAGE IN phase to signal a | 1662 | * properly, or the target switches to MESSAGE IN phase to signal a |
1619 | * disconnection (either operation bringing the DMA to a clean halt). | 1663 | * disconnection (either operation bringing the DMA to a clean halt). |
1620 | * However, in order to handle scatter-receive, we must work around the | 1664 | * However, in order to handle scatter-receive, we must work around the |
1621 | * problem. The chosen fix is to DMA N-2 bytes, then check for the | 1665 | * problem. The chosen fix is to DMA fewer bytes, then check for the |
1622 | * condition before taking the NCR5380 out of DMA mode. One or two extra | 1666 | * condition before taking the NCR5380 out of DMA mode. One or two extra |
1623 | * bytes are transferred via PIO as necessary to fill out the original | 1667 | * bytes are transferred via PIO as necessary to fill out the original |
1624 | * request. | 1668 | * request. |
1625 | */ | 1669 | */ |
1626 | 1670 | ||
1627 | if (p & SR_IO) { | 1671 | if (hostdata->flags & FLAG_DMA_FIXUP) { |
1628 | if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) { | 1672 | if (p & SR_IO) { |
1629 | udelay(10); | ||
1630 | if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == | ||
1631 | (BASR_PHASE_MATCH | BASR_ACK)) { | ||
1632 | saved_data = NCR5380_read(INPUT_DATA_REGISTER); | ||
1633 | overrun = 1; | ||
1634 | } | ||
1635 | } | ||
1636 | } else { | ||
1637 | int limit = 100; | ||
1638 | while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) { | ||
1639 | if (!(tmp & BASR_PHASE_MATCH)) | ||
1640 | break; | ||
1641 | if (--limit < 0) | ||
1642 | break; | ||
1643 | } | ||
1644 | } | ||
1645 | |||
1646 | dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n", | ||
1647 | tmp, NCR5380_read(STATUS_REG)); | ||
1648 | |||
1649 | NCR5380_write(MODE_REG, MR_BASE); | ||
1650 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1651 | |||
1652 | residue = NCR5380_dma_residual(instance); | ||
1653 | c -= residue; | ||
1654 | *count -= c; | ||
1655 | *data += c; | ||
1656 | *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; | ||
1657 | |||
1658 | if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) && | ||
1659 | *phase == p && (p & SR_IO) && residue == 0) { | ||
1660 | if (overrun) { | ||
1661 | dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); | ||
1662 | **data = saved_data; | ||
1663 | *data += 1; | ||
1664 | *count -= 1; | ||
1665 | cnt = toPIO = 1; | ||
1666 | } else { | ||
1667 | printk("No overrun??\n"); | ||
1668 | cnt = toPIO = 2; | ||
1669 | } | ||
1670 | dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); | ||
1671 | NCR5380_transfer_pio(instance, phase, &cnt, data); | ||
1672 | *count -= toPIO - cnt; | ||
1673 | } | ||
1674 | |||
1675 | dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); | ||
1676 | return 0; | ||
1677 | |||
1678 | #elif defined(REAL_DMA) | ||
1679 | return 0; | ||
1680 | #else /* defined(REAL_DMA_POLL) */ | ||
1681 | if (p & SR_IO) { | ||
1682 | foo = NCR5380_pread(instance, d, | ||
1683 | hostdata->flags & FLAG_NO_DMA_FIXUP ? c : c - 1); | ||
1684 | if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { | ||
1685 | /* | 1673 | /* |
1686 | * We can't disable DMA mode after successfully transferring | 1674 | * The workaround was to transfer fewer bytes than we |
1687 | * what we plan to be the last byte, since that would open up | ||
1688 | * a race condition where if the target asserted REQ before | ||
1689 | * we got the DMA mode reset, the NCR5380 would have latched | ||
1690 | * an additional byte into the INPUT DATA register and we'd | ||
1691 | * have dropped it. | ||
1692 | * | ||
1693 | * The workaround was to transfer one fewer bytes than we | ||
1694 | * intended to with the pseudo-DMA read function, wait for | 1675 | * intended to with the pseudo-DMA read function, wait for |
1695 | * the chip to latch the last byte, read it, and then disable | 1676 | * the chip to latch the last byte, read it, and then disable |
1696 | * pseudo-DMA mode. | 1677 | * pseudo-DMA mode. |
@@ -1706,19 +1687,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
1706 | 1687 | ||
1707 | if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, | 1688 | if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, |
1708 | BASR_DRQ, BASR_DRQ, HZ) < 0) { | 1689 | BASR_DRQ, BASR_DRQ, HZ) < 0) { |
1709 | foo = -1; | 1690 | result = -1; |
1710 | shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); | 1691 | shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); |
1711 | } | 1692 | } |
1712 | if (NCR5380_poll_politely(instance, STATUS_REG, | 1693 | if (NCR5380_poll_politely(instance, STATUS_REG, |
1713 | SR_REQ, 0, HZ) < 0) { | 1694 | SR_REQ, 0, HZ) < 0) { |
1714 | foo = -1; | 1695 | result = -1; |
1715 | shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); | 1696 | shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); |
1716 | } | 1697 | } |
1717 | d[c - 1] = NCR5380_read(INPUT_DATA_REG); | 1698 | d[*count - 1] = NCR5380_read(INPUT_DATA_REG); |
1718 | } | 1699 | } else { |
1719 | } else { | ||
1720 | foo = NCR5380_pwrite(instance, d, c); | ||
1721 | if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { | ||
1722 | /* | 1700 | /* |
1723 | * Wait for the last byte to be sent. If REQ is being asserted for | 1701 | * Wait for the last byte to be sent. If REQ is being asserted for |
1724 | * the byte we're interested, we'll ACK it and it will go false. | 1702 | * the byte we're interested, we'll ACK it and it will go false. |
@@ -1726,21 +1704,15 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
1726 | if (NCR5380_poll_politely2(instance, | 1704 | if (NCR5380_poll_politely2(instance, |
1727 | BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, | 1705 | BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, |
1728 | BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { | 1706 | BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { |
1729 | foo = -1; | 1707 | result = -1; |
1730 | shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); | 1708 | shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); |
1731 | } | 1709 | } |
1732 | } | 1710 | } |
1733 | } | 1711 | } |
1734 | NCR5380_write(MODE_REG, MR_BASE); | 1712 | |
1735 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1713 | NCR5380_dma_complete(instance); |
1736 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1714 | return result; |
1737 | *data = d + c; | ||
1738 | *count = 0; | ||
1739 | *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; | ||
1740 | return foo; | ||
1741 | #endif /* def REAL_DMA */ | ||
1742 | } | 1715 | } |
1743 | #endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */ | ||
1744 | 1716 | ||
1745 | /* | 1717 | /* |
1746 | * Function : NCR5380_information_transfer (struct Scsi_Host *instance) | 1718 | * Function : NCR5380_information_transfer (struct Scsi_Host *instance) |
@@ -1770,6 +1742,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1770 | unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; | 1742 | unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; |
1771 | struct scsi_cmnd *cmd; | 1743 | struct scsi_cmnd *cmd; |
1772 | 1744 | ||
1745 | #ifdef SUN3_SCSI_VME | ||
1746 | dregs->csr |= CSR_INTR; | ||
1747 | #endif | ||
1748 | |||
1773 | while ((cmd = hostdata->connected)) { | 1749 | while ((cmd = hostdata->connected)) { |
1774 | struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); | 1750 | struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); |
1775 | 1751 | ||
@@ -1781,6 +1757,31 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1781 | old_phase = phase; | 1757 | old_phase = phase; |
1782 | NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); | 1758 | NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); |
1783 | } | 1759 | } |
1760 | #ifdef CONFIG_SUN3 | ||
1761 | if (phase == PHASE_CMDOUT) { | ||
1762 | void *d; | ||
1763 | unsigned long count; | ||
1764 | |||
1765 | if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { | ||
1766 | count = cmd->SCp.buffer->length; | ||
1767 | d = sg_virt(cmd->SCp.buffer); | ||
1768 | } else { | ||
1769 | count = cmd->SCp.this_residual; | ||
1770 | d = cmd->SCp.ptr; | ||
1771 | } | ||
1772 | |||
1773 | if (sun3_dma_setup_done != cmd && | ||
1774 | sun3scsi_dma_xfer_len(count, cmd) > 0) { | ||
1775 | sun3scsi_dma_setup(instance, d, count, | ||
1776 | rq_data_dir(cmd->request)); | ||
1777 | sun3_dma_setup_done = cmd; | ||
1778 | } | ||
1779 | #ifdef SUN3_SCSI_VME | ||
1780 | dregs->csr |= CSR_INTR; | ||
1781 | #endif | ||
1782 | } | ||
1783 | #endif /* CONFIG_SUN3 */ | ||
1784 | |||
1784 | if (sink && (phase != PHASE_MSGOUT)) { | 1785 | if (sink && (phase != PHASE_MSGOUT)) { |
1785 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); | 1786 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); |
1786 | 1787 | ||
@@ -1831,13 +1832,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1831 | * in an unconditional loop. | 1832 | * in an unconditional loop. |
1832 | */ | 1833 | */ |
1833 | 1834 | ||
1834 | #if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) | ||
1835 | transfersize = 0; | 1835 | transfersize = 0; |
1836 | if (!cmd->device->borken && | 1836 | if (!cmd->device->borken) |
1837 | !(hostdata->flags & FLAG_NO_PSEUDO_DMA)) | ||
1838 | transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); | 1837 | transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); |
1839 | 1838 | ||
1840 | if (transfersize) { | 1839 | if (transfersize > 0) { |
1841 | len = transfersize; | 1840 | len = transfersize; |
1842 | if (NCR5380_transfer_dma(instance, &phase, | 1841 | if (NCR5380_transfer_dma(instance, &phase, |
1843 | &len, (unsigned char **)&cmd->SCp.ptr)) { | 1842 | &len, (unsigned char **)&cmd->SCp.ptr)) { |
@@ -1853,11 +1852,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1853 | do_abort(instance); | 1852 | do_abort(instance); |
1854 | cmd->result = DID_ERROR << 16; | 1853 | cmd->result = DID_ERROR << 16; |
1855 | /* XXX - need to source or sink data here, as appropriate */ | 1854 | /* XXX - need to source or sink data here, as appropriate */ |
1856 | } else | 1855 | } |
1857 | cmd->SCp.this_residual -= transfersize - len; | 1856 | } else { |
1858 | } else | ||
1859 | #endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */ | ||
1860 | { | ||
1861 | /* Break up transfer into 3 ms chunks, | 1857 | /* Break up transfer into 3 ms chunks, |
1862 | * presuming 6 accesses per handshake. | 1858 | * presuming 6 accesses per handshake. |
1863 | */ | 1859 | */ |
@@ -1868,6 +1864,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1868 | (unsigned char **)&cmd->SCp.ptr); | 1864 | (unsigned char **)&cmd->SCp.ptr); |
1869 | cmd->SCp.this_residual -= transfersize - len; | 1865 | cmd->SCp.this_residual -= transfersize - len; |
1870 | } | 1866 | } |
1867 | #ifdef CONFIG_SUN3 | ||
1868 | if (sun3_dma_setup_done == cmd) | ||
1869 | sun3_dma_setup_done = NULL; | ||
1870 | #endif | ||
1871 | return; | 1871 | return; |
1872 | case PHASE_MSGIN: | 1872 | case PHASE_MSGIN: |
1873 | len = 1; | 1873 | len = 1; |
@@ -1912,6 +1912,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1912 | 1912 | ||
1913 | /* Enable reselect interrupts */ | 1913 | /* Enable reselect interrupts */ |
1914 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1914 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1915 | |||
1916 | maybe_release_dma_irq(instance); | ||
1915 | return; | 1917 | return; |
1916 | case MESSAGE_REJECT: | 1918 | case MESSAGE_REJECT: |
1917 | /* Accept message by clearing ACK */ | 1919 | /* Accept message by clearing ACK */ |
@@ -1944,6 +1946,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
1944 | 1946 | ||
1945 | /* Enable reselect interrupts */ | 1947 | /* Enable reselect interrupts */ |
1946 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1948 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1949 | #ifdef SUN3_SCSI_VME | ||
1950 | dregs->csr |= CSR_DMA_ENABLE; | ||
1951 | #endif | ||
1947 | return; | 1952 | return; |
1948 | /* | 1953 | /* |
1949 | * The SCSI data pointer is *IMPLICITLY* saved on a disconnect | 1954 | * The SCSI data pointer is *IMPLICITLY* saved on a disconnect |
@@ -2047,6 +2052,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
2047 | hostdata->connected = NULL; | 2052 | hostdata->connected = NULL; |
2048 | cmd->result = DID_ERROR << 16; | 2053 | cmd->result = DID_ERROR << 16; |
2049 | complete_cmd(instance, cmd); | 2054 | complete_cmd(instance, cmd); |
2055 | maybe_release_dma_irq(instance); | ||
2050 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2056 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2051 | return; | 2057 | return; |
2052 | } | 2058 | } |
@@ -2094,10 +2100,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
2094 | { | 2100 | { |
2095 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 2101 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
2096 | unsigned char target_mask; | 2102 | unsigned char target_mask; |
2097 | unsigned char lun, phase; | 2103 | unsigned char lun; |
2098 | int len; | ||
2099 | unsigned char msg[3]; | 2104 | unsigned char msg[3]; |
2100 | unsigned char *data; | ||
2101 | struct NCR5380_cmd *ncmd; | 2105 | struct NCR5380_cmd *ncmd; |
2102 | struct scsi_cmnd *tmp; | 2106 | struct scsi_cmnd *tmp; |
2103 | 2107 | ||
@@ -2139,15 +2143,26 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
2139 | return; | 2143 | return; |
2140 | } | 2144 | } |
2141 | 2145 | ||
2142 | len = 1; | 2146 | #ifdef CONFIG_SUN3 |
2143 | data = msg; | 2147 | /* acknowledge toggle to MSGIN */ |
2144 | phase = PHASE_MSGIN; | 2148 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); |
2145 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2146 | 2149 | ||
2147 | if (len) { | 2150 | /* peek at the byte without really hitting the bus */ |
2148 | do_abort(instance); | 2151 | msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); |
2149 | return; | 2152 | #else |
2153 | { | ||
2154 | int len = 1; | ||
2155 | unsigned char *data = msg; | ||
2156 | unsigned char phase = PHASE_MSGIN; | ||
2157 | |||
2158 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2159 | |||
2160 | if (len) { | ||
2161 | do_abort(instance); | ||
2162 | return; | ||
2163 | } | ||
2150 | } | 2164 | } |
2165 | #endif /* CONFIG_SUN3 */ | ||
2151 | 2166 | ||
2152 | if (!(msg[0] & 0x80)) { | 2167 | if (!(msg[0] & 0x80)) { |
2153 | shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); | 2168 | shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); |
@@ -2195,59 +2210,37 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
2195 | return; | 2210 | return; |
2196 | } | 2211 | } |
2197 | 2212 | ||
2198 | /* Accept message by clearing ACK */ | 2213 | #ifdef CONFIG_SUN3 |
2199 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2214 | { |
2200 | 2215 | void *d; | |
2201 | hostdata->connected = tmp; | 2216 | unsigned long count; |
2202 | dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", | ||
2203 | scmd_id(tmp), tmp->device->lun, tmp->tag); | ||
2204 | } | ||
2205 | 2217 | ||
2206 | /* | 2218 | if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { |
2207 | * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) | 2219 | count = tmp->SCp.buffer->length; |
2208 | * | 2220 | d = sg_virt(tmp->SCp.buffer); |
2209 | * Purpose : called by interrupt handler when DMA finishes or a phase | 2221 | } else { |
2210 | * mismatch occurs (which would finish the DMA transfer). | 2222 | count = tmp->SCp.this_residual; |
2211 | * | 2223 | d = tmp->SCp.ptr; |
2212 | * Inputs : instance - this instance of the NCR5380. | 2224 | } |
2213 | * | ||
2214 | * Returns : pointer to the scsi_cmnd structure for which the I_T_L | ||
2215 | * nexus has been reestablished, on failure NULL is returned. | ||
2216 | */ | ||
2217 | |||
2218 | #ifdef REAL_DMA | ||
2219 | static void NCR5380_dma_complete(NCR5380_instance * instance) { | ||
2220 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
2221 | int transferred; | ||
2222 | 2225 | ||
2223 | /* | 2226 | if (sun3_dma_setup_done != tmp && |
2224 | * XXX this might not be right. | 2227 | sun3scsi_dma_xfer_len(count, tmp) > 0) { |
2225 | * | 2228 | sun3scsi_dma_setup(instance, d, count, |
2226 | * Wait for final byte to transfer, ie wait for ACK to go false. | 2229 | rq_data_dir(tmp->request)); |
2227 | * | 2230 | sun3_dma_setup_done = tmp; |
2228 | * We should use the Last Byte Sent bit, unfortunately this is | 2231 | } |
2229 | * not available on the 5380/5381 (only the various CMOS chips) | 2232 | } |
2230 | * | ||
2231 | * FIXME: timeout, and need to handle long timeout/irq case | ||
2232 | */ | ||
2233 | 2233 | ||
2234 | NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ); | 2234 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); |
2235 | #endif /* CONFIG_SUN3 */ | ||
2235 | 2236 | ||
2237 | /* Accept message by clearing ACK */ | ||
2236 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2238 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2237 | 2239 | ||
2238 | /* | 2240 | hostdata->connected = tmp; |
2239 | * The only places we should see a phase mismatch and have to send | 2241 | dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n", |
2240 | * data from the same set of pointers will be the data transfer | 2242 | scmd_id(tmp), tmp->device->lun); |
2241 | * phases. So, residual, requested length are only important here. | ||
2242 | */ | ||
2243 | |||
2244 | if (!(hostdata->connected->SCp.phase & SR_CD)) { | ||
2245 | transferred = instance->dmalen - NCR5380_dma_residual(); | ||
2246 | hostdata->connected->SCp.this_residual -= transferred; | ||
2247 | hostdata->connected->SCp.ptr += transferred; | ||
2248 | } | ||
2249 | } | 2243 | } |
2250 | #endif /* def REAL_DMA */ | ||
2251 | 2244 | ||
2252 | /** | 2245 | /** |
2253 | * list_find_cmd - test for presence of a command in a linked list | 2246 | * list_find_cmd - test for presence of a command in a linked list |
@@ -2360,9 +2353,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
2360 | if (hostdata->connected == cmd) { | 2353 | if (hostdata->connected == cmd) { |
2361 | dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); | 2354 | dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); |
2362 | hostdata->connected = NULL; | 2355 | hostdata->connected = NULL; |
2363 | #ifdef REAL_DMA | ||
2364 | hostdata->dma_len = 0; | 2356 | hostdata->dma_len = 0; |
2365 | #endif | ||
2366 | if (do_abort(instance)) { | 2357 | if (do_abort(instance)) { |
2367 | set_host_byte(cmd, DID_ERROR); | 2358 | set_host_byte(cmd, DID_ERROR); |
2368 | complete_cmd(instance, cmd); | 2359 | complete_cmd(instance, cmd); |
@@ -2388,6 +2379,7 @@ out: | |||
2388 | dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); | 2379 | dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); |
2389 | 2380 | ||
2390 | queue_work(hostdata->work_q, &hostdata->main_task); | 2381 | queue_work(hostdata->work_q, &hostdata->main_task); |
2382 | maybe_release_dma_irq(instance); | ||
2391 | spin_unlock_irqrestore(&hostdata->lock, flags); | 2383 | spin_unlock_irqrestore(&hostdata->lock, flags); |
2392 | 2384 | ||
2393 | return result; | 2385 | return result; |
@@ -2445,7 +2437,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
2445 | struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); | 2437 | struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); |
2446 | 2438 | ||
2447 | set_host_byte(cmd, DID_RESET); | 2439 | set_host_byte(cmd, DID_RESET); |
2448 | cmd->scsi_done(cmd); | 2440 | complete_cmd(instance, cmd); |
2449 | } | 2441 | } |
2450 | INIT_LIST_HEAD(&hostdata->disconnected); | 2442 | INIT_LIST_HEAD(&hostdata->disconnected); |
2451 | 2443 | ||
@@ -2465,11 +2457,10 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
2465 | 2457 | ||
2466 | for (i = 0; i < 8; ++i) | 2458 | for (i = 0; i < 8; ++i) |
2467 | hostdata->busy[i] = 0; | 2459 | hostdata->busy[i] = 0; |
2468 | #ifdef REAL_DMA | ||
2469 | hostdata->dma_len = 0; | 2460 | hostdata->dma_len = 0; |
2470 | #endif | ||
2471 | 2461 | ||
2472 | queue_work(hostdata->work_q, &hostdata->main_task); | 2462 | queue_work(hostdata->work_q, &hostdata->main_task); |
2463 | maybe_release_dma_irq(instance); | ||
2473 | spin_unlock_irqrestore(&hostdata->lock, flags); | 2464 | spin_unlock_irqrestore(&hostdata->lock, flags); |
2474 | 2465 | ||
2475 | return SUCCESS; | 2466 | return SUCCESS; |
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index a79288682a74..c60728785d89 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
@@ -199,13 +199,6 @@ | |||
199 | 199 | ||
200 | #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) | 200 | #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) |
201 | 201 | ||
202 | /* | ||
203 | * "Special" value for the (unsigned char) command tag, to indicate | ||
204 | * I_T_L nexus instead of I_T_L_Q. | ||
205 | */ | ||
206 | |||
207 | #define TAG_NONE 0xff | ||
208 | |||
209 | /* | 202 | /* |
210 | * These are "special" values for the irq and dma_channel fields of the | 203 | * These are "special" values for the irq and dma_channel fields of the |
211 | * Scsi_Host structure | 204 | * Scsi_Host structure |
@@ -220,28 +213,17 @@ | |||
220 | #define NO_IRQ 0 | 213 | #define NO_IRQ 0 |
221 | #endif | 214 | #endif |
222 | 215 | ||
223 | #define FLAG_NO_DMA_FIXUP 1 /* No DMA errata workarounds */ | 216 | #define FLAG_DMA_FIXUP 1 /* Use DMA errata workarounds */ |
224 | #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ | 217 | #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ |
225 | #define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ | 218 | #define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ |
226 | #define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */ | ||
227 | #define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ | 219 | #define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ |
228 | 220 | ||
229 | #ifdef SUPPORT_TAGS | ||
230 | struct tag_alloc { | ||
231 | DECLARE_BITMAP(allocated, MAX_TAGS); | ||
232 | int nr_allocated; | ||
233 | int queue_size; | ||
234 | }; | ||
235 | #endif | ||
236 | |||
237 | struct NCR5380_hostdata { | 221 | struct NCR5380_hostdata { |
238 | NCR5380_implementation_fields; /* implementation specific */ | 222 | NCR5380_implementation_fields; /* implementation specific */ |
239 | struct Scsi_Host *host; /* Host backpointer */ | 223 | struct Scsi_Host *host; /* Host backpointer */ |
240 | unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ | 224 | unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ |
241 | unsigned char busy[8]; /* index = target, bit = lun */ | 225 | unsigned char busy[8]; /* index = target, bit = lun */ |
242 | #if defined(REAL_DMA) || defined(REAL_DMA_POLL) | ||
243 | int dma_len; /* requested length of DMA */ | 226 | int dma_len; /* requested length of DMA */ |
244 | #endif | ||
245 | unsigned char last_message; /* last message OUT */ | 227 | unsigned char last_message; /* last message OUT */ |
246 | struct scsi_cmnd *connected; /* currently connected cmnd */ | 228 | struct scsi_cmnd *connected; /* currently connected cmnd */ |
247 | struct scsi_cmnd *selecting; /* cmnd to be connected */ | 229 | struct scsi_cmnd *selecting; /* cmnd to be connected */ |
@@ -256,13 +238,6 @@ struct NCR5380_hostdata { | |||
256 | int read_overruns; /* number of bytes to cut from a | 238 | int read_overruns; /* number of bytes to cut from a |
257 | * transfer to handle chip overruns */ | 239 | * transfer to handle chip overruns */ |
258 | struct work_struct main_task; | 240 | struct work_struct main_task; |
259 | #ifdef SUPPORT_TAGS | ||
260 | struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */ | ||
261 | #endif | ||
262 | #ifdef PSEUDO_DMA | ||
263 | unsigned spin_max_r; | ||
264 | unsigned spin_max_w; | ||
265 | #endif | ||
266 | struct workqueue_struct *work_q; | 241 | struct workqueue_struct *work_q; |
267 | unsigned long accesses_per_ms; /* chip register accesses per ms */ | 242 | unsigned long accesses_per_ms; /* chip register accesses per ms */ |
268 | }; | 243 | }; |
@@ -305,132 +280,20 @@ static void NCR5380_print(struct Scsi_Host *instance); | |||
305 | #define NCR5380_dprint_phase(flg, arg) do {} while (0) | 280 | #define NCR5380_dprint_phase(flg, arg) do {} while (0) |
306 | #endif | 281 | #endif |
307 | 282 | ||
308 | #if defined(AUTOPROBE_IRQ) | ||
309 | static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); | 283 | static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); |
310 | #endif | ||
311 | static int NCR5380_init(struct Scsi_Host *instance, int flags); | 284 | static int NCR5380_init(struct Scsi_Host *instance, int flags); |
312 | static int NCR5380_maybe_reset_bus(struct Scsi_Host *); | 285 | static int NCR5380_maybe_reset_bus(struct Scsi_Host *); |
313 | static void NCR5380_exit(struct Scsi_Host *instance); | 286 | static void NCR5380_exit(struct Scsi_Host *instance); |
314 | static void NCR5380_information_transfer(struct Scsi_Host *instance); | 287 | static void NCR5380_information_transfer(struct Scsi_Host *instance); |
315 | #ifndef DONT_USE_INTR | ||
316 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); | 288 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); |
317 | #endif | ||
318 | static void NCR5380_main(struct work_struct *work); | 289 | static void NCR5380_main(struct work_struct *work); |
319 | static const char *NCR5380_info(struct Scsi_Host *instance); | 290 | static const char *NCR5380_info(struct Scsi_Host *instance); |
320 | static void NCR5380_reselect(struct Scsi_Host *instance); | 291 | static void NCR5380_reselect(struct Scsi_Host *instance); |
321 | static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); | 292 | static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); |
322 | #if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) | ||
323 | static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); | 293 | static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); |
324 | #endif | ||
325 | static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); | 294 | static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); |
295 | static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int); | ||
296 | static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int); | ||
326 | 297 | ||
327 | #if (defined(REAL_DMA) || defined(REAL_DMA_POLL)) | ||
328 | |||
329 | #if defined(i386) || defined(__alpha__) | ||
330 | |||
331 | /** | ||
332 | * NCR5380_pc_dma_setup - setup ISA DMA | ||
333 | * @instance: adapter to set up | ||
334 | * @ptr: block to transfer (virtual address) | ||
335 | * @count: number of bytes to transfer | ||
336 | * @mode: DMA controller mode to use | ||
337 | * | ||
338 | * Program the DMA controller ready to perform an ISA DMA transfer | ||
339 | * on this chip. | ||
340 | * | ||
341 | * Locks: takes and releases the ISA DMA lock. | ||
342 | */ | ||
343 | |||
344 | static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode) | ||
345 | { | ||
346 | unsigned limit; | ||
347 | unsigned long bus_addr = virt_to_bus(ptr); | ||
348 | unsigned long flags; | ||
349 | |||
350 | if (instance->dma_channel <= 3) { | ||
351 | if (count > 65536) | ||
352 | count = 65536; | ||
353 | limit = 65536 - (bus_addr & 0xFFFF); | ||
354 | } else { | ||
355 | if (count > 65536 * 2) | ||
356 | count = 65536 * 2; | ||
357 | limit = 65536 * 2 - (bus_addr & 0x1FFFF); | ||
358 | } | ||
359 | |||
360 | if (count > limit) | ||
361 | count = limit; | ||
362 | |||
363 | if ((count & 1) || (bus_addr & 1)) | ||
364 | panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no); | ||
365 | |||
366 | flags=claim_dma_lock(); | ||
367 | disable_dma(instance->dma_channel); | ||
368 | clear_dma_ff(instance->dma_channel); | ||
369 | set_dma_addr(instance->dma_channel, bus_addr); | ||
370 | set_dma_count(instance->dma_channel, count); | ||
371 | set_dma_mode(instance->dma_channel, mode); | ||
372 | enable_dma(instance->dma_channel); | ||
373 | release_dma_lock(flags); | ||
374 | |||
375 | return count; | ||
376 | } | ||
377 | |||
378 | /** | ||
379 | * NCR5380_pc_dma_write_setup - setup ISA DMA write | ||
380 | * @instance: adapter to set up | ||
381 | * @ptr: block to transfer (virtual address) | ||
382 | * @count: number of bytes to transfer | ||
383 | * | ||
384 | * Program the DMA controller ready to perform an ISA DMA write to the | ||
385 | * SCSI controller. | ||
386 | * | ||
387 | * Locks: called routines take and release the ISA DMA lock. | ||
388 | */ | ||
389 | |||
390 | static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) | ||
391 | { | ||
392 | return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE); | ||
393 | } | ||
394 | |||
395 | /** | ||
396 | * NCR5380_pc_dma_read_setup - setup ISA DMA read | ||
397 | * @instance: adapter to set up | ||
398 | * @ptr: block to transfer (virtual address) | ||
399 | * @count: number of bytes to transfer | ||
400 | * | ||
401 | * Program the DMA controller ready to perform an ISA DMA read from the | ||
402 | * SCSI controller. | ||
403 | * | ||
404 | * Locks: called routines take and release the ISA DMA lock. | ||
405 | */ | ||
406 | |||
407 | static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) | ||
408 | { | ||
409 | return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ); | ||
410 | } | ||
411 | |||
412 | /** | ||
413 | * NCR5380_pc_dma_residual - return bytes left | ||
414 | * @instance: adapter | ||
415 | * | ||
416 | * Reports the number of bytes left over after the DMA was terminated. | ||
417 | * | ||
418 | * Locks: takes and releases the ISA DMA lock. | ||
419 | */ | ||
420 | |||
421 | static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance) | ||
422 | { | ||
423 | unsigned long flags; | ||
424 | int tmp; | ||
425 | |||
426 | flags = claim_dma_lock(); | ||
427 | clear_dma_ff(instance->dma_channel); | ||
428 | tmp = get_dma_residue(instance->dma_channel); | ||
429 | release_dma_lock(flags); | ||
430 | |||
431 | return tmp; | ||
432 | } | ||
433 | #endif /* defined(i386) || defined(__alpha__) */ | ||
434 | #endif /* defined(REAL_DMA) */ | ||
435 | #endif /* __KERNEL__ */ | 298 | #endif /* __KERNEL__ */ |
436 | #endif /* NCR5380_H */ | 299 | #endif /* NCR5380_H */ |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 7dfd0fa27255..6678d1fd897b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -555,8 +555,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) | |||
555 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; | 555 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
556 | 556 | ||
557 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 557 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
558 | if (!cmd_fibcontext) | ||
559 | return -ENOMEM; | ||
560 | 558 | ||
561 | aac_fib_init(cmd_fibcontext); | 559 | aac_fib_init(cmd_fibcontext); |
562 | dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); | 560 | dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); |
@@ -1037,8 +1035,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) | |||
1037 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; | 1035 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
1038 | 1036 | ||
1039 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 1037 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
1040 | if (!cmd_fibcontext) | ||
1041 | return -ENOMEM; | ||
1042 | 1038 | ||
1043 | aac_fib_init(cmd_fibcontext); | 1039 | aac_fib_init(cmd_fibcontext); |
1044 | dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); | 1040 | dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); |
@@ -1950,10 +1946,6 @@ static int aac_read(struct scsi_cmnd * scsicmd) | |||
1950 | * Alocate and initialize a Fib | 1946 | * Alocate and initialize a Fib |
1951 | */ | 1947 | */ |
1952 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 1948 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
1953 | if (!cmd_fibcontext) { | ||
1954 | printk(KERN_WARNING "aac_read: fib allocation failed\n"); | ||
1955 | return -1; | ||
1956 | } | ||
1957 | 1949 | ||
1958 | status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); | 1950 | status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); |
1959 | 1951 | ||
@@ -2048,16 +2040,6 @@ static int aac_write(struct scsi_cmnd * scsicmd) | |||
2048 | * Allocate and initialize a Fib then setup a BlockWrite command | 2040 | * Allocate and initialize a Fib then setup a BlockWrite command |
2049 | */ | 2041 | */ |
2050 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 2042 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
2051 | if (!cmd_fibcontext) { | ||
2052 | /* FIB temporarily unavailable,not catastrophic failure */ | ||
2053 | |||
2054 | /* scsicmd->result = DID_ERROR << 16; | ||
2055 | * scsicmd->scsi_done(scsicmd); | ||
2056 | * return 0; | ||
2057 | */ | ||
2058 | printk(KERN_WARNING "aac_write: fib allocation failed\n"); | ||
2059 | return -1; | ||
2060 | } | ||
2061 | 2043 | ||
2062 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); | 2044 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); |
2063 | 2045 | ||
@@ -2283,8 +2265,6 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) | |||
2283 | * Allocate and initialize a Fib | 2265 | * Allocate and initialize a Fib |
2284 | */ | 2266 | */ |
2285 | cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); | 2267 | cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); |
2286 | if (!cmd_fibcontext) | ||
2287 | return SCSI_MLQUEUE_HOST_BUSY; | ||
2288 | 2268 | ||
2289 | aac_fib_init(cmd_fibcontext); | 2269 | aac_fib_init(cmd_fibcontext); |
2290 | 2270 | ||
@@ -3184,8 +3164,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) | |||
3184 | * Allocate and initialize a Fib then setup a BlockWrite command | 3164 | * Allocate and initialize a Fib then setup a BlockWrite command |
3185 | */ | 3165 | */ |
3186 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 3166 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
3187 | if (!cmd_fibcontext) | ||
3188 | return -1; | ||
3189 | 3167 | ||
3190 | status = aac_adapter_scsi(cmd_fibcontext, scsicmd); | 3168 | status = aac_adapter_scsi(cmd_fibcontext, scsicmd); |
3191 | 3169 | ||
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index efa493cf1bc6..8f90d9e77104 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -29,6 +29,7 @@ enum { | |||
29 | #define AAC_INT_MODE_MSI (1<<1) | 29 | #define AAC_INT_MODE_MSI (1<<1) |
30 | #define AAC_INT_MODE_AIF (1<<2) | 30 | #define AAC_INT_MODE_AIF (1<<2) |
31 | #define AAC_INT_MODE_SYNC (1<<3) | 31 | #define AAC_INT_MODE_SYNC (1<<3) |
32 | #define AAC_INT_MODE_MSIX (1<<16) | ||
32 | 33 | ||
33 | #define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb | 34 | #define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb |
34 | #define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa | 35 | #define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa |
@@ -62,7 +63,7 @@ enum { | |||
62 | #define PMC_GLOBAL_INT_BIT0 0x00000001 | 63 | #define PMC_GLOBAL_INT_BIT0 0x00000001 |
63 | 64 | ||
64 | #ifndef AAC_DRIVER_BUILD | 65 | #ifndef AAC_DRIVER_BUILD |
65 | # define AAC_DRIVER_BUILD 41052 | 66 | # define AAC_DRIVER_BUILD 41066 |
66 | # define AAC_DRIVER_BRANCH "-ms" | 67 | # define AAC_DRIVER_BRANCH "-ms" |
67 | #endif | 68 | #endif |
68 | #define MAXIMUM_NUM_CONTAINERS 32 | 69 | #define MAXIMUM_NUM_CONTAINERS 32 |
@@ -720,7 +721,7 @@ struct sa_registers { | |||
720 | }; | 721 | }; |
721 | 722 | ||
722 | 723 | ||
723 | #define Sa_MINIPORT_REVISION 1 | 724 | #define SA_INIT_NUM_MSIXVECTORS 1 |
724 | 725 | ||
725 | #define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) | 726 | #define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) |
726 | #define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) | 727 | #define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) |
@@ -2065,6 +2066,10 @@ extern struct aac_common aac_config; | |||
2065 | #define AifEnAddJBOD 30 /* JBOD created */ | 2066 | #define AifEnAddJBOD 30 /* JBOD created */ |
2066 | #define AifEnDeleteJBOD 31 /* JBOD deleted */ | 2067 | #define AifEnDeleteJBOD 31 /* JBOD deleted */ |
2067 | 2068 | ||
2069 | #define AifBuManagerEvent 42 /* Bu management*/ | ||
2070 | #define AifBuCacheDataLoss 10 | ||
2071 | #define AifBuCacheDataRecover 11 | ||
2072 | |||
2068 | #define AifCmdJobProgress 2 /* Progress report */ | 2073 | #define AifCmdJobProgress 2 /* Progress report */ |
2069 | #define AifJobCtrZero 101 /* Array Zero progress */ | 2074 | #define AifJobCtrZero 101 /* Array Zero progress */ |
2070 | #define AifJobStsSuccess 1 /* Job completes */ | 2075 | #define AifJobStsSuccess 1 /* Job completes */ |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 2b4e75380ae6..341ea327ae79 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/blkdev.h> | 39 | #include <linux/blkdev.h> |
40 | #include <linux/delay.h> | ||
40 | #include <linux/completion.h> | 41 | #include <linux/completion.h> |
41 | #include <linux/mm.h> | 42 | #include <linux/mm.h> |
42 | #include <scsi/scsi_host.h> | 43 | #include <scsi/scsi_host.h> |
@@ -47,6 +48,20 @@ struct aac_common aac_config = { | |||
47 | .irq_mod = 1 | 48 | .irq_mod = 1 |
48 | }; | 49 | }; |
49 | 50 | ||
51 | static inline int aac_is_msix_mode(struct aac_dev *dev) | ||
52 | { | ||
53 | u32 status; | ||
54 | |||
55 | status = src_readl(dev, MUnit.OMR); | ||
56 | return (status & AAC_INT_MODE_MSIX); | ||
57 | } | ||
58 | |||
59 | static inline void aac_change_to_intx(struct aac_dev *dev) | ||
60 | { | ||
61 | aac_src_access_devreg(dev, AAC_DISABLE_MSIX); | ||
62 | aac_src_access_devreg(dev, AAC_ENABLE_INTX); | ||
63 | } | ||
64 | |||
50 | static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) | 65 | static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) |
51 | { | 66 | { |
52 | unsigned char *base; | 67 | unsigned char *base; |
@@ -91,7 +106,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
91 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); | 106 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); |
92 | if (dev->max_fib_size != sizeof(struct hw_fib)) | 107 | if (dev->max_fib_size != sizeof(struct hw_fib)) |
93 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); | 108 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); |
94 | init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION); | 109 | init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS); |
95 | init->fsrev = cpu_to_le32(dev->fsrev); | 110 | init->fsrev = cpu_to_le32(dev->fsrev); |
96 | 111 | ||
97 | /* | 112 | /* |
@@ -378,21 +393,8 @@ void aac_define_int_mode(struct aac_dev *dev) | |||
378 | msi_count = i; | 393 | msi_count = i; |
379 | } else { | 394 | } else { |
380 | dev->msi_enabled = 0; | 395 | dev->msi_enabled = 0; |
381 | printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", | 396 | dev_err(&dev->pdev->dev, |
382 | dev->name, dev->id, i); | 397 | "MSIX not supported!! Will try INTX 0x%x.\n", i); |
383 | } | ||
384 | } | ||
385 | |||
386 | if (!dev->msi_enabled) { | ||
387 | msi_count = 1; | ||
388 | i = pci_enable_msi(dev->pdev); | ||
389 | |||
390 | if (!i) { | ||
391 | dev->msi_enabled = 1; | ||
392 | dev->msi = 1; | ||
393 | } else { | ||
394 | printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n", | ||
395 | dev->name, dev->id, i); | ||
396 | } | 398 | } |
397 | } | 399 | } |
398 | 400 | ||
@@ -427,6 +429,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
427 | dev->comm_interface = AAC_COMM_PRODUCER; | 429 | dev->comm_interface = AAC_COMM_PRODUCER; |
428 | dev->raw_io_interface = dev->raw_io_64 = 0; | 430 | dev->raw_io_interface = dev->raw_io_64 = 0; |
429 | 431 | ||
432 | |||
433 | /* | ||
434 | * Enable INTX mode, if not done already Enabled | ||
435 | */ | ||
436 | if (aac_is_msix_mode(dev)) { | ||
437 | aac_change_to_intx(dev); | ||
438 | dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); | ||
439 | } | ||
440 | |||
430 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, | 441 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, |
431 | 0, 0, 0, 0, 0, 0, | 442 | 0, 0, 0, 0, 0, 0, |
432 | status+0, status+1, status+2, status+3, NULL)) && | 443 | status+0, status+1, status+2, status+3, NULL)) && |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 511bbc575062..0aeecec1f5ea 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -637,10 +637,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
637 | } | 637 | } |
638 | return -EFAULT; | 638 | return -EFAULT; |
639 | } | 639 | } |
640 | /* We used to udelay() here but that absorbed | 640 | /* |
641 | * a CPU when a timeout occured. Not very | 641 | * Allow other processes / CPUS to use core |
642 | * useful. */ | 642 | */ |
643 | cpu_relax(); | 643 | schedule(); |
644 | } | 644 | } |
645 | } else if (down_interruptible(&fibptr->event_wait)) { | 645 | } else if (down_interruptible(&fibptr->event_wait)) { |
646 | /* Do nothing ... satisfy | 646 | /* Do nothing ... satisfy |
@@ -901,6 +901,31 @@ void aac_printf(struct aac_dev *dev, u32 val) | |||
901 | memset(cp, 0, 256); | 901 | memset(cp, 0, 256); |
902 | } | 902 | } |
903 | 903 | ||
904 | static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) | ||
905 | { | ||
906 | return le32_to_cpu(((__le32 *)aifcmd->data)[index]); | ||
907 | } | ||
908 | |||
909 | |||
910 | static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) | ||
911 | { | ||
912 | switch (aac_aif_data(aifcmd, 1)) { | ||
913 | case AifBuCacheDataLoss: | ||
914 | if (aac_aif_data(aifcmd, 2)) | ||
915 | dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", | ||
916 | aac_aif_data(aifcmd, 2)); | ||
917 | else | ||
918 | dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); | ||
919 | break; | ||
920 | case AifBuCacheDataRecover: | ||
921 | if (aac_aif_data(aifcmd, 2)) | ||
922 | dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", | ||
923 | aac_aif_data(aifcmd, 2)); | ||
924 | else | ||
925 | dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); | ||
926 | break; | ||
927 | } | ||
928 | } | ||
904 | 929 | ||
905 | /** | 930 | /** |
906 | * aac_handle_aif - Handle a message from the firmware | 931 | * aac_handle_aif - Handle a message from the firmware |
@@ -1154,6 +1179,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) | |||
1154 | ADD : DELETE; | 1179 | ADD : DELETE; |
1155 | break; | 1180 | break; |
1156 | } | 1181 | } |
1182 | case AifBuManagerEvent: | ||
1183 | aac_handle_aif_bu(dev, aifcmd); | ||
1157 | break; | 1184 | break; |
1158 | } | 1185 | } |
1159 | 1186 | ||
@@ -1996,6 +2023,10 @@ int aac_command_thread(void *data) | |||
1996 | if (difference <= 0) | 2023 | if (difference <= 0) |
1997 | difference = 1; | 2024 | difference = 1; |
1998 | set_current_state(TASK_INTERRUPTIBLE); | 2025 | set_current_state(TASK_INTERRUPTIBLE); |
2026 | |||
2027 | if (kthread_should_stop()) | ||
2028 | break; | ||
2029 | |||
1999 | schedule_timeout(difference); | 2030 | schedule_timeout(difference); |
2000 | 2031 | ||
2001 | if (kthread_should_stop()) | 2032 | if (kthread_should_stop()) |
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index d677b52860ae..7e836205aef1 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -392,9 +392,10 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, | |||
392 | if (likely(fib->callback && fib->callback_data)) { | 392 | if (likely(fib->callback && fib->callback_data)) { |
393 | fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; | 393 | fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; |
394 | fib->callback(fib->callback_data, fib); | 394 | fib->callback(fib->callback_data, fib); |
395 | } else { | 395 | } else |
396 | aac_fib_complete(fib); | 396 | dev_info(&dev->pdev->dev, |
397 | } | 397 | "Invalid callback_fib[%d] (*%p)(%p)\n", |
398 | index, fib->callback, fib->callback_data); | ||
398 | } else { | 399 | } else { |
399 | unsigned long flagv; | 400 | unsigned long flagv; |
400 | dprintk((KERN_INFO "event_wait up\n")); | 401 | dprintk((KERN_INFO "event_wait up\n")); |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index ff6caab8cc8b..a943bd230bc2 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -1299,6 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1299 | else | 1299 | else |
1300 | shost->this_id = shost->max_id; | 1300 | shost->this_id = shost->max_id; |
1301 | 1301 | ||
1302 | aac_intr_normal(aac, 0, 2, 0, NULL); | ||
1303 | |||
1302 | /* | 1304 | /* |
1303 | * dmb - we may need to move the setting of these parms somewhere else once | 1305 | * dmb - we may need to move the setting of these parms somewhere else once |
1304 | * we get a fib that can report the actual numbers | 1306 | * we get a fib that can report the actual numbers |
@@ -1431,8 +1433,8 @@ static int aac_acquire_resources(struct aac_dev *dev) | |||
1431 | /* After EEH recovery or suspend resume, max_msix count | 1433 | /* After EEH recovery or suspend resume, max_msix count |
1432 | * may change, therfore updating in init as well. | 1434 | * may change, therfore updating in init as well. |
1433 | */ | 1435 | */ |
1434 | aac_adapter_start(dev); | ||
1435 | dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); | 1436 | dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); |
1437 | aac_adapter_start(dev); | ||
1436 | } | 1438 | } |
1437 | return 0; | 1439 | return 0; |
1438 | 1440 | ||
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index bc0203f3d243..28f8b8a1b8a4 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c | |||
@@ -135,7 +135,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) | |||
135 | 135 | ||
136 | if (mode & AAC_INT_MODE_AIF) { | 136 | if (mode & AAC_INT_MODE_AIF) { |
137 | /* handle AIF */ | 137 | /* handle AIF */ |
138 | aac_intr_normal(dev, 0, 2, 0, NULL); | 138 | if (dev->aif_thread && dev->fsa_dev) |
139 | aac_intr_normal(dev, 0, 2, 0, NULL); | ||
139 | if (dev->msi_enabled) | 140 | if (dev->msi_enabled) |
140 | aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); | 141 | aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); |
141 | mode = 0; | 142 | mode = 0; |
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index 221f18c5df93..8e9cfe8f22f5 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c | |||
@@ -13,13 +13,14 @@ | |||
13 | 13 | ||
14 | #include <scsi/scsi_host.h> | 14 | #include <scsi/scsi_host.h> |
15 | 15 | ||
16 | #define PSEUDO_DMA | ||
17 | |||
18 | #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) | 16 | #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) |
19 | #define NCR5380_read(reg) cumanascsi_read(instance, reg) | 17 | #define NCR5380_read(reg) cumanascsi_read(instance, reg) |
20 | #define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) | 18 | #define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) |
21 | 19 | ||
22 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) | 20 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) |
21 | #define NCR5380_dma_recv_setup cumanascsi_pread | ||
22 | #define NCR5380_dma_send_setup cumanascsi_pwrite | ||
23 | #define NCR5380_dma_residual(instance) (0) | ||
23 | 24 | ||
24 | #define NCR5380_intr cumanascsi_intr | 25 | #define NCR5380_intr cumanascsi_intr |
25 | #define NCR5380_queue_command cumanascsi_queue_command | 26 | #define NCR5380_queue_command cumanascsi_queue_command |
@@ -41,8 +42,8 @@ void cumanascsi_setup(char *str, int *ints) | |||
41 | #define L(v) (((v)<<16)|((v) & 0x0000ffff)) | 42 | #define L(v) (((v)<<16)|((v) & 0x0000ffff)) |
42 | #define H(v) (((v)>>16)|((v) & 0xffff0000)) | 43 | #define H(v) (((v)>>16)|((v) & 0xffff0000)) |
43 | 44 | ||
44 | static inline int | 45 | static inline int cumanascsi_pwrite(struct Scsi_Host *host, |
45 | NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) | 46 | unsigned char *addr, int len) |
46 | { | 47 | { |
47 | unsigned long *laddr; | 48 | unsigned long *laddr; |
48 | void __iomem *dma = priv(host)->dma + 0x2000; | 49 | void __iomem *dma = priv(host)->dma + 0x2000; |
@@ -101,11 +102,14 @@ NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) | |||
101 | } | 102 | } |
102 | end: | 103 | end: |
103 | writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); | 104 | writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); |
104 | return len; | 105 | |
106 | if (len) | ||
107 | return -1; | ||
108 | return 0; | ||
105 | } | 109 | } |
106 | 110 | ||
107 | static inline int | 111 | static inline int cumanascsi_pread(struct Scsi_Host *host, |
108 | NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) | 112 | unsigned char *addr, int len) |
109 | { | 113 | { |
110 | unsigned long *laddr; | 114 | unsigned long *laddr; |
111 | void __iomem *dma = priv(host)->dma + 0x2000; | 115 | void __iomem *dma = priv(host)->dma + 0x2000; |
@@ -163,7 +167,10 @@ NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) | |||
163 | } | 167 | } |
164 | end: | 168 | end: |
165 | writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); | 169 | writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); |
166 | return len; | 170 | |
171 | if (len) | ||
172 | return -1; | ||
173 | return 0; | ||
167 | } | 174 | } |
168 | 175 | ||
169 | static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) | 176 | static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) |
@@ -239,7 +246,7 @@ static int cumanascsi1_probe(struct expansion_card *ec, | |||
239 | 246 | ||
240 | host->irq = ec->irq; | 247 | host->irq = ec->irq; |
241 | 248 | ||
242 | ret = NCR5380_init(host, 0); | 249 | ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); |
243 | if (ret) | 250 | if (ret) |
244 | goto out_unmap; | 251 | goto out_unmap; |
245 | 252 | ||
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index faa1bee07c8a..edce5f3cfdba 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c | |||
@@ -365,7 +365,7 @@ static struct scsi_host_template cumanascsi2_template = { | |||
365 | .eh_abort_handler = fas216_eh_abort, | 365 | .eh_abort_handler = fas216_eh_abort, |
366 | .can_queue = 1, | 366 | .can_queue = 1, |
367 | .this_id = 7, | 367 | .this_id = 7, |
368 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 368 | .sg_tablesize = SG_MAX_SEGMENTS, |
369 | .dma_boundary = IOMD_DMA_BOUNDARY, | 369 | .dma_boundary = IOMD_DMA_BOUNDARY, |
370 | .use_clustering = DISABLE_CLUSTERING, | 370 | .use_clustering = DISABLE_CLUSTERING, |
371 | .proc_name = "cumanascsi2", | 371 | .proc_name = "cumanascsi2", |
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index a8ad6880dd91..e93e047f4316 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c | |||
@@ -484,7 +484,7 @@ static struct scsi_host_template eesox_template = { | |||
484 | .eh_abort_handler = fas216_eh_abort, | 484 | .eh_abort_handler = fas216_eh_abort, |
485 | .can_queue = 1, | 485 | .can_queue = 1, |
486 | .this_id = 7, | 486 | .this_id = 7, |
487 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 487 | .sg_tablesize = SG_MAX_SEGMENTS, |
488 | .dma_boundary = IOMD_DMA_BOUNDARY, | 488 | .dma_boundary = IOMD_DMA_BOUNDARY, |
489 | .use_clustering = DISABLE_CLUSTERING, | 489 | .use_clustering = DISABLE_CLUSTERING, |
490 | .proc_name = "eesox", | 490 | .proc_name = "eesox", |
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 1fab1d1896b1..a396024a3cae 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c | |||
@@ -14,9 +14,6 @@ | |||
14 | 14 | ||
15 | #include <scsi/scsi_host.h> | 15 | #include <scsi/scsi_host.h> |
16 | 16 | ||
17 | /*#define PSEUDO_DMA*/ | ||
18 | #define DONT_USE_INTR | ||
19 | |||
20 | #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) | 17 | #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) |
21 | 18 | ||
22 | #define NCR5380_read(reg) \ | 19 | #define NCR5380_read(reg) \ |
@@ -24,7 +21,10 @@ | |||
24 | #define NCR5380_write(reg, value) \ | 21 | #define NCR5380_write(reg, value) \ |
25 | writeb(value, priv(instance)->base + ((reg) << 2)) | 22 | writeb(value, priv(instance)->base + ((reg) << 2)) |
26 | 23 | ||
27 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) | 24 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (0) |
25 | #define NCR5380_dma_recv_setup oakscsi_pread | ||
26 | #define NCR5380_dma_send_setup oakscsi_pwrite | ||
27 | #define NCR5380_dma_residual(instance) (0) | ||
28 | 28 | ||
29 | #define NCR5380_queue_command oakscsi_queue_command | 29 | #define NCR5380_queue_command oakscsi_queue_command |
30 | #define NCR5380_info oakscsi_info | 30 | #define NCR5380_info oakscsi_info |
@@ -40,23 +40,23 @@ | |||
40 | #define STAT ((128 + 16) << 2) | 40 | #define STAT ((128 + 16) << 2) |
41 | #define DATA ((128 + 8) << 2) | 41 | #define DATA ((128 + 8) << 2) |
42 | 42 | ||
43 | static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr, | 43 | static inline int oakscsi_pwrite(struct Scsi_Host *instance, |
44 | int len) | 44 | unsigned char *addr, int len) |
45 | { | 45 | { |
46 | void __iomem *base = priv(instance)->base; | 46 | void __iomem *base = priv(instance)->base; |
47 | 47 | ||
48 | printk("writing %p len %d\n",addr, len); | 48 | printk("writing %p len %d\n",addr, len); |
49 | if(!len) return -1; | ||
50 | 49 | ||
51 | while(1) | 50 | while(1) |
52 | { | 51 | { |
53 | int status; | 52 | int status; |
54 | while (((status = readw(base + STAT)) & 0x100)==0); | 53 | while (((status = readw(base + STAT)) & 0x100)==0); |
55 | } | 54 | } |
55 | return 0; | ||
56 | } | 56 | } |
57 | 57 | ||
58 | static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr, | 58 | static inline int oakscsi_pread(struct Scsi_Host *instance, |
59 | int len) | 59 | unsigned char *addr, int len) |
60 | { | 60 | { |
61 | void __iomem *base = priv(instance)->base; | 61 | void __iomem *base = priv(instance)->base; |
62 | printk("reading %p len %d\n", addr, len); | 62 | printk("reading %p len %d\n", addr, len); |
@@ -73,7 +73,7 @@ printk("reading %p len %d\n", addr, len); | |||
73 | if(status & 0x200 || !timeout) | 73 | if(status & 0x200 || !timeout) |
74 | { | 74 | { |
75 | printk("status = %08X\n", status); | 75 | printk("status = %08X\n", status); |
76 | return 1; | 76 | return -1; |
77 | } | 77 | } |
78 | } | 78 | } |
79 | 79 | ||
@@ -143,7 +143,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
143 | host->irq = NO_IRQ; | 143 | host->irq = NO_IRQ; |
144 | host->n_io_port = 255; | 144 | host->n_io_port = 255; |
145 | 145 | ||
146 | ret = NCR5380_init(host, 0); | 146 | ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); |
147 | if (ret) | 147 | if (ret) |
148 | goto out_unmap; | 148 | goto out_unmap; |
149 | 149 | ||
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 5e1b73e1b743..79aa88911b7f 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c | |||
@@ -291,7 +291,7 @@ static struct scsi_host_template powertecscsi_template = { | |||
291 | 291 | ||
292 | .can_queue = 8, | 292 | .can_queue = 8, |
293 | .this_id = 7, | 293 | .this_id = 7, |
294 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 294 | .sg_tablesize = SG_MAX_SEGMENTS, |
295 | .dma_boundary = IOMD_DMA_BOUNDARY, | 295 | .dma_boundary = IOMD_DMA_BOUNDARY, |
296 | .cmd_per_lun = 2, | 296 | .cmd_per_lun = 2, |
297 | .use_clustering = ENABLE_CLUSTERING, | 297 | .use_clustering = ENABLE_CLUSTERING, |
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c deleted file mode 100644 index 389825ba5d96..000000000000 --- a/drivers/scsi/atari_NCR5380.c +++ /dev/null | |||
@@ -1,2676 +0,0 @@ | |||
1 | /* | ||
2 | * NCR 5380 generic driver routines. These should make it *trivial* | ||
3 | * to implement 5380 SCSI drivers under Linux with a non-trantor | ||
4 | * architecture. | ||
5 | * | ||
6 | * Note that these routines also work with NR53c400 family chips. | ||
7 | * | ||
8 | * Copyright 1993, Drew Eckhardt | ||
9 | * Visionary Computing | ||
10 | * (Unix and Linux consulting and custom programming) | ||
11 | * drew@colorado.edu | ||
12 | * +1 (303) 666-5836 | ||
13 | * | ||
14 | * For more information, please consult | ||
15 | * | ||
16 | * NCR 5380 Family | ||
17 | * SCSI Protocol Controller | ||
18 | * Databook | ||
19 | * | ||
20 | * NCR Microelectronics | ||
21 | * 1635 Aeroplaza Drive | ||
22 | * Colorado Springs, CO 80916 | ||
23 | * 1+ (719) 578-3400 | ||
24 | * 1+ (800) 334-5454 | ||
25 | */ | ||
26 | |||
27 | /* Ported to Atari by Roman Hodek and others. */ | ||
28 | |||
29 | /* Adapted for the sun3 by Sam Creasey. */ | ||
30 | |||
31 | /* | ||
32 | * Design | ||
33 | * | ||
34 | * This is a generic 5380 driver. To use it on a different platform, | ||
35 | * one simply writes appropriate system specific macros (ie, data | ||
36 | * transfer - some PC's will use the I/O bus, 68K's must use | ||
37 | * memory mapped) and drops this file in their 'C' wrapper. | ||
38 | * | ||
39 | * As far as command queueing, two queues are maintained for | ||
40 | * each 5380 in the system - commands that haven't been issued yet, | ||
41 | * and commands that are currently executing. This means that an | ||
42 | * unlimited number of commands may be queued, letting | ||
43 | * more commands propagate from the higher driver levels giving higher | ||
44 | * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, | ||
45 | * allowing multiple commands to propagate all the way to a SCSI-II device | ||
46 | * while a command is already executing. | ||
47 | * | ||
48 | * | ||
49 | * Issues specific to the NCR5380 : | ||
50 | * | ||
51 | * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead | ||
52 | * piece of hardware that requires you to sit in a loop polling for | ||
53 | * the REQ signal as long as you are connected. Some devices are | ||
54 | * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect | ||
55 | * while doing long seek operations. [...] These | ||
56 | * broken devices are the exception rather than the rule and I'd rather | ||
57 | * spend my time optimizing for the normal case. | ||
58 | * | ||
59 | * Architecture : | ||
60 | * | ||
61 | * At the heart of the design is a coroutine, NCR5380_main, | ||
62 | * which is started from a workqueue for each NCR5380 host in the | ||
63 | * system. It attempts to establish I_T_L or I_T_L_Q nexuses by | ||
64 | * removing the commands from the issue queue and calling | ||
65 | * NCR5380_select() if a nexus is not established. | ||
66 | * | ||
67 | * Once a nexus is established, the NCR5380_information_transfer() | ||
68 | * phase goes through the various phases as instructed by the target. | ||
69 | * if the target goes into MSG IN and sends a DISCONNECT message, | ||
70 | * the command structure is placed into the per instance disconnected | ||
71 | * queue, and NCR5380_main tries to find more work. If the target is | ||
72 | * idle for too long, the system will try to sleep. | ||
73 | * | ||
74 | * If a command has disconnected, eventually an interrupt will trigger, | ||
75 | * calling NCR5380_intr() which will in turn call NCR5380_reselect | ||
76 | * to reestablish a nexus. This will run main if necessary. | ||
77 | * | ||
78 | * On command termination, the done function will be called as | ||
79 | * appropriate. | ||
80 | * | ||
81 | * SCSI pointers are maintained in the SCp field of SCSI command | ||
82 | * structures, being initialized after the command is connected | ||
83 | * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. | ||
84 | * Note that in violation of the standard, an implicit SAVE POINTERS operation | ||
85 | * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. | ||
86 | */ | ||
87 | |||
88 | /* | ||
89 | * Using this file : | ||
90 | * This file a skeleton Linux SCSI driver for the NCR 5380 series | ||
91 | * of chips. To use it, you write an architecture specific functions | ||
92 | * and macros and include this file in your driver. | ||
93 | * | ||
94 | * These macros control options : | ||
95 | * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically | ||
96 | * for commands that return with a CHECK CONDITION status. | ||
97 | * | ||
98 | * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential | ||
99 | * transceivers. | ||
100 | * | ||
101 | * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. | ||
102 | * | ||
103 | * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible | ||
104 | * | ||
105 | * These macros MUST be defined : | ||
106 | * | ||
107 | * NCR5380_read(register) - read from the specified register | ||
108 | * | ||
109 | * NCR5380_write(register, value) - write to the specific register | ||
110 | * | ||
111 | * NCR5380_implementation_fields - additional fields needed for this | ||
112 | * specific implementation of the NCR5380 | ||
113 | * | ||
114 | * Either real DMA *or* pseudo DMA may be implemented | ||
115 | * REAL functions : | ||
116 | * NCR5380_REAL_DMA should be defined if real DMA is to be used. | ||
117 | * Note that the DMA setup functions should return the number of bytes | ||
118 | * that they were able to program the controller for. | ||
119 | * | ||
120 | * Also note that generic i386/PC versions of these macros are | ||
121 | * available as NCR5380_i386_dma_write_setup, | ||
122 | * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. | ||
123 | * | ||
124 | * NCR5380_dma_write_setup(instance, src, count) - initialize | ||
125 | * NCR5380_dma_read_setup(instance, dst, count) - initialize | ||
126 | * NCR5380_dma_residual(instance); - residual count | ||
127 | * | ||
128 | * PSEUDO functions : | ||
129 | * NCR5380_pwrite(instance, src, count) | ||
130 | * NCR5380_pread(instance, dst, count); | ||
131 | * | ||
132 | * The generic driver is initialized by calling NCR5380_init(instance), | ||
133 | * after setting the appropriate host specific fields and ID. If the | ||
134 | * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, | ||
135 | * possible) function may be used. | ||
136 | */ | ||
137 | |||
138 | static int do_abort(struct Scsi_Host *); | ||
139 | static void do_reset(struct Scsi_Host *); | ||
140 | |||
141 | #ifdef SUPPORT_TAGS | ||
142 | |||
143 | /* | ||
144 | * Functions for handling tagged queuing | ||
145 | * ===================================== | ||
146 | * | ||
147 | * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: | ||
148 | * | ||
149 | * Using consecutive numbers for the tags is no good idea in my eyes. There | ||
150 | * could be wrong re-usings if the counter (8 bit!) wraps and some early | ||
151 | * command has been preempted for a long time. My solution: a bitfield for | ||
152 | * remembering used tags. | ||
153 | * | ||
154 | * There's also the problem that each target has a certain queue size, but we | ||
155 | * cannot know it in advance :-( We just see a QUEUE_FULL status being | ||
156 | * returned. So, in this case, the driver internal queue size assumption is | ||
157 | * reduced to the number of active tags if QUEUE_FULL is returned by the | ||
158 | * target. | ||
159 | * | ||
160 | * We're also not allowed running tagged commands as long as an untagged | ||
161 | * command is active. And REQUEST SENSE commands after a contingent allegiance | ||
162 | * condition _must_ be untagged. To keep track whether an untagged command has | ||
163 | * been issued, the host->busy array is still employed, as it is without | ||
164 | * support for tagged queuing. | ||
165 | * | ||
166 | * One could suspect that there are possible race conditions between | ||
167 | * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the | ||
168 | * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), | ||
169 | * which already guaranteed to be running at most once. It is also the only | ||
170 | * place where tags/LUNs are allocated. So no other allocation can slip | ||
171 | * between that pair, there could only happen a reselection, which can free a | ||
172 | * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes | ||
173 | * important: the tag bit must be cleared before 'nr_allocated' is decreased. | ||
174 | */ | ||
175 | |||
176 | static void __init init_tags(struct NCR5380_hostdata *hostdata) | ||
177 | { | ||
178 | int target, lun; | ||
179 | struct tag_alloc *ta; | ||
180 | |||
181 | if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) | ||
182 | return; | ||
183 | |||
184 | for (target = 0; target < 8; ++target) { | ||
185 | for (lun = 0; lun < 8; ++lun) { | ||
186 | ta = &hostdata->TagAlloc[target][lun]; | ||
187 | bitmap_zero(ta->allocated, MAX_TAGS); | ||
188 | ta->nr_allocated = 0; | ||
189 | /* At the beginning, assume the maximum queue size we could | ||
190 | * support (MAX_TAGS). This value will be decreased if the target | ||
191 | * returns QUEUE_FULL status. | ||
192 | */ | ||
193 | ta->queue_size = MAX_TAGS; | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | |||
198 | |||
199 | /* Check if we can issue a command to this LUN: First see if the LUN is marked | ||
200 | * busy by an untagged command. If the command should use tagged queuing, also | ||
201 | * check that there is a free tag and the target's queue won't overflow. This | ||
202 | * function should be called with interrupts disabled to avoid race | ||
203 | * conditions. | ||
204 | */ | ||
205 | |||
206 | static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) | ||
207 | { | ||
208 | u8 lun = cmd->device->lun; | ||
209 | struct Scsi_Host *instance = cmd->device->host; | ||
210 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
211 | |||
212 | if (hostdata->busy[cmd->device->id] & (1 << lun)) | ||
213 | return 1; | ||
214 | if (!should_be_tagged || | ||
215 | !(hostdata->flags & FLAG_TAGGED_QUEUING) || | ||
216 | !cmd->device->tagged_supported) | ||
217 | return 0; | ||
218 | if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >= | ||
219 | hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) { | ||
220 | dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n", | ||
221 | scmd_id(cmd), lun); | ||
222 | return 1; | ||
223 | } | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | |||
228 | /* Allocate a tag for a command (there are no checks anymore, check_lun_busy() | ||
229 | * must be called before!), or reserve the LUN in 'busy' if the command is | ||
230 | * untagged. | ||
231 | */ | ||
232 | |||
233 | static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | ||
234 | { | ||
235 | u8 lun = cmd->device->lun; | ||
236 | struct Scsi_Host *instance = cmd->device->host; | ||
237 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
238 | |||
239 | /* If we or the target don't support tagged queuing, allocate the LUN for | ||
240 | * an untagged command. | ||
241 | */ | ||
242 | if (!should_be_tagged || | ||
243 | !(hostdata->flags & FLAG_TAGGED_QUEUING) || | ||
244 | !cmd->device->tagged_supported) { | ||
245 | cmd->tag = TAG_NONE; | ||
246 | hostdata->busy[cmd->device->id] |= (1 << lun); | ||
247 | dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n", | ||
248 | scmd_id(cmd), lun); | ||
249 | } else { | ||
250 | struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; | ||
251 | |||
252 | cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); | ||
253 | set_bit(cmd->tag, ta->allocated); | ||
254 | ta->nr_allocated++; | ||
255 | dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n", | ||
256 | cmd->tag, scmd_id(cmd), lun, ta->nr_allocated); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | |||
261 | /* Mark the tag of command 'cmd' as free, or in case of an untagged command, | ||
262 | * unlock the LUN. | ||
263 | */ | ||
264 | |||
265 | static void cmd_free_tag(struct scsi_cmnd *cmd) | ||
266 | { | ||
267 | u8 lun = cmd->device->lun; | ||
268 | struct Scsi_Host *instance = cmd->device->host; | ||
269 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
270 | |||
271 | if (cmd->tag == TAG_NONE) { | ||
272 | hostdata->busy[cmd->device->id] &= ~(1 << lun); | ||
273 | dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n", | ||
274 | scmd_id(cmd), lun); | ||
275 | } else if (cmd->tag >= MAX_TAGS) { | ||
276 | shost_printk(KERN_NOTICE, instance, | ||
277 | "trying to free bad tag %d!\n", cmd->tag); | ||
278 | } else { | ||
279 | struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; | ||
280 | clear_bit(cmd->tag, ta->allocated); | ||
281 | ta->nr_allocated--; | ||
282 | dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n", | ||
283 | cmd->tag, scmd_id(cmd), lun); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | |||
288 | static void free_all_tags(struct NCR5380_hostdata *hostdata) | ||
289 | { | ||
290 | int target, lun; | ||
291 | struct tag_alloc *ta; | ||
292 | |||
293 | if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) | ||
294 | return; | ||
295 | |||
296 | for (target = 0; target < 8; ++target) { | ||
297 | for (lun = 0; lun < 8; ++lun) { | ||
298 | ta = &hostdata->TagAlloc[target][lun]; | ||
299 | bitmap_zero(ta->allocated, MAX_TAGS); | ||
300 | ta->nr_allocated = 0; | ||
301 | } | ||
302 | } | ||
303 | } | ||
304 | |||
305 | #endif /* SUPPORT_TAGS */ | ||
306 | |||
307 | /** | ||
308 | * merge_contiguous_buffers - coalesce scatter-gather list entries | ||
309 | * @cmd: command requesting IO | ||
310 | * | ||
311 | * Try to merge several scatter-gather buffers into one DMA transfer. | ||
312 | * This is possible if the scatter buffers lie on physically | ||
313 | * contiguous addresses. The first scatter-gather buffer's data are | ||
314 | * assumed to be already transferred into cmd->SCp.this_residual. | ||
315 | * Every buffer merged avoids an interrupt and a DMA setup operation. | ||
316 | */ | ||
317 | |||
318 | static void merge_contiguous_buffers(struct scsi_cmnd *cmd) | ||
319 | { | ||
320 | #if !defined(CONFIG_SUN3) | ||
321 | unsigned long endaddr; | ||
322 | #if (NDEBUG & NDEBUG_MERGING) | ||
323 | unsigned long oldlen = cmd->SCp.this_residual; | ||
324 | int cnt = 1; | ||
325 | #endif | ||
326 | |||
327 | for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; | ||
328 | cmd->SCp.buffers_residual && | ||
329 | virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { | ||
330 | dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", | ||
331 | page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); | ||
332 | #if (NDEBUG & NDEBUG_MERGING) | ||
333 | ++cnt; | ||
334 | #endif | ||
335 | ++cmd->SCp.buffer; | ||
336 | --cmd->SCp.buffers_residual; | ||
337 | cmd->SCp.this_residual += cmd->SCp.buffer->length; | ||
338 | endaddr += cmd->SCp.buffer->length; | ||
339 | } | ||
340 | #if (NDEBUG & NDEBUG_MERGING) | ||
341 | if (oldlen != cmd->SCp.this_residual) | ||
342 | dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", | ||
343 | cnt, cmd->SCp.ptr, cmd->SCp.this_residual); | ||
344 | #endif | ||
345 | #endif /* !defined(CONFIG_SUN3) */ | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * initialize_SCp - init the scsi pointer field | ||
350 | * @cmd: command block to set up | ||
351 | * | ||
352 | * Set up the internal fields in the SCSI command. | ||
353 | */ | ||
354 | |||
355 | static inline void initialize_SCp(struct scsi_cmnd *cmd) | ||
356 | { | ||
357 | /* | ||
358 | * Initialize the Scsi Pointer field so that all of the commands in the | ||
359 | * various queues are valid. | ||
360 | */ | ||
361 | |||
362 | if (scsi_bufflen(cmd)) { | ||
363 | cmd->SCp.buffer = scsi_sglist(cmd); | ||
364 | cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; | ||
365 | cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); | ||
366 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | ||
367 | |||
368 | merge_contiguous_buffers(cmd); | ||
369 | } else { | ||
370 | cmd->SCp.buffer = NULL; | ||
371 | cmd->SCp.buffers_residual = 0; | ||
372 | cmd->SCp.ptr = NULL; | ||
373 | cmd->SCp.this_residual = 0; | ||
374 | } | ||
375 | |||
376 | cmd->SCp.Status = 0; | ||
377 | cmd->SCp.Message = 0; | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * NCR5380_poll_politely2 - wait for two chip register values | ||
382 | * @instance: controller to poll | ||
383 | * @reg1: 5380 register to poll | ||
384 | * @bit1: Bitmask to check | ||
385 | * @val1: Expected value | ||
386 | * @reg2: Second 5380 register to poll | ||
387 | * @bit2: Second bitmask to check | ||
388 | * @val2: Second expected value | ||
389 | * @wait: Time-out in jiffies | ||
390 | * | ||
391 | * Polls the chip in a reasonably efficient manner waiting for an | ||
392 | * event to occur. After a short quick poll we begin to yield the CPU | ||
393 | * (if possible). In irq contexts the time-out is arbitrarily limited. | ||
394 | * Callers may hold locks as long as they are held in irq mode. | ||
395 | * | ||
396 | * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. | ||
397 | */ | ||
398 | |||
399 | static int NCR5380_poll_politely2(struct Scsi_Host *instance, | ||
400 | int reg1, int bit1, int val1, | ||
401 | int reg2, int bit2, int val2, int wait) | ||
402 | { | ||
403 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
404 | unsigned long deadline = jiffies + wait; | ||
405 | unsigned long n; | ||
406 | |||
407 | /* Busy-wait for up to 10 ms */ | ||
408 | n = min(10000U, jiffies_to_usecs(wait)); | ||
409 | n *= hostdata->accesses_per_ms; | ||
410 | n /= 2000; | ||
411 | do { | ||
412 | if ((NCR5380_read(reg1) & bit1) == val1) | ||
413 | return 0; | ||
414 | if ((NCR5380_read(reg2) & bit2) == val2) | ||
415 | return 0; | ||
416 | cpu_relax(); | ||
417 | } while (n--); | ||
418 | |||
419 | if (irqs_disabled() || in_interrupt()) | ||
420 | return -ETIMEDOUT; | ||
421 | |||
422 | /* Repeatedly sleep for 1 ms until deadline */ | ||
423 | while (time_is_after_jiffies(deadline)) { | ||
424 | schedule_timeout_uninterruptible(1); | ||
425 | if ((NCR5380_read(reg1) & bit1) == val1) | ||
426 | return 0; | ||
427 | if ((NCR5380_read(reg2) & bit2) == val2) | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | return -ETIMEDOUT; | ||
432 | } | ||
433 | |||
434 | static inline int NCR5380_poll_politely(struct Scsi_Host *instance, | ||
435 | int reg, int bit, int val, int wait) | ||
436 | { | ||
437 | return NCR5380_poll_politely2(instance, reg, bit, val, | ||
438 | reg, bit, val, wait); | ||
439 | } | ||
440 | |||
441 | #if NDEBUG | ||
442 | static struct { | ||
443 | unsigned char mask; | ||
444 | const char *name; | ||
445 | } signals[] = { | ||
446 | {SR_DBP, "PARITY"}, | ||
447 | {SR_RST, "RST"}, | ||
448 | {SR_BSY, "BSY"}, | ||
449 | {SR_REQ, "REQ"}, | ||
450 | {SR_MSG, "MSG"}, | ||
451 | {SR_CD, "CD"}, | ||
452 | {SR_IO, "IO"}, | ||
453 | {SR_SEL, "SEL"}, | ||
454 | {0, NULL} | ||
455 | }, | ||
456 | basrs[] = { | ||
457 | {BASR_ATN, "ATN"}, | ||
458 | {BASR_ACK, "ACK"}, | ||
459 | {0, NULL} | ||
460 | }, | ||
461 | icrs[] = { | ||
462 | {ICR_ASSERT_RST, "ASSERT RST"}, | ||
463 | {ICR_ASSERT_ACK, "ASSERT ACK"}, | ||
464 | {ICR_ASSERT_BSY, "ASSERT BSY"}, | ||
465 | {ICR_ASSERT_SEL, "ASSERT SEL"}, | ||
466 | {ICR_ASSERT_ATN, "ASSERT ATN"}, | ||
467 | {ICR_ASSERT_DATA, "ASSERT DATA"}, | ||
468 | {0, NULL} | ||
469 | }, | ||
470 | mrs[] = { | ||
471 | {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, | ||
472 | {MR_TARGET, "MODE TARGET"}, | ||
473 | {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, | ||
474 | {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, | ||
475 | {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, | ||
476 | {MR_MONITOR_BSY, "MODE MONITOR BSY"}, | ||
477 | {MR_DMA_MODE, "MODE DMA"}, | ||
478 | {MR_ARBITRATE, "MODE ARBITRATION"}, | ||
479 | {0, NULL} | ||
480 | }; | ||
481 | |||
482 | /** | ||
483 | * NCR5380_print - print scsi bus signals | ||
484 | * @instance: adapter state to dump | ||
485 | * | ||
486 | * Print the SCSI bus signals for debugging purposes | ||
487 | */ | ||
488 | |||
489 | static void NCR5380_print(struct Scsi_Host *instance) | ||
490 | { | ||
491 | unsigned char status, data, basr, mr, icr, i; | ||
492 | |||
493 | data = NCR5380_read(CURRENT_SCSI_DATA_REG); | ||
494 | status = NCR5380_read(STATUS_REG); | ||
495 | mr = NCR5380_read(MODE_REG); | ||
496 | icr = NCR5380_read(INITIATOR_COMMAND_REG); | ||
497 | basr = NCR5380_read(BUS_AND_STATUS_REG); | ||
498 | |||
499 | printk("STATUS_REG: %02x ", status); | ||
500 | for (i = 0; signals[i].mask; ++i) | ||
501 | if (status & signals[i].mask) | ||
502 | printk(",%s", signals[i].name); | ||
503 | printk("\nBASR: %02x ", basr); | ||
504 | for (i = 0; basrs[i].mask; ++i) | ||
505 | if (basr & basrs[i].mask) | ||
506 | printk(",%s", basrs[i].name); | ||
507 | printk("\nICR: %02x ", icr); | ||
508 | for (i = 0; icrs[i].mask; ++i) | ||
509 | if (icr & icrs[i].mask) | ||
510 | printk(",%s", icrs[i].name); | ||
511 | printk("\nMODE: %02x ", mr); | ||
512 | for (i = 0; mrs[i].mask; ++i) | ||
513 | if (mr & mrs[i].mask) | ||
514 | printk(",%s", mrs[i].name); | ||
515 | printk("\n"); | ||
516 | } | ||
517 | |||
/*
 * Bus-phase value -> name lookup table used by NCR5380_print_phase().
 * The PHASE_UNKNOWN entry doubles as the table terminator, so an
 * unmatched phase value falls through to "UNKNOWN".
 */
static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"},
	{PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"},
	{PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"},
	{PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}
};
530 | |||
531 | /** | ||
532 | * NCR5380_print_phase - show SCSI phase | ||
533 | * @instance: adapter to dump | ||
534 | * | ||
535 | * Print the current SCSI phase for debugging purposes | ||
536 | */ | ||
537 | |||
538 | static void NCR5380_print_phase(struct Scsi_Host *instance) | ||
539 | { | ||
540 | unsigned char status; | ||
541 | int i; | ||
542 | |||
543 | status = NCR5380_read(STATUS_REG); | ||
544 | if (!(status & SR_REQ)) | ||
545 | shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); | ||
546 | else { | ||
547 | for (i = 0; (phases[i].value != PHASE_UNKNOWN) && | ||
548 | (phases[i].value != (status & PHASE_MASK)); ++i) | ||
549 | ; | ||
550 | shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); | ||
551 | } | ||
552 | } | ||
553 | #endif | ||
554 | |||
555 | /** | ||
556 | * NCR58380_info - report driver and host information | ||
557 | * @instance: relevant scsi host instance | ||
558 | * | ||
559 | * For use as the host template info() handler. | ||
560 | */ | ||
561 | |||
562 | static const char *NCR5380_info(struct Scsi_Host *instance) | ||
563 | { | ||
564 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
565 | |||
566 | return hostdata->info; | ||
567 | } | ||
568 | |||
/*
 * prepare_info - format the string returned by NCR5380_info()
 *
 * Writes the adapter resources (ports, irq, queue limits), the runtime
 * flags and the compile-time options into hostdata->info.
 */
static void prepare_info(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	snprintf(hostdata->info, sizeof(hostdata->info),
	         "%s, io_port 0x%lx, n_io_port %d, "
	         "base 0x%lx, irq %d, "
	         "can_queue %d, cmd_per_lun %d, "
	         "sg_tablesize %d, this_id %d, "
	         "flags { %s%s}, "
	         "options { %s} ",
	         instance->hostt->name, instance->io_port, instance->n_io_port,
	         instance->base, instance->irq,
	         instance->can_queue, instance->cmd_per_lun,
	         instance->sg_tablesize, instance->this_id,
	         hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
	         hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "",
	         /* The "options" argument is built at compile time from
	          * adjacent string literals, one per enabled #define.
	          */
#ifdef DIFFERENTIAL
	         "DIFFERENTIAL "
#endif
#ifdef REAL_DMA
	         "REAL_DMA "
#endif
#ifdef PARITY
	         "PARITY "
#endif
#ifdef SUPPORT_TAGS
	         "SUPPORT_TAGS "
#endif
	         "");
}
600 | |||
/**
 * NCR5380_init - initialise an NCR5380
 * @instance: adapter to configure
 * @flags: control flags
 *
 * Initializes *instance and corresponding 5380 chip,
 * with flags OR'd into the initial flags value.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 * set correctly. I don't care about the irq and other fields.
 *
 * Returns 0 for success, -ENOMEM if the work queue could not be
 * allocated.
 */

static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	int i;
	unsigned long deadline;

	hostdata->host = instance;
	hostdata->id_mask = 1 << instance->this_id;
	hostdata->id_higher_mask = 0;
	/* Collect all ID bits above our own; used to detect a
	 * higher-priority initiator during arbitration.
	 */
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	/* No target is busy initially */
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags(hostdata);
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	spin_lock_init(&hostdata->lock);
	hostdata->connected = NULL;
	hostdata->sensing = NULL;
	INIT_LIST_HEAD(&hostdata->autosense);
	INIT_LIST_HEAD(&hostdata->unissued);
	INIT_LIST_HEAD(&hostdata->disconnected);

	hostdata->flags = flags;

	/* Single work item on a dedicated queue runs NCR5380_main() */
	INIT_WORK(&hostdata->main_task, NCR5380_main);
	hostdata->work_q = alloc_workqueue("ncr5380_%d",
	                        WQ_UNBOUND | WQ_MEM_RECLAIM,
	                        1, instance->host_no);
	if (!hostdata->work_q)
		return -ENOMEM;

	prepare_info(instance);

	/* Put the chip into a known quiescent state */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	/* Calibrate register polling loop */
	i = 0;
	deadline = jiffies + 1;
	/* Spin to the next jiffy boundary so the measurement window below
	 * starts on a tick.
	 */
	do {
		cpu_relax();
	} while (time_is_after_jiffies(deadline));
	deadline += msecs_to_jiffies(256);
	/* Count how many status-register reads fit into 256 ms */
	do {
		NCR5380_read(STATUS_REG);
		++i;
		cpu_relax();
	} while (time_is_after_jiffies(deadline));
	hostdata->accesses_per_ms = i / 256;

	return 0;
}
674 | |||
/**
 * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems.
 * @instance: adapter to check
 *
 * If the system crashed, it may have crashed with a connected target and
 * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the
 * currently established nexus, which we know nothing about. Failing that
 * do a bus reset.
 *
 * Note that a bus reset will cause the chip to assert IRQ.
 *
 * Returns 0 if successful, otherwise -ENXIO.
 */

static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	int pass;

	/* Escalate while BSY stays asserted:
	 * wait (1), abort (2), wait (3), reset (4), wait (5), give up (6).
	 */
	for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) {
		switch (pass) {
		case 1:
		case 3:
		case 5:
			shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
			NCR5380_poll_politely(instance,
			                      STATUS_REG, SR_BSY, 0, 5 * HZ);
			break;
		case 2:
			shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n");
			do_abort(instance);
			break;
		case 4:
			shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n");
			do_reset(instance);
			/* Wait after a reset; the SCSI standard calls for
			 * 250ms, we wait 500ms to be on the safe side.
			 * But some Toshiba CD-ROMs need ten times that.
			 */
			if (hostdata->flags & FLAG_TOSHIBA_DELAY)
				msleep(2500);
			else
				msleep(500);
			break;
		case 6:
			shost_printk(KERN_ERR, instance, "bus locked solid\n");
			return -ENXIO;
		}
	}
	return 0;
}
726 | |||
727 | /** | ||
728 | * NCR5380_exit - remove an NCR5380 | ||
729 | * @instance: adapter to remove | ||
730 | * | ||
731 | * Assumes that no more work can be queued (e.g. by NCR5380_intr). | ||
732 | */ | ||
733 | |||
734 | static void NCR5380_exit(struct Scsi_Host *instance) | ||
735 | { | ||
736 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
737 | |||
738 | cancel_work_sync(&hostdata->main_task); | ||
739 | destroy_workqueue(hostdata->work_q); | ||
740 | } | ||
741 | |||
742 | /** | ||
743 | * complete_cmd - finish processing a command and return it to the SCSI ML | ||
744 | * @instance: the host instance | ||
745 | * @cmd: command to complete | ||
746 | */ | ||
747 | |||
748 | static void complete_cmd(struct Scsi_Host *instance, | ||
749 | struct scsi_cmnd *cmd) | ||
750 | { | ||
751 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
752 | |||
753 | dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); | ||
754 | |||
755 | if (hostdata->sensing == cmd) { | ||
756 | /* Autosense processing ends here */ | ||
757 | if ((cmd->result & 0xff) != SAM_STAT_GOOD) { | ||
758 | scsi_eh_restore_cmnd(cmd, &hostdata->ses); | ||
759 | set_host_byte(cmd, DID_ERROR); | ||
760 | } else | ||
761 | scsi_eh_restore_cmnd(cmd, &hostdata->ses); | ||
762 | hostdata->sensing = NULL; | ||
763 | } | ||
764 | |||
765 | #ifdef SUPPORT_TAGS | ||
766 | cmd_free_tag(cmd); | ||
767 | #else | ||
768 | hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); | ||
769 | #endif | ||
770 | cmd->scsi_done(cmd); | ||
771 | } | ||
772 | |||
/**
 * NCR5380_queue_command - queue a command
 * @instance: the relevant SCSI adapter
 * @cmd: SCSI command
 *
 * cmd is added to the per-instance issue queue, with minor
 * twiddling done to the host specific fields of cmd. If the
 * main coroutine is not running, it is restarted.
 *
 * Returns 0 on success, SCSI_MLQUEUE_HOST_BUSY if the DMA interrupt
 * could not be acquired.
 */

static int NCR5380_queue_command(struct Scsi_Host *instance,
                                 struct scsi_cmnd *cmd)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
	unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
	/* Debug build option: refuse all WRITE commands outright */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
	case WRITE_10:
		shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
		cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

	cmd->result = 0;

	/*
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which would
	 * alter queues and touch the lock.
	 */
	if (!NCR5380_acquire_dma_irq(instance))
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(&hostdata->lock, flags);

	/*
	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
	 * commands are added to the head of the queue since any command will
	 * clear the contingent allegiance condition that exists and the
	 * sense data is only guaranteed to be valid while the condition exists.
	 */

	if (cmd->cmnd[0] == REQUEST_SENSE)
		list_add(&ncmd->list, &hostdata->unissued);
	else
		list_add_tail(&ncmd->list, &hostdata->unissued);

	spin_unlock_irqrestore(&hostdata->lock, flags);

	dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n",
	         cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

	/* Kick off command processing */
	queue_work(hostdata->work_q, &hostdata->main_task);
	return 0;
}
834 | |||
835 | static inline void maybe_release_dma_irq(struct Scsi_Host *instance) | ||
836 | { | ||
837 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
838 | |||
839 | /* Caller does the locking needed to set & test these data atomically */ | ||
840 | if (list_empty(&hostdata->disconnected) && | ||
841 | list_empty(&hostdata->unissued) && | ||
842 | list_empty(&hostdata->autosense) && | ||
843 | !hostdata->connected && | ||
844 | !hostdata->selecting) | ||
845 | NCR5380_release_dma_irq(instance); | ||
846 | } | ||
847 | |||
/**
 * dequeue_next_cmd - dequeue a command for processing
 * @instance: the scsi host instance
 *
 * Priority is given to commands on the autosense queue. These commands
 * need autosense because of a CHECK CONDITION result.
 *
 * Returns a command pointer if a command is found for a target that is
 * not already busy. Otherwise returns NULL.
 */

static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	struct NCR5380_cmd *ncmd;
	struct scsi_cmnd *cmd;

	/* Only service the issue queue while no autosense cycle is pending
	 * or already in progress.
	 */
	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
		list_for_each_entry(ncmd, &hostdata->unissued, list) {
			cmd = NCR5380_to_scmd(ncmd);
			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
			         cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun);

			/* Skip commands whose target/lun already has an
			 * outstanding command (or, with tagged queueing,
			 * no free tag).
			 */
			if (
#ifdef SUPPORT_TAGS
			    !is_lun_busy(cmd, 1)
#else
			    !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))
#endif
			) {
				list_del(&ncmd->list);
				dsprintk(NDEBUG_QUEUES, instance,
				         "dequeue: removed %p from issue queue\n", cmd);
				return cmd;
			}
		}
	} else {
		/* Autosense processing begins here */
		ncmd = list_first_entry(&hostdata->autosense,
		                        struct NCR5380_cmd, list);
		list_del(&ncmd->list);
		cmd = NCR5380_to_scmd(ncmd);
		dsprintk(NDEBUG_QUEUES, instance,
		         "dequeue: removed %p from autosense queue\n", cmd);
		/* Turn the command into a REQUEST SENSE for its device */
		scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
		hostdata->sensing = cmd;
		return cmd;
	}
	return NULL;
}
898 | |||
899 | static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) | ||
900 | { | ||
901 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
902 | struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); | ||
903 | |||
904 | if (hostdata->sensing == cmd) { | ||
905 | scsi_eh_restore_cmnd(cmd, &hostdata->ses); | ||
906 | list_add(&ncmd->list, &hostdata->autosense); | ||
907 | hostdata->sensing = NULL; | ||
908 | } else | ||
909 | list_add(&ncmd->list, &hostdata->unissued); | ||
910 | } | ||
911 | |||
/**
 * NCR5380_main - NCR state machines
 * @work: work item embedded in the adapter's hostdata
 *
 * NCR5380_main is a coroutine that runs as long as more work can
 * be done on the NCR5380 host adapters in a system. Both
 * NCR5380_queue_command() and NCR5380_intr() will try to start it
 * in case it is not running.
 */

static void NCR5380_main(struct work_struct *work)
{
	struct NCR5380_hostdata *hostdata =
		container_of(work, struct NCR5380_hostdata, main_task);
	struct Scsi_Host *instance = hostdata->host;
	int done;

	/*
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	do {
		done = 1;

		spin_lock_irq(&hostdata->lock);
		/* Keep starting commands until a nexus is established */
		while (!hostdata->connected && !hostdata->selecting) {
			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);

			if (!cmd)
				break;

			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);

			/*
			 * Attempt to establish an I_T_L nexus here.
			 * On success, instance->hostdata->connected is set.
			 * On failure, we must add the command back to the
			 * issue queue so we can keep trying.
			 */
			/*
			 * REQUEST SENSE commands are issued without tagged
			 * queueing, even on SCSI-II devices because the
			 * contingent allegiance condition exists for the
			 * entire unit.
			 */
			/* ++roman: ...and the standard also requires that
			 * REQUEST SENSE command are untagged.
			 */

#ifdef SUPPORT_TAGS
			cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
#endif
			if (!NCR5380_select(instance, cmd)) {
				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
				maybe_release_dma_irq(instance);
			} else {
				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
				         "main: select failed, returning %p to queue\n", cmd);
				requeue_cmd(instance, cmd);
#ifdef SUPPORT_TAGS
				cmd_free_tag(cmd);
#endif
			}
		}
		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n");
			NCR5380_information_transfer(instance);
			/* Loop again in case the transfer generated more work */
			done = 0;
		}
		spin_unlock_irq(&hostdata->lock);
		if (!done)
			cond_resched();
	} while (!done);
}
991 | |||
992 | |||
#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 * mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 */

static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	int transferred;
	unsigned char **data;
	int *count;
	int saved_data = 0, overrun = 0;
	unsigned char p;

	/* Read-overrun workaround: if the target still asserts ACK with a
	 * matching phase, one extra byte is sitting in the chip's input
	 * register; save it so it can be prepended to the PIO tail below.
	 */
	if (hostdata->read_overruns) {
		p = hostdata->connected->SCp.phase;
		if (p & SR_IO) {
			udelay(10);
			if ((NCR5380_read(BUS_AND_STATUS_REG) &
			     (BASR_PHASE_MATCH|BASR_ACK)) ==
			    (BASR_PHASE_MATCH|BASR_ACK)) {
				saved_data = NCR5380_read(INPUT_DATA_REG);
				overrun = 1;
				dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
			}
		}
	}

#if defined(CONFIG_SUN3)
	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
		       instance->host_no);
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
	    (BASR_PHASE_MATCH | BASR_ACK)) {
		pr_err("scsi%d: BASR %02x\n", instance->host_no,
		       NCR5380_read(BUS_AND_STATUS_REG));
		pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
		       instance->host_no);
		BUG();
	}
#endif

	/* Leave DMA Mode and acknowledge the interrupt */
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_read(RESET_PARITY_INTERRUPT_REG);

	/* Advance the data pointer/residual by the amount actually moved */
	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	data = (unsigned char **)&hostdata->connected->SCp.ptr;
	count = &hostdata->connected->SCp.this_residual;
	*data += transferred;
	*count -= transferred;

	if (hostdata->read_overruns) {
		int cnt, toPIO;

		/* Still in the same input phase: fetch the trailing bytes
		 * by PIO, starting with any saved overrun byte.
		 */
		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
			cnt = toPIO = hostdata->read_overruns;
			if (overrun) {
				dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
				*(*data)++ = saved_data;
				(*count)--;
				cnt--;
				toPIO--;
			}
			dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
			NCR5380_transfer_pio(instance, &p, &cnt, data);
			*count -= toPIO - cnt;
		}
	}
}
#endif /* REAL_DMA */
1075 | |||
1076 | |||
/**
 * NCR5380_intr - generic NCR5380 irq handler
 * @irq: interrupt number
 * @dev_id: device info
 *
 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 * from the disconnected queue, and restarting NCR5380_main()
 * as required.
 *
 * The chip can assert IRQ in any of six different conditions. The IRQ flag
 * is then cleared by reading the Reset Parity/Interrupt Register (RPIR).
 * Three of these six conditions are latched in the Bus and Status Register:
 * - End of DMA (cleared by ending DMA Mode)
 * - Parity error (cleared by reading RPIR)
 * - Loss of BSY (cleared by reading RPIR)
 * Two conditions have flag bits that are not latched:
 * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode)
 * - Bus reset (non-maskable)
 * The remaining condition has no flag bit at all:
 * - Selection/reselection
 *
 * Hence, establishing the cause(s) of any interrupt is partly guesswork.
 * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor
 * claimed that "the design of the [DP8490] interrupt logic ensures
 * interrupts will not be lost (they can be on the DP5380)."
 * The L5380/53C80 datasheet from LOGIC Devices has more details.
 *
 * Checking for bus reset by reading RST is futile because of interrupt
 * latency, but a bus reset will reset chip logic. Checking for parity error
 * is unnecessary because that interrupt is never enabled. A Loss of BSY
 * condition will clear DMA Mode. We can tell when this occurs because the
 * Busy Monitor interrupt is enabled together with DMA Mode.
 */

static irqreturn_t NCR5380_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	int handled = 0;
	unsigned char basr;
	unsigned long flags;

	spin_lock_irqsave(&hostdata->lock, flags);

	basr = NCR5380_read(BUS_AND_STATUS_REG);
	if (basr & BASR_IRQ) {
		unsigned char mr = NCR5380_read(MODE_REG);
		unsigned char sr = NCR5380_read(STATUS_REG);

		dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n",
		         irq, basr, sr, mr);

#if defined(REAL_DMA)
		if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) {
			/* Probably End of DMA, Phase Mismatch or Loss of BSY.
			 * We ack IRQ after clearing Mode Register. Workarounds
			 * for End of DMA errata need to happen in DMA Mode.
			 */

			dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n");

			if (hostdata->connected) {
				NCR5380_dma_complete(instance);
				queue_work(hostdata->work_q, &hostdata->main_task);
			} else {
				NCR5380_write(MODE_REG, MR_BASE);
				NCR5380_read(RESET_PARITY_INTERRUPT_REG);
			}
		} else
#endif /* REAL_DMA */
		if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
		    (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) {
			/* Probably reselected */
			NCR5380_write(SELECT_ENABLE_REG, 0);
			NCR5380_read(RESET_PARITY_INTERRUPT_REG);

			dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n");

			if (!hostdata->connected) {
				NCR5380_reselect(instance);
				queue_work(hostdata->work_q, &hostdata->main_task);
			}
			/* Re-enable reselection unless a nexus was established */
			if (!hostdata->connected)
				NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		} else {
			/* Probably Bus Reset */
			NCR5380_read(RESET_PARITY_INTERRUPT_REG);

			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
#ifdef SUN3_SCSI_VME
			dregs->csr |= CSR_DMA_ENABLE;
#endif
		}
		handled = 1;
	} else {
		/* Shared interrupt not ours, or a glitch */
		shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	spin_unlock_irqrestore(&hostdata->lock, flags);

	return IRQ_RETVAL(handled);
}
1182 | |||
1183 | /* | ||
1184 | * Function : int NCR5380_select(struct Scsi_Host *instance, | ||
1185 | * struct scsi_cmnd *cmd) | ||
1186 | * | ||
1187 | * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, | ||
1188 | * including ARBITRATION, SELECTION, and initial message out for | ||
1189 | * IDENTIFY and queue messages. | ||
1190 | * | ||
1191 | * Inputs : instance - instantiation of the 5380 driver on which this | ||
1192 | * target lives, cmd - SCSI command to execute. | ||
1193 | * | ||
1194 | * Returns cmd if selection failed but should be retried, | ||
1195 | * NULL if selection failed and should not be retried, or | ||
1196 | * NULL if selection succeeded (hostdata->connected == cmd). | ||
1197 | * | ||
1198 | * Side effects : | ||
1199 | * If bus busy, arbitration failed, etc, NCR5380_select() will exit | ||
1200 | * with registers as they should have been on entry - ie | ||
1201 | * SELECT_ENABLE will be set appropriately, the NCR5380 | ||
1202 | * will cease to drive any SCSI bus signals. | ||
1203 | * | ||
1204 | * If successful : I_T_L or I_T_L_Q nexus will be established, | ||
1205 | * instance->connected will be set to cmd. | ||
1206 | * SELECT interrupt will be disabled. | ||
1207 | * | ||
1208 | * If failed (no target) : cmd->scsi_done() will be called, and the | ||
1209 | * cmd->result host byte set to DID_BAD_TARGET. | ||
1210 | */ | ||
1211 | |||
1212 | static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, | ||
1213 | struct scsi_cmnd *cmd) | ||
1214 | { | ||
1215 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
1216 | unsigned char tmp[3], phase; | ||
1217 | unsigned char *data; | ||
1218 | int len; | ||
1219 | int err; | ||
1220 | |||
1221 | NCR5380_dprint(NDEBUG_ARBITRATION, instance); | ||
1222 | dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", | ||
1223 | instance->this_id); | ||
1224 | |||
1225 | /* | ||
1226 | * Arbitration and selection phases are slow and involve dropping the | ||
1227 | * lock, so we have to watch out for EH. An exception handler may | ||
1228 | * change 'selecting' to NULL. This function will then return NULL | ||
1229 | * so that the caller will forget about 'cmd'. (During information | ||
1230 | * transfer phases, EH may change 'connected' to NULL.) | ||
1231 | */ | ||
1232 | hostdata->selecting = cmd; | ||
1233 | |||
1234 | /* | ||
1235 | * Set the phase bits to 0, otherwise the NCR5380 won't drive the | ||
1236 | * data bus during SELECTION. | ||
1237 | */ | ||
1238 | |||
1239 | NCR5380_write(TARGET_COMMAND_REG, 0); | ||
1240 | |||
1241 | /* | ||
1242 | * Start arbitration. | ||
1243 | */ | ||
1244 | |||
1245 | NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); | ||
1246 | NCR5380_write(MODE_REG, MR_ARBITRATE); | ||
1247 | |||
1248 | /* The chip now waits for BUS FREE phase. Then after the 800 ns | ||
1249 | * Bus Free Delay, arbitration will begin. | ||
1250 | */ | ||
1251 | |||
1252 | spin_unlock_irq(&hostdata->lock); | ||
1253 | err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, | ||
1254 | INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, | ||
1255 | ICR_ARBITRATION_PROGRESS, HZ); | ||
1256 | spin_lock_irq(&hostdata->lock); | ||
1257 | if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { | ||
1258 | /* Reselection interrupt */ | ||
1259 | goto out; | ||
1260 | } | ||
1261 | if (!hostdata->selecting) { | ||
1262 | /* Command was aborted */ | ||
1263 | NCR5380_write(MODE_REG, MR_BASE); | ||
1264 | goto out; | ||
1265 | } | ||
1266 | if (err < 0) { | ||
1267 | NCR5380_write(MODE_REG, MR_BASE); | ||
1268 | shost_printk(KERN_ERR, instance, | ||
1269 | "select: arbitration timeout\n"); | ||
1270 | goto out; | ||
1271 | } | ||
1272 | spin_unlock_irq(&hostdata->lock); | ||
1273 | |||
1274 | /* The SCSI-2 arbitration delay is 2.4 us */ | ||
1275 | udelay(3); | ||
1276 | |||
1277 | /* Check for lost arbitration */ | ||
1278 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1279 | (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || | ||
1280 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { | ||
1281 | NCR5380_write(MODE_REG, MR_BASE); | ||
1282 | dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); | ||
1283 | spin_lock_irq(&hostdata->lock); | ||
1284 | goto out; | ||
1285 | } | ||
1286 | |||
1287 | /* After/during arbitration, BSY should be asserted. | ||
1288 | * IBM DPES-31080 Version S31Q works now | ||
1289 | * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) | ||
1290 | */ | ||
1291 | NCR5380_write(INITIATOR_COMMAND_REG, | ||
1292 | ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); | ||
1293 | |||
1294 | /* | ||
1295 | * Again, bus clear + bus settle time is 1.2us, however, this is | ||
1296 | * a minimum so we'll udelay ceil(1.2) | ||
1297 | */ | ||
1298 | |||
1299 | if (hostdata->flags & FLAG_TOSHIBA_DELAY) | ||
1300 | udelay(15); | ||
1301 | else | ||
1302 | udelay(2); | ||
1303 | |||
1304 | spin_lock_irq(&hostdata->lock); | ||
1305 | |||
1306 | /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ | ||
1307 | if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) | ||
1308 | goto out; | ||
1309 | |||
1310 | if (!hostdata->selecting) { | ||
1311 | NCR5380_write(MODE_REG, MR_BASE); | ||
1312 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1313 | goto out; | ||
1314 | } | ||
1315 | |||
1316 | dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); | ||
1317 | |||
1318 | /* | ||
1319 | * Now that we have won arbitration, start Selection process, asserting | ||
1320 | * the host and target ID's on the SCSI bus. | ||
1321 | */ | ||
1322 | |||
1323 | NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); | ||
1324 | |||
1325 | /* | ||
1326 | * Raise ATN while SEL is true before BSY goes false from arbitration, | ||
1327 | * since this is the only way to guarantee that we'll get a MESSAGE OUT | ||
1328 | * phase immediately after selection. | ||
1329 | */ | ||
1330 | |||
1331 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | | ||
1332 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); | ||
1333 | NCR5380_write(MODE_REG, MR_BASE); | ||
1334 | |||
1335 | /* | ||
1336 | * Reselect interrupts must be turned off prior to the dropping of BSY, | ||
1337 | * otherwise we will trigger an interrupt. | ||
1338 | */ | ||
1339 | NCR5380_write(SELECT_ENABLE_REG, 0); | ||
1340 | |||
1341 | spin_unlock_irq(&hostdata->lock); | ||
1342 | |||
1343 | /* | ||
1344 | * The initiator shall then wait at least two deskew delays and release | ||
1345 | * the BSY signal. | ||
1346 | */ | ||
1347 | udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ | ||
1348 | |||
1349 | /* Reset BSY */ | ||
1350 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | | ||
1351 | ICR_ASSERT_ATN | ICR_ASSERT_SEL); | ||
1352 | |||
1353 | /* | ||
1354 | * Something weird happens when we cease to drive BSY - looks | ||
1355 | * like the board/chip is letting us do another read before the | ||
1356 | * appropriate propagation delay has expired, and we're confusing | ||
1357 | * a BSY signal from ourselves as the target's response to SELECTION. | ||
1358 | * | ||
1359 | * A small delay (the 'C++' frontend breaks the pipeline with an | ||
1360 | * unnecessary jump, making it work on my 386-33/Trantor T128, the | ||
1361 | * tighter 'C' code breaks and requires this) solves the problem - | ||
1362 | * the 1 us delay is arbitrary, and only used because this delay will | ||
1363 | * be the same on other platforms and since it works here, it should | ||
1364 | * work there. | ||
1365 | * | ||
1366 | * wingel suggests that this could be due to failing to wait | ||
1367 | * one deskew delay. | ||
1368 | */ | ||
1369 | |||
1370 | udelay(1); | ||
1371 | |||
1372 | dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); | ||
1373 | |||
1374 | /* | ||
1375 | * The SCSI specification calls for a 250 ms timeout for the actual | ||
1376 | * selection. | ||
1377 | */ | ||
1378 | |||
1379 | err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY, | ||
1380 | msecs_to_jiffies(250)); | ||
1381 | |||
1382 | if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { | ||
1383 | spin_lock_irq(&hostdata->lock); | ||
1384 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1385 | NCR5380_reselect(instance); | ||
1386 | if (!hostdata->connected) | ||
1387 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
1388 | shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); | ||
1389 | goto out; | ||
1390 | } | ||
1391 | |||
1392 | if (err < 0) { | ||
1393 | spin_lock_irq(&hostdata->lock); | ||
1394 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1395 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
1396 | /* Can't touch cmd if it has been reclaimed by the scsi ML */ | ||
1397 | if (hostdata->selecting) { | ||
1398 | cmd->result = DID_BAD_TARGET << 16; | ||
1399 | complete_cmd(instance, cmd); | ||
1400 | dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); | ||
1401 | cmd = NULL; | ||
1402 | } | ||
1403 | goto out; | ||
1404 | } | ||
1405 | |||
1406 | /* | ||
1407 | * No less than two deskew delays after the initiator detects the | ||
1408 | * BSY signal is true, it shall release the SEL signal and may | ||
1409 | * change the DATA BUS. -wingel | ||
1410 | */ | ||
1411 | |||
1412 | udelay(1); | ||
1413 | |||
1414 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | ||
1415 | |||
1416 | /* | ||
1417 | * Since we followed the SCSI spec, and raised ATN while SEL | ||
1418 | * was true but before BSY was false during selection, the information | ||
1419 | * transfer phase should be a MESSAGE OUT phase so that we can send the | ||
1420 | * IDENTIFY message. | ||
1421 | * | ||
1422 | * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG | ||
1423 | * message (2 bytes) with a tag ID that we increment with every command | ||
1424 | * until it wraps back to 0. | ||
1425 | * | ||
1426 | * XXX - it turns out that there are some broken SCSI-II devices, | ||
1427 | * which claim to support tagged queuing but fail when more than | ||
1428 | * some number of commands are issued at once. | ||
1429 | */ | ||
1430 | |||
1431 | /* Wait for start of REQ/ACK handshake */ | ||
1432 | |||
1433 | err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); | ||
1434 | spin_lock_irq(&hostdata->lock); | ||
1435 | if (err < 0) { | ||
1436 | shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); | ||
1437 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1438 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
1439 | goto out; | ||
1440 | } | ||
1441 | if (!hostdata->selecting) { | ||
1442 | do_abort(instance); | ||
1443 | goto out; | ||
1444 | } | ||
1445 | |||
1446 | dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", | ||
1447 | scmd_id(cmd)); | ||
1448 | tmp[0] = IDENTIFY(1, cmd->device->lun); | ||
1449 | |||
1450 | #ifdef SUPPORT_TAGS | ||
1451 | if (cmd->tag != TAG_NONE) { | ||
1452 | tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; | ||
1453 | tmp[2] = cmd->tag; | ||
1454 | len = 3; | ||
1455 | } else | ||
1456 | len = 1; | ||
1457 | #else | ||
1458 | len = 1; | ||
1459 | cmd->tag = 0; | ||
1460 | #endif /* SUPPORT_TAGS */ | ||
1461 | |||
1462 | /* Send message(s) */ | ||
1463 | data = tmp; | ||
1464 | phase = PHASE_MSGOUT; | ||
1465 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
1466 | dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); | ||
1467 | /* XXX need to handle errors here */ | ||
1468 | |||
1469 | hostdata->connected = cmd; | ||
1470 | #ifndef SUPPORT_TAGS | ||
1471 | hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; | ||
1472 | #endif | ||
1473 | #ifdef SUN3_SCSI_VME | ||
1474 | dregs->csr |= CSR_INTR; | ||
1475 | #endif | ||
1476 | |||
1477 | initialize_SCp(cmd); | ||
1478 | |||
1479 | cmd = NULL; | ||
1480 | |||
1481 | out: | ||
1482 | if (!hostdata->selecting) | ||
1483 | return NULL; | ||
1484 | hostdata->selecting = NULL; | ||
1485 | return cmd; | ||
1486 | } | ||
1487 | |||
/*
 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
 * unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using polled I/O
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 * what phase is expected, *count - pointer to number of
 * bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 * maximum number of bytes, 0 if all bytes are transferred or exit
 * is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 * XXX Note : handling for bus free may be useful.
 */

/*
 * Note : this code is not as quick as it could be, however it
 * IS 100% reliable, and for the actual data transfer where speed
 * counts, we will always do a pseudo DMA or DMA transfer.
 */

static int NCR5380_transfer_pio(struct Scsi_Host *instance,
                                unsigned char *phase, int *count,
                                unsigned char **data)
{
	/* Work on local copies; the caller's values are written back at exit. */
	unsigned char p = *phase, tmp;
	int c = *count;
	unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */

		/* Give up on this byte (and the transfer) if REQ never rises. */
		if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
			break;

		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");

		/* Check for phase mismatch */
		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) {
			dsprintk(NDEBUG_PIO, instance, "phase mismatch\n");
			NCR5380_dprint_phase(NDEBUG_PIO, instance);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */

		if (!(p & SR_IO)) {
			/* Sending: assert DATA, then raise ACK. On the last
			 * MSGOUT byte (c == 1) ATN is left deasserted;
			 * otherwise ATN is kept up so the target stays in
			 * MESSAGE OUT for the remaining bytes.
			 */
			if (!((p & SR_MSG) && c > 1)) {
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
				              ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
				              ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
				              ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			/* Receiving: just acknowledge the byte. */
			NCR5380_dprint(NDEBUG_PIO, instance);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* Wait for the target to negate REQ, completing the handshake. */
		if (NCR5380_poll_politely(instance,
		                          STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
			break;

		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1. We were in MSGOUT phase, and we are on the last byte of the
		 * message. ATN must be dropped as ACK is dropped.
		 *
		 * 2. We are in a MSGIN phase, and we are on the last byte of the
		 * message. We must exit with ACK asserted, so that the calling
		 * code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3. ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	dsprintk(NDEBUG_PIO, instance, "residual %d\n", c);

	/* Report the residual count and updated data pointer back to caller. */
	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter applies if
	 * we're in MSG IN, DATA IN or STATUS and all bytes have been received.
	 */
	if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	/* Success means either a complete transfer or exit in the same phase. */
	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}
1624 | |||
/**
 * do_reset - issue a reset command
 * @instance: adapter to reset
 *
 * Issue a reset sequence to the NCR5380 and try and get the bus
 * back into sane shape.
 *
 * This clears the reset interrupt flag because there may be no handler for
 * it. When the driver is initialized, the NCR5380_intr() handler has not yet
 * been installed. And when in EH we may have released the ST DMA interrupt.
 */

static void do_reset(struct Scsi_Host *instance)
{
	unsigned long flags;

	/* Keep the whole RST pulse atomic with respect to local interrupts. */
	local_irq_save(flags);
	/* Mirror the current bus phase into the TCR before pulsing RST. */
	NCR5380_write(TARGET_COMMAND_REG,
	              PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
	/* Assert RST, hold it, then release it again.  NOTE(review): the
	 * 50 us hold presumably covers the SCSI minimum RST assertion
	 * period - confirm against the SCSI-2 timing requirements.
	 */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
	udelay(50);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	/* Reading this register clears the chip's reset interrupt condition
	 * (see the header comment for why no handler may be installed).
	 */
	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	local_irq_restore(flags);
}
1650 | |||
/**
 * do_abort - abort the currently established nexus by going to
 * MESSAGE OUT phase and sending an ABORT message.
 * @instance: relevant scsi host instance
 *
 * Returns 0 on success, -1 on failure.
 */

static int do_abort(struct Scsi_Host *instance)
{
	unsigned char *msgptr, phase, tmp;
	int len;
	int rc;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ. Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */

	rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
	if (rc < 0)
		goto timeout;

	/* Latch the phase the target is presenting and mirror it into TCR. */
	tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if (tmp != PHASE_MSGOUT) {
		/* Not yet in MESSAGE OUT: handshake one byte of whatever
		 * phase the target is in (keeping ATN asserted), then wait
		 * for REQ to drop before releasing ACK.
		 */
		NCR5380_write(INITIATOR_COMMAND_REG,
		              ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
		rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
		if (rc < 0)
			goto timeout;
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	/* Send the single-byte ABORT message via polled I/O. */
	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */

	/* A non-zero residual means the ABORT byte was not transferred. */
	return len ? -1 : 0;

timeout:
	/* Target never responded: drop ATN/ACK and report failure. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	return -1;
}
1712 | |||
#if defined(REAL_DMA)
/*
 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
 * unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using either real
 * or pseudo DMA.
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 * what phase is expected, *count - pointer to number of
 * bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 * maximum number of bytes, 0 if all bytes are transferred or exit
 * is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 */


static int NCR5380_transfer_dma(struct Scsi_Host *instance,
                                unsigned char *phase, int *count,
                                unsigned char **data)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	register int c = *count;
	register unsigned char p = *phase;

#if defined(CONFIG_SUN3)
	/* sanity check */
	if (!sun3_dma_setup_done) {
		pr_err("scsi%d: transfer_dma without setup!\n",
		       instance->host_no);
		BUG();
	}
	hostdata->dma_len = c;

	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
	         (p & SR_IO) ? "receive" : "send", c, *data);

	/* netbsd turns off ints here, why not be safe and do it too */

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	/* Program the expected phase, then put the chip into DMA mode with
	 * EOP and busy-monitor interrupts enabled.
	 */
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
	                        MR_ENABLE_EOP_INTR);
	if (p & SR_IO) {
		/* Receive: nothing driven onto the bus by the initiator. */
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		/* Send: drive the data bus, then kick off the DMA send. */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	sun3_dma_active = 1;

#else /* !defined(CONFIG_SUN3) */
	register unsigned char *d = *data;
	unsigned char tmp;

	/* Bail out (reporting the observed phase) if the bus is no longer
	 * in the phase the caller expected.
	 */
	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
		*phase = tmp;
		return -1;
	}

	/* Reserve some trailing bytes that are transferred by PIO instead,
	 * to absorb read overruns on hardware that needs it.
	 */
	if (hostdata->read_overruns && (p & SR_IO))
		c -= hostdata->read_overruns;

	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
	         (p & SR_IO) ? "receive" : "send", c, d);

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
	                        MR_ENABLE_EOP_INTR);

	if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
		/* On the Medusa, it is a must to initialize the DMA before
		 * starting the NCR. This is also the cleaner way for the TT.
		 */
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
	}

	if (p & SR_IO)
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	else {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

	if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
		/* On the Falcon, the DMA setup must be done after the last */
		/* NCR access, else the DMA setup gets trashed!
		 */
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
	}
#endif /* !defined(CONFIG_SUN3) */

	/* DMA has been started; completion is handled asynchronously. */
	return 0;
}
#endif /* defined(REAL_DMA) */
1823 | |||
1824 | /* | ||
1825 | * Function : NCR5380_information_transfer (struct Scsi_Host *instance) | ||
1826 | * | ||
1827 | * Purpose : run through the various SCSI phases and do as the target | ||
1828 | * directs us to. Operates on the currently connected command, | ||
1829 | * instance->connected. | ||
1830 | * | ||
1831 | * Inputs : instance, instance for which we are doing commands | ||
1832 | * | ||
1833 | * Side effects : SCSI things happen, the disconnected queue will be | ||
1834 | * modified if a command disconnects, *instance->connected will | ||
1835 | * change. | ||
1836 | * | ||
1837 | * XXX Note : we need to watch for bus free or a reset condition here | ||
1838 | * to recover from an unexpected bus free condition. | ||
1839 | */ | ||
1840 | |||
1841 | static void NCR5380_information_transfer(struct Scsi_Host *instance) | ||
1842 | { | ||
1843 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
1844 | unsigned char msgout = NOP; | ||
1845 | int sink = 0; | ||
1846 | int len; | ||
1847 | int transfersize; | ||
1848 | unsigned char *data; | ||
1849 | unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; | ||
1850 | struct scsi_cmnd *cmd; | ||
1851 | |||
1852 | #ifdef SUN3_SCSI_VME | ||
1853 | dregs->csr |= CSR_INTR; | ||
1854 | #endif | ||
1855 | |||
1856 | while ((cmd = hostdata->connected)) { | ||
1857 | struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); | ||
1858 | |||
1859 | tmp = NCR5380_read(STATUS_REG); | ||
1860 | /* We only have a valid SCSI phase when REQ is asserted */ | ||
1861 | if (tmp & SR_REQ) { | ||
1862 | phase = (tmp & PHASE_MASK); | ||
1863 | if (phase != old_phase) { | ||
1864 | old_phase = phase; | ||
1865 | NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); | ||
1866 | } | ||
1867 | #if defined(CONFIG_SUN3) | ||
1868 | if (phase == PHASE_CMDOUT) { | ||
1869 | #if defined(REAL_DMA) | ||
1870 | void *d; | ||
1871 | unsigned long count; | ||
1872 | |||
1873 | if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { | ||
1874 | count = cmd->SCp.buffer->length; | ||
1875 | d = sg_virt(cmd->SCp.buffer); | ||
1876 | } else { | ||
1877 | count = cmd->SCp.this_residual; | ||
1878 | d = cmd->SCp.ptr; | ||
1879 | } | ||
1880 | /* this command setup for dma yet? */ | ||
1881 | if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { | ||
1882 | if (cmd->request->cmd_type == REQ_TYPE_FS) { | ||
1883 | sun3scsi_dma_setup(instance, d, count, | ||
1884 | rq_data_dir(cmd->request)); | ||
1885 | sun3_dma_setup_done = cmd; | ||
1886 | } | ||
1887 | } | ||
1888 | #endif | ||
1889 | #ifdef SUN3_SCSI_VME | ||
1890 | dregs->csr |= CSR_INTR; | ||
1891 | #endif | ||
1892 | } | ||
1893 | #endif /* CONFIG_SUN3 */ | ||
1894 | |||
1895 | if (sink && (phase != PHASE_MSGOUT)) { | ||
1896 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); | ||
1897 | |||
1898 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | | ||
1899 | ICR_ASSERT_ACK); | ||
1900 | while (NCR5380_read(STATUS_REG) & SR_REQ) | ||
1901 | ; | ||
1902 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1903 | ICR_ASSERT_ATN); | ||
1904 | sink = 0; | ||
1905 | continue; | ||
1906 | } | ||
1907 | |||
1908 | switch (phase) { | ||
1909 | case PHASE_DATAOUT: | ||
1910 | #if (NDEBUG & NDEBUG_NO_DATAOUT) | ||
1911 | shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); | ||
1912 | sink = 1; | ||
1913 | do_abort(instance); | ||
1914 | cmd->result = DID_ERROR << 16; | ||
1915 | complete_cmd(instance, cmd); | ||
1916 | hostdata->connected = NULL; | ||
1917 | return; | ||
1918 | #endif | ||
1919 | case PHASE_DATAIN: | ||
1920 | /* | ||
1921 | * If there is no room left in the current buffer in the | ||
1922 | * scatter-gather list, move onto the next one. | ||
1923 | */ | ||
1924 | |||
1925 | if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { | ||
1926 | ++cmd->SCp.buffer; | ||
1927 | --cmd->SCp.buffers_residual; | ||
1928 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | ||
1929 | cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); | ||
1930 | merge_contiguous_buffers(cmd); | ||
1931 | dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n", | ||
1932 | cmd->SCp.this_residual, | ||
1933 | cmd->SCp.buffers_residual); | ||
1934 | } | ||
1935 | |||
1936 | /* | ||
1937 | * The preferred transfer method is going to be | ||
1938 | * PSEUDO-DMA for systems that are strictly PIO, | ||
1939 | * since we can let the hardware do the handshaking. | ||
1940 | * | ||
1941 | * For this to work, we need to know the transfersize | ||
1942 | * ahead of time, since the pseudo-DMA code will sit | ||
1943 | * in an unconditional loop. | ||
1944 | */ | ||
1945 | |||
1946 | /* ++roman: I suggest, this should be | ||
1947 | * #if def(REAL_DMA) | ||
1948 | * instead of leaving REAL_DMA out. | ||
1949 | */ | ||
1950 | |||
1951 | #if defined(REAL_DMA) | ||
1952 | #if !defined(CONFIG_SUN3) | ||
1953 | transfersize = 0; | ||
1954 | if (!cmd->device->borken) | ||
1955 | #endif | ||
1956 | transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); | ||
1957 | |||
1958 | if (transfersize >= DMA_MIN_SIZE) { | ||
1959 | len = transfersize; | ||
1960 | cmd->SCp.phase = phase; | ||
1961 | if (NCR5380_transfer_dma(instance, &phase, | ||
1962 | &len, (unsigned char **)&cmd->SCp.ptr)) { | ||
1963 | /* | ||
1964 | * If the watchdog timer fires, all future | ||
1965 | * accesses to this device will use the | ||
1966 | * polled-IO. | ||
1967 | */ | ||
1968 | scmd_printk(KERN_INFO, cmd, | ||
1969 | "switching to slow handshake\n"); | ||
1970 | cmd->device->borken = 1; | ||
1971 | sink = 1; | ||
1972 | do_abort(instance); | ||
1973 | cmd->result = DID_ERROR << 16; | ||
1974 | /* XXX - need to source or sink data here, as appropriate */ | ||
1975 | } else { | ||
1976 | #ifdef REAL_DMA | ||
1977 | /* ++roman: When using real DMA, | ||
1978 | * information_transfer() should return after | ||
1979 | * starting DMA since it has nothing more to | ||
1980 | * do. | ||
1981 | */ | ||
1982 | return; | ||
1983 | #else | ||
1984 | cmd->SCp.this_residual -= transfersize - len; | ||
1985 | #endif | ||
1986 | } | ||
1987 | } else | ||
1988 | #endif /* defined(REAL_DMA) */ | ||
1989 | { | ||
1990 | /* Break up transfer into 3 ms chunks, | ||
1991 | * presuming 6 accesses per handshake. | ||
1992 | */ | ||
1993 | transfersize = min((unsigned long)cmd->SCp.this_residual, | ||
1994 | hostdata->accesses_per_ms / 2); | ||
1995 | len = transfersize; | ||
1996 | NCR5380_transfer_pio(instance, &phase, &len, | ||
1997 | (unsigned char **)&cmd->SCp.ptr); | ||
1998 | cmd->SCp.this_residual -= transfersize - len; | ||
1999 | } | ||
2000 | #if defined(CONFIG_SUN3) && defined(REAL_DMA) | ||
2001 | /* if we had intended to dma that command clear it */ | ||
2002 | if (sun3_dma_setup_done == cmd) | ||
2003 | sun3_dma_setup_done = NULL; | ||
2004 | #endif | ||
2005 | return; | ||
2006 | case PHASE_MSGIN: | ||
2007 | len = 1; | ||
2008 | data = &tmp; | ||
2009 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2010 | cmd->SCp.Message = tmp; | ||
2011 | |||
2012 | switch (tmp) { | ||
2013 | case ABORT: | ||
2014 | case COMMAND_COMPLETE: | ||
2015 | /* Accept message by clearing ACK */ | ||
2016 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2017 | dsprintk(NDEBUG_QUEUES, instance, | ||
2018 | "COMMAND COMPLETE %p target %d lun %llu\n", | ||
2019 | cmd, scmd_id(cmd), cmd->device->lun); | ||
2020 | |||
2021 | hostdata->connected = NULL; | ||
2022 | #ifdef SUPPORT_TAGS | ||
2023 | cmd_free_tag(cmd); | ||
2024 | if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { | ||
2025 | u8 lun = cmd->device->lun; | ||
2026 | struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; | ||
2027 | |||
2028 | dsprintk(NDEBUG_TAGS, instance, | ||
2029 | "QUEUE_FULL %p target %d lun %d nr_allocated %d\n", | ||
2030 | cmd, scmd_id(cmd), lun, ta->nr_allocated); | ||
2031 | if (ta->queue_size > ta->nr_allocated) | ||
2032 | ta->queue_size = ta->nr_allocated; | ||
2033 | } | ||
2034 | #endif | ||
2035 | |||
2036 | cmd->result &= ~0xffff; | ||
2037 | cmd->result |= cmd->SCp.Status; | ||
2038 | cmd->result |= cmd->SCp.Message << 8; | ||
2039 | |||
2040 | if (cmd->cmnd[0] == REQUEST_SENSE) | ||
2041 | complete_cmd(instance, cmd); | ||
2042 | else { | ||
2043 | if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION || | ||
2044 | cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) { | ||
2045 | dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", | ||
2046 | cmd); | ||
2047 | list_add_tail(&ncmd->list, | ||
2048 | &hostdata->autosense); | ||
2049 | } else | ||
2050 | complete_cmd(instance, cmd); | ||
2051 | } | ||
2052 | |||
2053 | /* | ||
2054 | * Restore phase bits to 0 so an interrupted selection, | ||
2055 | * arbitration can resume. | ||
2056 | */ | ||
2057 | NCR5380_write(TARGET_COMMAND_REG, 0); | ||
2058 | |||
2059 | /* Enable reselect interrupts */ | ||
2060 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
2061 | |||
2062 | maybe_release_dma_irq(instance); | ||
2063 | return; | ||
2064 | case MESSAGE_REJECT: | ||
2065 | /* Accept message by clearing ACK */ | ||
2066 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2067 | switch (hostdata->last_message) { | ||
2068 | case HEAD_OF_QUEUE_TAG: | ||
2069 | case ORDERED_QUEUE_TAG: | ||
2070 | case SIMPLE_QUEUE_TAG: | ||
2071 | /* The target obviously doesn't support tagged | ||
2072 | * queuing, even though it announced this ability in | ||
2073 | * its INQUIRY data ?!? (maybe only this LUN?) Ok, | ||
2074 | * clear 'tagged_supported' and lock the LUN, since | ||
2075 | * the command is treated as untagged further on. | ||
2076 | */ | ||
2077 | cmd->device->tagged_supported = 0; | ||
2078 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | ||
2079 | cmd->tag = TAG_NONE; | ||
2080 | dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n", | ||
2081 | scmd_id(cmd), cmd->device->lun); | ||
2082 | break; | ||
2083 | } | ||
2084 | break; | ||
2085 | case DISCONNECT: | ||
2086 | /* Accept message by clearing ACK */ | ||
2087 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2088 | hostdata->connected = NULL; | ||
2089 | list_add(&ncmd->list, &hostdata->disconnected); | ||
2090 | dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, | ||
2091 | instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", | ||
2092 | cmd, scmd_id(cmd), cmd->device->lun); | ||
2093 | |||
2094 | /* | ||
2095 | * Restore phase bits to 0 so an interrupted selection, | ||
2096 | * arbitration can resume. | ||
2097 | */ | ||
2098 | NCR5380_write(TARGET_COMMAND_REG, 0); | ||
2099 | |||
2100 | /* Enable reselect interrupts */ | ||
2101 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
2102 | #ifdef SUN3_SCSI_VME | ||
2103 | dregs->csr |= CSR_DMA_ENABLE; | ||
2104 | #endif | ||
2105 | return; | ||
2106 | /* | ||
2107 | * The SCSI data pointer is *IMPLICITLY* saved on a disconnect | ||
2108 | * operation, in violation of the SCSI spec so we can safely | ||
2109 | * ignore SAVE/RESTORE pointers calls. | ||
2110 | * | ||
2111 | * Unfortunately, some disks violate the SCSI spec and | ||
2112 | * don't issue the required SAVE_POINTERS message before | ||
2113 | * disconnecting, and we have to break spec to remain | ||
2114 | * compatible. | ||
2115 | */ | ||
2116 | case SAVE_POINTERS: | ||
2117 | case RESTORE_POINTERS: | ||
2118 | /* Accept message by clearing ACK */ | ||
2119 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2120 | break; | ||
2121 | case EXTENDED_MESSAGE: | ||
2122 | /* | ||
2123 | * Start the message buffer with the EXTENDED_MESSAGE | ||
2124 | * byte, since spi_print_msg() wants the whole thing. | ||
2125 | */ | ||
2126 | extended_msg[0] = EXTENDED_MESSAGE; | ||
2127 | /* Accept first byte by clearing ACK */ | ||
2128 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2129 | |||
2130 | spin_unlock_irq(&hostdata->lock); | ||
2131 | |||
2132 | dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); | ||
2133 | |||
2134 | len = 2; | ||
2135 | data = extended_msg + 1; | ||
2136 | phase = PHASE_MSGIN; | ||
2137 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2138 | dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", | ||
2139 | (int)extended_msg[1], | ||
2140 | (int)extended_msg[2]); | ||
2141 | |||
2142 | if (!len && extended_msg[1] > 0 && | ||
2143 | extended_msg[1] <= sizeof(extended_msg) - 2) { | ||
2144 | /* Accept third byte by clearing ACK */ | ||
2145 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2146 | len = extended_msg[1] - 1; | ||
2147 | data = extended_msg + 3; | ||
2148 | phase = PHASE_MSGIN; | ||
2149 | |||
2150 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2151 | dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", | ||
2152 | len); | ||
2153 | |||
2154 | switch (extended_msg[2]) { | ||
2155 | case EXTENDED_SDTR: | ||
2156 | case EXTENDED_WDTR: | ||
2157 | case EXTENDED_MODIFY_DATA_POINTER: | ||
2158 | case EXTENDED_EXTENDED_IDENTIFY: | ||
2159 | tmp = 0; | ||
2160 | } | ||
2161 | } else if (len) { | ||
2162 | shost_printk(KERN_ERR, instance, "error receiving extended message\n"); | ||
2163 | tmp = 0; | ||
2164 | } else { | ||
2165 | shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", | ||
2166 | extended_msg[2], extended_msg[1]); | ||
2167 | tmp = 0; | ||
2168 | } | ||
2169 | |||
2170 | spin_lock_irq(&hostdata->lock); | ||
2171 | if (!hostdata->connected) | ||
2172 | return; | ||
2173 | |||
2174 | /* Fall through to reject message */ | ||
2175 | |||
2176 | /* | ||
2177 | * If we get something weird that we aren't expecting, | ||
2178 | * reject it. | ||
2179 | */ | ||
2180 | default: | ||
2181 | if (!tmp) { | ||
2182 | shost_printk(KERN_ERR, instance, "rejecting message "); | ||
2183 | spi_print_msg(extended_msg); | ||
2184 | printk("\n"); | ||
2185 | } else if (tmp != EXTENDED_MESSAGE) | ||
2186 | scmd_printk(KERN_INFO, cmd, | ||
2187 | "rejecting unknown message %02x\n", | ||
2188 | tmp); | ||
2189 | else | ||
2190 | scmd_printk(KERN_INFO, cmd, | ||
2191 | "rejecting unknown extended message code %02x, length %d\n", | ||
2192 | extended_msg[1], extended_msg[0]); | ||
2193 | |||
2194 | msgout = MESSAGE_REJECT; | ||
2195 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | ||
2196 | break; | ||
2197 | } /* switch (tmp) */ | ||
2198 | break; | ||
2199 | case PHASE_MSGOUT: | ||
2200 | len = 1; | ||
2201 | data = &msgout; | ||
2202 | hostdata->last_message = msgout; | ||
2203 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2204 | if (msgout == ABORT) { | ||
2205 | hostdata->connected = NULL; | ||
2206 | cmd->result = DID_ERROR << 16; | ||
2207 | complete_cmd(instance, cmd); | ||
2208 | maybe_release_dma_irq(instance); | ||
2209 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
2210 | return; | ||
2211 | } | ||
2212 | msgout = NOP; | ||
2213 | break; | ||
2214 | case PHASE_CMDOUT: | ||
2215 | len = cmd->cmd_len; | ||
2216 | data = cmd->cmnd; | ||
2217 | /* | ||
2218 | * XXX for performance reasons, on machines with a | ||
2219 | * PSEUDO-DMA architecture we should probably | ||
2220 | * use the dma transfer function. | ||
2221 | */ | ||
2222 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2223 | break; | ||
2224 | case PHASE_STATIN: | ||
2225 | len = 1; | ||
2226 | data = &tmp; | ||
2227 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2228 | cmd->SCp.Status = tmp; | ||
2229 | break; | ||
2230 | default: | ||
2231 | shost_printk(KERN_ERR, instance, "unknown phase\n"); | ||
2232 | NCR5380_dprint(NDEBUG_ANY, instance); | ||
2233 | } /* switch(phase) */ | ||
2234 | } else { | ||
2235 | spin_unlock_irq(&hostdata->lock); | ||
2236 | NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); | ||
2237 | spin_lock_irq(&hostdata->lock); | ||
2238 | } | ||
2239 | } | ||
2240 | } | ||
2241 | |||
2242 | /* | ||
2243 | * Function : void NCR5380_reselect (struct Scsi_Host *instance) | ||
2244 | * | ||
2245 | * Purpose : does reselection, initializing the instance->connected | ||
2246 | * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q | ||
2247 | * nexus has been reestablished, | ||
2248 | * | ||
2249 | * Inputs : instance - this instance of the NCR5380. | ||
2250 | */ | ||
2251 | |||
2252 | |||
2253 | /* it might eventually prove necessary to do a dma setup on | ||
2254 | reselection, but it doesn't seem to be needed now -- sam */ | ||
2255 | |||
2256 | static void NCR5380_reselect(struct Scsi_Host *instance) | ||
2257 | { | ||
2258 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
2259 | unsigned char target_mask; | ||
2260 | unsigned char lun; | ||
2261 | #ifdef SUPPORT_TAGS | ||
2262 | unsigned char tag; | ||
2263 | #endif | ||
2264 | unsigned char msg[3]; | ||
2265 | int __maybe_unused len; | ||
2266 | unsigned char __maybe_unused *data, __maybe_unused phase; | ||
2267 | struct NCR5380_cmd *ncmd; | ||
2268 | struct scsi_cmnd *tmp; | ||
2269 | |||
2270 | /* | ||
2271 | * Disable arbitration, etc. since the host adapter obviously | ||
2272 | * lost, and tell an interrupted NCR5380_select() to restart. | ||
2273 | */ | ||
2274 | |||
2275 | NCR5380_write(MODE_REG, MR_BASE); | ||
2276 | |||
2277 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); | ||
2278 | |||
2279 | dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); | ||
2280 | |||
2281 | /* | ||
2282 | * At this point, we have detected that our SCSI ID is on the bus, | ||
2283 | * SEL is true and BSY was false for at least one bus settle delay | ||
2284 | * (400 ns). | ||
2285 | * | ||
2286 | * We must assert BSY ourselves, until the target drops the SEL | ||
2287 | * signal. | ||
2288 | */ | ||
2289 | |||
2290 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); | ||
2291 | if (NCR5380_poll_politely(instance, | ||
2292 | STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { | ||
2293 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2294 | return; | ||
2295 | } | ||
2296 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2297 | |||
2298 | /* | ||
2299 | * Wait for target to go into MSGIN. | ||
2300 | */ | ||
2301 | |||
2302 | if (NCR5380_poll_politely(instance, | ||
2303 | STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { | ||
2304 | do_abort(instance); | ||
2305 | return; | ||
2306 | } | ||
2307 | |||
2308 | #if defined(CONFIG_SUN3) && defined(REAL_DMA) | ||
2309 | /* acknowledge toggle to MSGIN */ | ||
2310 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); | ||
2311 | |||
2312 | /* peek at the byte without really hitting the bus */ | ||
2313 | msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); | ||
2314 | #else | ||
2315 | len = 1; | ||
2316 | data = msg; | ||
2317 | phase = PHASE_MSGIN; | ||
2318 | NCR5380_transfer_pio(instance, &phase, &len, &data); | ||
2319 | |||
2320 | if (len) { | ||
2321 | do_abort(instance); | ||
2322 | return; | ||
2323 | } | ||
2324 | #endif | ||
2325 | |||
2326 | if (!(msg[0] & 0x80)) { | ||
2327 | shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); | ||
2328 | spi_print_msg(msg); | ||
2329 | printk("\n"); | ||
2330 | do_abort(instance); | ||
2331 | return; | ||
2332 | } | ||
2333 | lun = msg[0] & 0x07; | ||
2334 | |||
2335 | #if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) | ||
2336 | /* If the phase is still MSGIN, the target wants to send some more | ||
2337 | * messages. In case it supports tagged queuing, this is probably a | ||
2338 | * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. | ||
2339 | */ | ||
2340 | tag = TAG_NONE; | ||
2341 | if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { | ||
2342 | /* Accept previous IDENTIFY message by clearing ACK */ | ||
2343 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2344 | len = 2; | ||
2345 | data = msg + 1; | ||
2346 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && | ||
2347 | msg[1] == SIMPLE_QUEUE_TAG) | ||
2348 | tag = msg[2]; | ||
2349 | dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n", | ||
2350 | target_mask, lun, tag); | ||
2351 | } | ||
2352 | #endif | ||
2353 | |||
2354 | /* | ||
2355 | * Find the command corresponding to the I_T_L or I_T_L_Q nexus we | ||
2356 | * just reestablished, and remove it from the disconnected queue. | ||
2357 | */ | ||
2358 | |||
2359 | tmp = NULL; | ||
2360 | list_for_each_entry(ncmd, &hostdata->disconnected, list) { | ||
2361 | struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); | ||
2362 | |||
2363 | if (target_mask == (1 << scmd_id(cmd)) && | ||
2364 | lun == (u8)cmd->device->lun | ||
2365 | #ifdef SUPPORT_TAGS | ||
2366 | && (tag == cmd->tag) | ||
2367 | #endif | ||
2368 | ) { | ||
2369 | list_del(&ncmd->list); | ||
2370 | tmp = cmd; | ||
2371 | break; | ||
2372 | } | ||
2373 | } | ||
2374 | |||
2375 | if (tmp) { | ||
2376 | dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, | ||
2377 | "reselect: removed %p from disconnected queue\n", tmp); | ||
2378 | } else { | ||
2379 | |||
2380 | #ifdef SUPPORT_TAGS | ||
2381 | shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n", | ||
2382 | target_mask, lun, tag); | ||
2383 | #else | ||
2384 | shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", | ||
2385 | target_mask, lun); | ||
2386 | #endif | ||
2387 | /* | ||
2388 | * Since we have an established nexus that we can't do anything | ||
2389 | * with, we must abort it. | ||
2390 | */ | ||
2391 | do_abort(instance); | ||
2392 | return; | ||
2393 | } | ||
2394 | |||
2395 | #if defined(CONFIG_SUN3) && defined(REAL_DMA) | ||
2396 | /* engage dma setup for the command we just saw */ | ||
2397 | { | ||
2398 | void *d; | ||
2399 | unsigned long count; | ||
2400 | |||
2401 | if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { | ||
2402 | count = tmp->SCp.buffer->length; | ||
2403 | d = sg_virt(tmp->SCp.buffer); | ||
2404 | } else { | ||
2405 | count = tmp->SCp.this_residual; | ||
2406 | d = tmp->SCp.ptr; | ||
2407 | } | ||
2408 | /* setup this command for dma if not already */ | ||
2409 | if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { | ||
2410 | sun3scsi_dma_setup(instance, d, count, | ||
2411 | rq_data_dir(tmp->request)); | ||
2412 | sun3_dma_setup_done = tmp; | ||
2413 | } | ||
2414 | } | ||
2415 | |||
2416 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); | ||
2417 | #endif | ||
2418 | |||
2419 | /* Accept message by clearing ACK */ | ||
2420 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2421 | |||
2422 | #if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) | ||
2423 | /* If the phase is still MSGIN, the target wants to send some more | ||
2424 | * messages. In case it supports tagged queuing, this is probably a | ||
2425 | * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. | ||
2426 | */ | ||
2427 | tag = TAG_NONE; | ||
2428 | if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { | ||
2429 | /* Accept previous IDENTIFY message by clearing ACK */ | ||
2430 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
2431 | len = 2; | ||
2432 | data = msg + 1; | ||
2433 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && | ||
2434 | msg[1] == SIMPLE_QUEUE_TAG) | ||
2435 | tag = msg[2]; | ||
2436 | dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n" | ||
2437 | target_mask, lun, tag); | ||
2438 | } | ||
2439 | #endif | ||
2440 | |||
2441 | hostdata->connected = tmp; | ||
2442 | dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", | ||
2443 | scmd_id(tmp), tmp->device->lun, tmp->tag); | ||
2444 | } | ||
2445 | |||
2446 | |||
2447 | /** | ||
2448 | * list_find_cmd - test for presence of a command in a linked list | ||
2449 | * @haystack: list of commands | ||
2450 | * @needle: command to search for | ||
2451 | */ | ||
2452 | |||
2453 | static bool list_find_cmd(struct list_head *haystack, | ||
2454 | struct scsi_cmnd *needle) | ||
2455 | { | ||
2456 | struct NCR5380_cmd *ncmd; | ||
2457 | |||
2458 | list_for_each_entry(ncmd, haystack, list) | ||
2459 | if (NCR5380_to_scmd(ncmd) == needle) | ||
2460 | return true; | ||
2461 | return false; | ||
2462 | } | ||
2463 | |||
2464 | /** | ||
2465 | * list_remove_cmd - remove a command from linked list | ||
2466 | * @haystack: list of commands | ||
2467 | * @needle: command to remove | ||
2468 | */ | ||
2469 | |||
2470 | static bool list_del_cmd(struct list_head *haystack, | ||
2471 | struct scsi_cmnd *needle) | ||
2472 | { | ||
2473 | if (list_find_cmd(haystack, needle)) { | ||
2474 | struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle); | ||
2475 | |||
2476 | list_del(&ncmd->list); | ||
2477 | return true; | ||
2478 | } | ||
2479 | return false; | ||
2480 | } | ||
2481 | |||
2482 | /** | ||
2483 | * NCR5380_abort - scsi host eh_abort_handler() method | ||
2484 | * @cmd: the command to be aborted | ||
2485 | * | ||
2486 | * Try to abort a given command by removing it from queues and/or sending | ||
2487 | * the target an abort message. This may not succeed in causing a target | ||
2488 | * to abort the command. Nonetheless, the low-level driver must forget about | ||
2489 | * the command because the mid-layer reclaims it and it may be re-issued. | ||
2490 | * | ||
2491 | * The normal path taken by a command is as follows. For EH we trace this | ||
2492 | * same path to locate and abort the command. | ||
2493 | * | ||
2494 | * unissued -> selecting -> [unissued -> selecting ->]... connected -> | ||
2495 | * [disconnected -> connected ->]... | ||
2496 | * [autosense -> connected ->] done | ||
2497 | * | ||
2498 | * If cmd was not found at all then presumably it has already been completed, | ||
2499 | * in which case return SUCCESS to try to avoid further EH measures. | ||
2500 | * | ||
2501 | * If the command has not completed yet, we must not fail to find it. | ||
2502 | * We have no option but to forget the aborted command (even if it still | ||
2503 | * lacks sense data). The mid-layer may re-issue a command that is in error | ||
2504 | * recovery (see scsi_send_eh_cmnd), but the logic and data structures in | ||
2505 | * this driver are such that a command can appear on one queue only. | ||
2506 | * | ||
2507 | * The lock protects driver data structures, but EH handlers also use it | ||
2508 | * to serialize their own execution and prevent their own re-entry. | ||
2509 | */ | ||
2510 | |||
static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	unsigned long flags;
	int result = SUCCESS;

	/* Serializes against the interrupt handler and other EH handlers */
	spin_lock_irqsave(&hostdata->lock, flags);

#if (NDEBUG & NDEBUG_ANY)
	scmd_printk(KERN_INFO, cmd, __func__);
#endif
	NCR5380_dprint(NDEBUG_ANY, instance);
	NCR5380_dprint_phase(NDEBUG_ANY, instance);

	/* The queues below are checked in command-lifecycle order (see the
	 * kerneldoc above): unissued -> selecting -> disconnected ->
	 * connected -> autosense. A command appears on at most one of them.
	 */

	/* Never sent to the target: just drop it from the issue queue. */
	if (list_del_cmd(&hostdata->unissued, cmd)) {
		dsprintk(NDEBUG_ABORT, instance,
		         "abort: removed %p from issue queue\n", cmd);
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
		goto out;
	}

	/* Selection in progress: cancel it before it completes. */
	if (hostdata->selecting == cmd) {
		dsprintk(NDEBUG_ABORT, instance,
		         "abort: cmd %p == selecting\n", cmd);
		hostdata->selecting = NULL;
		cmd->result = DID_ABORT << 16;
		complete_cmd(instance, cmd);
		goto out;
	}

	/* Disconnected: the target still holds a nexus for this command. */
	if (list_del_cmd(&hostdata->disconnected, cmd)) {
		dsprintk(NDEBUG_ABORT, instance,
		         "abort: removed %p from disconnected list\n", cmd);
		/* Can't call NCR5380_select() and send ABORT because that
		 * means releasing the lock. Need a bus reset.
		 */
		set_host_byte(cmd, DID_ERROR);
		complete_cmd(instance, cmd);
		result = FAILED;	/* escalate EH to a bus reset */
		goto out;
	}

	/* Currently connected: try to send an ABORT message on the bus. */
	if (hostdata->connected == cmd) {
		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
		hostdata->connected = NULL;
#ifdef REAL_DMA
		hostdata->dma_len = 0;
#endif
		if (do_abort(instance)) {
			/* Bus-level abort failed; escalate to reset. */
			set_host_byte(cmd, DID_ERROR);
			complete_cmd(instance, cmd);
			result = FAILED;
			goto out;
		}
		set_host_byte(cmd, DID_ABORT);
		complete_cmd(instance, cmd);
		goto out;
	}

	/* Waiting for a REQUEST SENSE retry: complete it without sense. */
	if (list_del_cmd(&hostdata->autosense, cmd)) {
		dsprintk(NDEBUG_ABORT, instance,
		         "abort: removed %p from sense queue\n", cmd);
		set_host_byte(cmd, DID_ERROR);
		complete_cmd(instance, cmd);
	}
	/* Not found anywhere: presumably already completed; fall through
	 * and report SUCCESS (result still holds its initial value).
	 */

out:
	if (result == FAILED)
		dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
	else
		dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);

	/* Kick the main task to restart command processing */
	queue_work(hostdata->work_q, &hostdata->main_task);
	maybe_release_dma_irq(instance);
	spin_unlock_irqrestore(&hostdata->lock, flags);

	return result;
}
2591 | |||
2592 | |||
2593 | /** | ||
2594 | * NCR5380_bus_reset - reset the SCSI bus | ||
2595 | * @cmd: SCSI command undergoing EH | ||
2596 | * | ||
2597 | * Returns SUCCESS | ||
2598 | */ | ||
2599 | |||
2600 | static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | ||
2601 | { | ||
2602 | struct Scsi_Host *instance = cmd->device->host; | ||
2603 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
2604 | int i; | ||
2605 | unsigned long flags; | ||
2606 | struct NCR5380_cmd *ncmd; | ||
2607 | |||
2608 | spin_lock_irqsave(&hostdata->lock, flags); | ||
2609 | |||
2610 | #if (NDEBUG & NDEBUG_ANY) | ||
2611 | scmd_printk(KERN_INFO, cmd, __func__); | ||
2612 | #endif | ||
2613 | NCR5380_dprint(NDEBUG_ANY, instance); | ||
2614 | NCR5380_dprint_phase(NDEBUG_ANY, instance); | ||
2615 | |||
2616 | do_reset(instance); | ||
2617 | |||
2618 | /* reset NCR registers */ | ||
2619 | NCR5380_write(MODE_REG, MR_BASE); | ||
2620 | NCR5380_write(TARGET_COMMAND_REG, 0); | ||
2621 | NCR5380_write(SELECT_ENABLE_REG, 0); | ||
2622 | |||
2623 | /* After the reset, there are no more connected or disconnected commands | ||
2624 | * and no busy units; so clear the low-level status here to avoid | ||
2625 | * conflicts when the mid-level code tries to wake up the affected | ||
2626 | * commands! | ||
2627 | */ | ||
2628 | |||
2629 | if (list_del_cmd(&hostdata->unissued, cmd)) { | ||
2630 | cmd->result = DID_RESET << 16; | ||
2631 | cmd->scsi_done(cmd); | ||
2632 | } | ||
2633 | |||
2634 | if (hostdata->selecting) { | ||
2635 | hostdata->selecting->result = DID_RESET << 16; | ||
2636 | complete_cmd(instance, hostdata->selecting); | ||
2637 | hostdata->selecting = NULL; | ||
2638 | } | ||
2639 | |||
2640 | list_for_each_entry(ncmd, &hostdata->disconnected, list) { | ||
2641 | struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); | ||
2642 | |||
2643 | set_host_byte(cmd, DID_RESET); | ||
2644 | cmd->scsi_done(cmd); | ||
2645 | } | ||
2646 | INIT_LIST_HEAD(&hostdata->disconnected); | ||
2647 | |||
2648 | list_for_each_entry(ncmd, &hostdata->autosense, list) { | ||
2649 | struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); | ||
2650 | |||
2651 | set_host_byte(cmd, DID_RESET); | ||
2652 | cmd->scsi_done(cmd); | ||
2653 | } | ||
2654 | INIT_LIST_HEAD(&hostdata->autosense); | ||
2655 | |||
2656 | if (hostdata->connected) { | ||
2657 | set_host_byte(hostdata->connected, DID_RESET); | ||
2658 | complete_cmd(instance, hostdata->connected); | ||
2659 | hostdata->connected = NULL; | ||
2660 | } | ||
2661 | |||
2662 | #ifdef SUPPORT_TAGS | ||
2663 | free_all_tags(hostdata); | ||
2664 | #endif | ||
2665 | for (i = 0; i < 8; ++i) | ||
2666 | hostdata->busy[i] = 0; | ||
2667 | #ifdef REAL_DMA | ||
2668 | hostdata->dma_len = 0; | ||
2669 | #endif | ||
2670 | |||
2671 | queue_work(hostdata->work_q, &hostdata->main_task); | ||
2672 | maybe_release_dma_irq(instance); | ||
2673 | spin_unlock_irqrestore(&hostdata->lock, flags); | ||
2674 | |||
2675 | return SUCCESS; | ||
2676 | } | ||
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 78d1b2963f2c..a59ad94ea52b 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c | |||
@@ -14,55 +14,23 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | 17 | /* | |
18 | /**************************************************************************/ | 18 | * Notes for Falcon SCSI DMA |
19 | /* */ | 19 | * |
20 | /* Notes for Falcon SCSI: */ | 20 | * The 5380 device is one of several that all share the DMA chip. Hence |
21 | /* ---------------------- */ | 21 | * "locking" and "unlocking" access to this chip is required. |
22 | /* */ | 22 | * |
23 | /* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */ | 23 | * Two possible schemes for ST DMA acquisition by atari_scsi are: |
24 | /* several device drivers, locking and unlocking the access to this */ | 24 | * 1) The lock is taken for each command separately (i.e. can_queue == 1). |
25 | /* chip is required. But locking is not possible from an interrupt, */ | 25 | * 2) The lock is taken when the first command arrives and released |
26 | /* since it puts the process to sleep if the lock is not available. */ | 26 | * when the last command is finished (i.e. can_queue > 1). |
27 | /* This prevents "late" locking of the DMA chip, i.e. locking it just */ | 27 | * |
28 | /* before using it, since in case of disconnection-reconnection */ | 28 | * The first alternative limits SCSI bus utilization, since interleaving |
29 | /* commands, the DMA is started from the reselection interrupt. */ | 29 | * commands is not possible. The second gives better performance but is |
30 | /* */ | 30 | * unfair to other drivers needing to use the ST DMA chip. In order to |
31 | /* Two possible schemes for ST-DMA-locking would be: */ | 31 | * allow the IDE and floppy drivers equal access to the ST DMA chip |
32 | /* 1) The lock is taken for each command separately and disconnecting */ | 32 | * the default is can_queue == 1. |
33 | /* is forbidden (i.e. can_queue = 1). */ | 33 | */ |
34 | /* 2) The DMA chip is locked when the first command comes in and */ | ||
35 | /* released when the last command is finished and all queues are */ | ||
36 | /* empty. */ | ||
37 | /* The first alternative would result in bad performance, since the */ | ||
38 | /* interleaving of commands would not be used. The second is unfair to */ | ||
39 | /* other drivers using the ST-DMA, because the queues will seldom be */ | ||
40 | /* totally empty if there is a lot of disk traffic. */ | ||
41 | /* */ | ||
42 | /* For this reasons I decided to employ a more elaborate scheme: */ | ||
43 | /* - First, we give up the lock every time we can (for fairness), this */ | ||
44 | /* means every time a command finishes and there are no other commands */ | ||
45 | /* on the disconnected queue. */ | ||
46 | /* - If there are others waiting to lock the DMA chip, we stop */ | ||
47 | /* issuing commands, i.e. moving them onto the issue queue. */ | ||
48 | /* Because of that, the disconnected queue will run empty in a */ | ||
49 | /* while. Instead we go to sleep on a 'fairness_queue'. */ | ||
50 | /* - If the lock is released, all processes waiting on the fairness */ | ||
51 | /* queue will be woken. The first of them tries to re-lock the DMA, */ | ||
52 | /* the others wait for the first to finish this task. After that, */ | ||
53 | /* they can all run on and do their commands... */ | ||
54 | /* This sounds complicated (and it is it :-(), but it seems to be a */ | ||
55 | /* good compromise between fairness and performance: As long as no one */ | ||
56 | /* else wants to work with the ST-DMA chip, SCSI can go along as */ | ||
57 | /* usual. If now someone else comes, this behaviour is changed to a */ | ||
58 | /* "fairness mode": just already initiated commands are finished and */ | ||
59 | /* then the lock is released. The other one waiting will probably win */ | ||
60 | /* the race for locking the DMA, since it was waiting for longer. And */ | ||
61 | /* after it has finished, SCSI can go ahead again. Finally: I hope I */ | ||
62 | /* have not produced any deadlock possibilities! */ | ||
63 | /* */ | ||
64 | /**************************************************************************/ | ||
65 | |||
66 | 34 | ||
67 | #include <linux/module.h> | 35 | #include <linux/module.h> |
68 | #include <linux/types.h> | 36 | #include <linux/types.h> |
@@ -83,13 +51,10 @@ | |||
83 | 51 | ||
84 | #include <scsi/scsi_host.h> | 52 | #include <scsi/scsi_host.h> |
85 | 53 | ||
86 | /* Definitions for the core NCR5380 driver. */ | ||
87 | |||
88 | #define REAL_DMA | ||
89 | #define SUPPORT_TAGS | ||
90 | #define MAX_TAGS 32 | ||
91 | #define DMA_MIN_SIZE 32 | 54 | #define DMA_MIN_SIZE 32 |
92 | 55 | ||
56 | /* Definitions for the core NCR5380 driver. */ | ||
57 | |||
93 | #define NCR5380_implementation_fields /* none */ | 58 | #define NCR5380_implementation_fields /* none */ |
94 | 59 | ||
95 | #define NCR5380_read(reg) atari_scsi_reg_read(reg) | 60 | #define NCR5380_read(reg) atari_scsi_reg_read(reg) |
@@ -99,9 +64,9 @@ | |||
99 | #define NCR5380_abort atari_scsi_abort | 64 | #define NCR5380_abort atari_scsi_abort |
100 | #define NCR5380_info atari_scsi_info | 65 | #define NCR5380_info atari_scsi_info |
101 | 66 | ||
102 | #define NCR5380_dma_read_setup(instance, data, count) \ | 67 | #define NCR5380_dma_recv_setup(instance, data, count) \ |
103 | atari_scsi_dma_setup(instance, data, count, 0) | 68 | atari_scsi_dma_setup(instance, data, count, 0) |
104 | #define NCR5380_dma_write_setup(instance, data, count) \ | 69 | #define NCR5380_dma_send_setup(instance, data, count) \ |
105 | atari_scsi_dma_setup(instance, data, count, 1) | 70 | atari_scsi_dma_setup(instance, data, count, 1) |
106 | #define NCR5380_dma_residual(instance) \ | 71 | #define NCR5380_dma_residual(instance) \ |
107 | atari_scsi_dma_residual(instance) | 72 | atari_scsi_dma_residual(instance) |
@@ -159,14 +124,11 @@ static inline unsigned long SCSI_DMA_GETADR(void) | |||
159 | return adr; | 124 | return adr; |
160 | } | 125 | } |
161 | 126 | ||
162 | #ifdef REAL_DMA | ||
163 | static void atari_scsi_fetch_restbytes(void); | 127 | static void atari_scsi_fetch_restbytes(void); |
164 | #endif | ||
165 | 128 | ||
166 | static unsigned char (*atari_scsi_reg_read)(unsigned char reg); | 129 | static unsigned char (*atari_scsi_reg_read)(unsigned char reg); |
167 | static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); | 130 | static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); |
168 | 131 | ||
169 | #ifdef REAL_DMA | ||
170 | static unsigned long atari_dma_residual, atari_dma_startaddr; | 132 | static unsigned long atari_dma_residual, atari_dma_startaddr; |
171 | static short atari_dma_active; | 133 | static short atari_dma_active; |
172 | /* pointer to the dribble buffer */ | 134 | /* pointer to the dribble buffer */ |
@@ -185,7 +147,6 @@ static char *atari_dma_orig_addr; | |||
185 | /* mask for address bits that can't be used with the ST-DMA */ | 147 | /* mask for address bits that can't be used with the ST-DMA */ |
186 | static unsigned long atari_dma_stram_mask; | 148 | static unsigned long atari_dma_stram_mask; |
187 | #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) | 149 | #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) |
188 | #endif | ||
189 | 150 | ||
190 | static int setup_can_queue = -1; | 151 | static int setup_can_queue = -1; |
191 | module_param(setup_can_queue, int, 0); | 152 | module_param(setup_can_queue, int, 0); |
@@ -193,16 +154,12 @@ static int setup_cmd_per_lun = -1; | |||
193 | module_param(setup_cmd_per_lun, int, 0); | 154 | module_param(setup_cmd_per_lun, int, 0); |
194 | static int setup_sg_tablesize = -1; | 155 | static int setup_sg_tablesize = -1; |
195 | module_param(setup_sg_tablesize, int, 0); | 156 | module_param(setup_sg_tablesize, int, 0); |
196 | static int setup_use_tagged_queuing = -1; | ||
197 | module_param(setup_use_tagged_queuing, int, 0); | ||
198 | static int setup_hostid = -1; | 157 | static int setup_hostid = -1; |
199 | module_param(setup_hostid, int, 0); | 158 | module_param(setup_hostid, int, 0); |
200 | static int setup_toshiba_delay = -1; | 159 | static int setup_toshiba_delay = -1; |
201 | module_param(setup_toshiba_delay, int, 0); | 160 | module_param(setup_toshiba_delay, int, 0); |
202 | 161 | ||
203 | 162 | ||
204 | #if defined(REAL_DMA) | ||
205 | |||
206 | static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) | 163 | static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) |
207 | { | 164 | { |
208 | int i; | 165 | int i; |
@@ -255,12 +212,9 @@ static void scsi_dma_buserr(int irq, void *dummy) | |||
255 | } | 212 | } |
256 | #endif | 213 | #endif |
257 | 214 | ||
258 | #endif | ||
259 | |||
260 | 215 | ||
261 | static irqreturn_t scsi_tt_intr(int irq, void *dev) | 216 | static irqreturn_t scsi_tt_intr(int irq, void *dev) |
262 | { | 217 | { |
263 | #ifdef REAL_DMA | ||
264 | struct Scsi_Host *instance = dev; | 218 | struct Scsi_Host *instance = dev; |
265 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 219 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
266 | int dma_stat; | 220 | int dma_stat; |
@@ -342,8 +296,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev) | |||
342 | tt_scsi_dma.dma_ctrl = 0; | 296 | tt_scsi_dma.dma_ctrl = 0; |
343 | } | 297 | } |
344 | 298 | ||
345 | #endif /* REAL_DMA */ | ||
346 | |||
347 | NCR5380_intr(irq, dev); | 299 | NCR5380_intr(irq, dev); |
348 | 300 | ||
349 | return IRQ_HANDLED; | 301 | return IRQ_HANDLED; |
@@ -352,7 +304,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev) | |||
352 | 304 | ||
353 | static irqreturn_t scsi_falcon_intr(int irq, void *dev) | 305 | static irqreturn_t scsi_falcon_intr(int irq, void *dev) |
354 | { | 306 | { |
355 | #ifdef REAL_DMA | ||
356 | struct Scsi_Host *instance = dev; | 307 | struct Scsi_Host *instance = dev; |
357 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 308 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
358 | int dma_stat; | 309 | int dma_stat; |
@@ -405,15 +356,12 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dev) | |||
405 | atari_dma_orig_addr = NULL; | 356 | atari_dma_orig_addr = NULL; |
406 | } | 357 | } |
407 | 358 | ||
408 | #endif /* REAL_DMA */ | ||
409 | |||
410 | NCR5380_intr(irq, dev); | 359 | NCR5380_intr(irq, dev); |
411 | 360 | ||
412 | return IRQ_HANDLED; | 361 | return IRQ_HANDLED; |
413 | } | 362 | } |
414 | 363 | ||
415 | 364 | ||
416 | #ifdef REAL_DMA | ||
417 | static void atari_scsi_fetch_restbytes(void) | 365 | static void atari_scsi_fetch_restbytes(void) |
418 | { | 366 | { |
419 | int nr; | 367 | int nr; |
@@ -436,7 +384,6 @@ static void atari_scsi_fetch_restbytes(void) | |||
436 | *dst++ = *src++; | 384 | *dst++ = *src++; |
437 | } | 385 | } |
438 | } | 386 | } |
439 | #endif /* REAL_DMA */ | ||
440 | 387 | ||
441 | 388 | ||
442 | /* This function releases the lock on the DMA chip if there is no | 389 | /* This function releases the lock on the DMA chip if there is no |
@@ -464,6 +411,10 @@ static int falcon_get_lock(struct Scsi_Host *instance) | |||
464 | if (IS_A_TT()) | 411 | if (IS_A_TT()) |
465 | return 1; | 412 | return 1; |
466 | 413 | ||
414 | if (stdma_is_locked_by(scsi_falcon_intr) && | ||
415 | instance->hostt->can_queue > 1) | ||
416 | return 1; | ||
417 | |||
467 | if (in_interrupt()) | 418 | if (in_interrupt()) |
468 | return stdma_try_lock(scsi_falcon_intr, instance); | 419 | return stdma_try_lock(scsi_falcon_intr, instance); |
469 | 420 | ||
@@ -495,8 +446,7 @@ static int __init atari_scsi_setup(char *str) | |||
495 | setup_sg_tablesize = ints[3]; | 446 | setup_sg_tablesize = ints[3]; |
496 | if (ints[0] >= 4) | 447 | if (ints[0] >= 4) |
497 | setup_hostid = ints[4]; | 448 | setup_hostid = ints[4]; |
498 | if (ints[0] >= 5) | 449 | /* ints[5] (use_tagged_queuing) is ignored */ |
499 | setup_use_tagged_queuing = ints[5]; | ||
500 | /* ints[6] (use_pdma) is ignored */ | 450 | /* ints[6] (use_pdma) is ignored */ |
501 | if (ints[0] >= 7) | 451 | if (ints[0] >= 7) |
502 | setup_toshiba_delay = ints[7]; | 452 | setup_toshiba_delay = ints[7]; |
@@ -508,8 +458,6 @@ __setup("atascsi=", atari_scsi_setup); | |||
508 | #endif /* !MODULE */ | 458 | #endif /* !MODULE */ |
509 | 459 | ||
510 | 460 | ||
511 | #if defined(REAL_DMA) | ||
512 | |||
513 | static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, | 461 | static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, |
514 | void *data, unsigned long count, | 462 | void *data, unsigned long count, |
515 | int dir) | 463 | int dir) |
@@ -545,9 +493,6 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, | |||
545 | */ | 493 | */ |
546 | dma_cache_maintenance(addr, count, dir); | 494 | dma_cache_maintenance(addr, count, dir); |
547 | 495 | ||
548 | if (count == 0) | ||
549 | printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); | ||
550 | |||
551 | if (IS_A_TT()) { | 496 | if (IS_A_TT()) { |
552 | tt_scsi_dma.dma_ctrl = dir; | 497 | tt_scsi_dma.dma_ctrl = dir; |
553 | SCSI_DMA_WRITE_P(dma_addr, addr); | 498 | SCSI_DMA_WRITE_P(dma_addr, addr); |
@@ -624,6 +569,9 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, | |||
624 | { | 569 | { |
625 | unsigned long possible_len, limit; | 570 | unsigned long possible_len, limit; |
626 | 571 | ||
572 | if (wanted_len < DMA_MIN_SIZE) | ||
573 | return 0; | ||
574 | |||
627 | if (IS_A_TT()) | 575 | if (IS_A_TT()) |
628 | /* TT SCSI DMA can transfer arbitrary #bytes */ | 576 | /* TT SCSI DMA can transfer arbitrary #bytes */ |
629 | return wanted_len; | 577 | return wanted_len; |
@@ -703,9 +651,6 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, | |||
703 | } | 651 | } |
704 | 652 | ||
705 | 653 | ||
706 | #endif /* REAL_DMA */ | ||
707 | |||
708 | |||
709 | /* NCR5380 register access functions | 654 | /* NCR5380 register access functions |
710 | * | 655 | * |
711 | * There are separate functions for TT and Falcon, because the access | 656 | * There are separate functions for TT and Falcon, because the access |
@@ -736,7 +681,7 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) | |||
736 | } | 681 | } |
737 | 682 | ||
738 | 683 | ||
739 | #include "atari_NCR5380.c" | 684 | #include "NCR5380.c" |
740 | 685 | ||
741 | static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) | 686 | static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) |
742 | { | 687 | { |
@@ -745,7 +690,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) | |||
745 | 690 | ||
746 | local_irq_save(flags); | 691 | local_irq_save(flags); |
747 | 692 | ||
748 | #ifdef REAL_DMA | ||
749 | /* Abort a maybe active DMA transfer */ | 693 | /* Abort a maybe active DMA transfer */ |
750 | if (IS_A_TT()) { | 694 | if (IS_A_TT()) { |
751 | tt_scsi_dma.dma_ctrl = 0; | 695 | tt_scsi_dma.dma_ctrl = 0; |
@@ -754,7 +698,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) | |||
754 | atari_dma_active = 0; | 698 | atari_dma_active = 0; |
755 | atari_dma_orig_addr = NULL; | 699 | atari_dma_orig_addr = NULL; |
756 | } | 700 | } |
757 | #endif | ||
758 | 701 | ||
759 | rv = NCR5380_bus_reset(cmd); | 702 | rv = NCR5380_bus_reset(cmd); |
760 | 703 | ||
@@ -781,6 +724,7 @@ static struct scsi_host_template atari_scsi_template = { | |||
781 | .eh_abort_handler = atari_scsi_abort, | 724 | .eh_abort_handler = atari_scsi_abort, |
782 | .eh_bus_reset_handler = atari_scsi_bus_reset, | 725 | .eh_bus_reset_handler = atari_scsi_bus_reset, |
783 | .this_id = 7, | 726 | .this_id = 7, |
727 | .cmd_per_lun = 2, | ||
784 | .use_clustering = DISABLE_CLUSTERING, | 728 | .use_clustering = DISABLE_CLUSTERING, |
785 | .cmd_size = NCR5380_CMD_SIZE, | 729 | .cmd_size = NCR5380_CMD_SIZE, |
786 | }; | 730 | }; |
@@ -804,24 +748,11 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
804 | atari_scsi_reg_write = atari_scsi_falcon_reg_write; | 748 | atari_scsi_reg_write = atari_scsi_falcon_reg_write; |
805 | } | 749 | } |
806 | 750 | ||
807 | /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. | ||
808 | * Higher values should work, too; try it! | ||
809 | * (But cmd_per_lun costs memory!) | ||
810 | * | ||
811 | * But there seems to be a bug somewhere that requires CAN_QUEUE to be | ||
812 | * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since | ||
813 | * changed CMD_PER_LUN... | ||
814 | * | ||
815 | * Note: The Falcon currently uses 8/1 setting due to unsolved problems | ||
816 | * with cmd_per_lun != 1 | ||
817 | */ | ||
818 | if (ATARIHW_PRESENT(TT_SCSI)) { | 751 | if (ATARIHW_PRESENT(TT_SCSI)) { |
819 | atari_scsi_template.can_queue = 16; | 752 | atari_scsi_template.can_queue = 16; |
820 | atari_scsi_template.cmd_per_lun = 8; | ||
821 | atari_scsi_template.sg_tablesize = SG_ALL; | 753 | atari_scsi_template.sg_tablesize = SG_ALL; |
822 | } else { | 754 | } else { |
823 | atari_scsi_template.can_queue = 8; | 755 | atari_scsi_template.can_queue = 1; |
824 | atari_scsi_template.cmd_per_lun = 1; | ||
825 | atari_scsi_template.sg_tablesize = SG_NONE; | 756 | atari_scsi_template.sg_tablesize = SG_NONE; |
826 | } | 757 | } |
827 | 758 | ||
@@ -850,8 +781,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
850 | } | 781 | } |
851 | } | 782 | } |
852 | 783 | ||
853 | |||
854 | #ifdef REAL_DMA | ||
855 | /* If running on a Falcon and if there's TT-Ram (i.e., more than one | 784 | /* If running on a Falcon and if there's TT-Ram (i.e., more than one |
856 | * memory block, since there's always ST-Ram in a Falcon), then | 785 | * memory block, since there's always ST-Ram in a Falcon), then |
857 | * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers | 786 | * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers |
@@ -867,7 +796,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
867 | atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); | 796 | atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); |
868 | atari_dma_orig_addr = 0; | 797 | atari_dma_orig_addr = 0; |
869 | } | 798 | } |
870 | #endif | ||
871 | 799 | ||
872 | instance = scsi_host_alloc(&atari_scsi_template, | 800 | instance = scsi_host_alloc(&atari_scsi_template, |
873 | sizeof(struct NCR5380_hostdata)); | 801 | sizeof(struct NCR5380_hostdata)); |
@@ -879,9 +807,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
879 | instance->irq = irq->start; | 807 | instance->irq = irq->start; |
880 | 808 | ||
881 | host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; | 809 | host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; |
882 | #ifdef SUPPORT_TAGS | ||
883 | host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; | ||
884 | #endif | ||
885 | host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; | 810 | host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; |
886 | 811 | ||
887 | error = NCR5380_init(instance, host_flags); | 812 | error = NCR5380_init(instance, host_flags); |
@@ -897,7 +822,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
897 | goto fail_irq; | 822 | goto fail_irq; |
898 | } | 823 | } |
899 | tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ | 824 | tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ |
900 | #ifdef REAL_DMA | 825 | |
901 | tt_scsi_dma.dma_ctrl = 0; | 826 | tt_scsi_dma.dma_ctrl = 0; |
902 | atari_dma_residual = 0; | 827 | atari_dma_residual = 0; |
903 | 828 | ||
@@ -919,17 +844,14 @@ static int __init atari_scsi_probe(struct platform_device *pdev) | |||
919 | 844 | ||
920 | hostdata->read_overruns = 4; | 845 | hostdata->read_overruns = 4; |
921 | } | 846 | } |
922 | #endif | ||
923 | } else { | 847 | } else { |
924 | /* Nothing to do for the interrupt: the ST-DMA is initialized | 848 | /* Nothing to do for the interrupt: the ST-DMA is initialized |
925 | * already. | 849 | * already. |
926 | */ | 850 | */ |
927 | #ifdef REAL_DMA | ||
928 | atari_dma_residual = 0; | 851 | atari_dma_residual = 0; |
929 | atari_dma_active = 0; | 852 | atari_dma_active = 0; |
930 | atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 | 853 | atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 |
931 | : 0xff000000); | 854 | : 0xff000000); |
932 | #endif | ||
933 | } | 855 | } |
934 | 856 | ||
935 | NCR5380_maybe_reset_bus(instance); | 857 | NCR5380_maybe_reset_bus(instance); |
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 06dc215ea050..0f797a55d504 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h | |||
@@ -874,8 +874,8 @@ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, | |||
874 | /* | 874 | /* |
875 | * itnim callbacks | 875 | * itnim callbacks |
876 | */ | 876 | */ |
877 | void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, | 877 | int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, |
878 | struct bfad_itnim_s **itnim_drv); | 878 | struct bfad_itnim_s **itnim_drv); |
879 | void bfa_fcb_itnim_free(struct bfad_s *bfad, | 879 | void bfa_fcb_itnim_free(struct bfad_s *bfad, |
880 | struct bfad_itnim_s *itnim_drv); | 880 | struct bfad_itnim_s *itnim_drv); |
881 | void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); | 881 | void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); |
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 4f089d76afb1..2e3b19e7e079 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c | |||
@@ -588,12 +588,13 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) | |||
588 | struct bfa_fcs_lport_s *port = rport->port; | 588 | struct bfa_fcs_lport_s *port = rport->port; |
589 | struct bfa_fcs_itnim_s *itnim; | 589 | struct bfa_fcs_itnim_s *itnim; |
590 | struct bfad_itnim_s *itnim_drv; | 590 | struct bfad_itnim_s *itnim_drv; |
591 | int ret; | ||
591 | 592 | ||
592 | /* | 593 | /* |
593 | * call bfad to allocate the itnim | 594 | * call bfad to allocate the itnim |
594 | */ | 595 | */ |
595 | bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); | 596 | ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); |
596 | if (itnim == NULL) { | 597 | if (ret) { |
597 | bfa_trc(port->fcs, rport->pwwn); | 598 | bfa_trc(port->fcs, rport->pwwn); |
598 | return NULL; | 599 | return NULL; |
599 | } | 600 | } |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 6c805e13f8dd..02d806012fa1 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
@@ -440,13 +440,13 @@ bfad_im_slave_destroy(struct scsi_device *sdev) | |||
440 | * BFA FCS itnim alloc callback, after successful PRLI | 440 | * BFA FCS itnim alloc callback, after successful PRLI |
441 | * Context: Interrupt | 441 | * Context: Interrupt |
442 | */ | 442 | */ |
443 | void | 443 | int |
444 | bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, | 444 | bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, |
445 | struct bfad_itnim_s **itnim_drv) | 445 | struct bfad_itnim_s **itnim_drv) |
446 | { | 446 | { |
447 | *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); | 447 | *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); |
448 | if (*itnim_drv == NULL) | 448 | if (*itnim_drv == NULL) |
449 | return; | 449 | return -ENOMEM; |
450 | 450 | ||
451 | (*itnim_drv)->im = bfad->im; | 451 | (*itnim_drv)->im = bfad->im; |
452 | *itnim = &(*itnim_drv)->fcs_itnim; | 452 | *itnim = &(*itnim_drv)->fcs_itnim; |
@@ -457,6 +457,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, | |||
457 | */ | 457 | */ |
458 | INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); | 458 | INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); |
459 | bfad->bfad_flags |= BFAD_RPORT_ONLINE; | 459 | bfad->bfad_flags |= BFAD_RPORT_ONLINE; |
460 | return 0; | ||
460 | } | 461 | } |
461 | 462 | ||
462 | /* | 463 | /* |
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 499e369eabf0..fdd4eb4e41b2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
@@ -65,7 +65,7 @@ | |||
65 | #include "bnx2fc_constants.h" | 65 | #include "bnx2fc_constants.h" |
66 | 66 | ||
67 | #define BNX2FC_NAME "bnx2fc" | 67 | #define BNX2FC_NAME "bnx2fc" |
68 | #define BNX2FC_VERSION "2.9.6" | 68 | #define BNX2FC_VERSION "2.10.3" |
69 | 69 | ||
70 | #define PFX "bnx2fc: " | 70 | #define PFX "bnx2fc: " |
71 | 71 | ||
@@ -261,6 +261,7 @@ struct bnx2fc_interface { | |||
261 | u8 vlan_enabled; | 261 | u8 vlan_enabled; |
262 | int vlan_id; | 262 | int vlan_id; |
263 | bool enabled; | 263 | bool enabled; |
264 | u8 tm_timeout; | ||
264 | }; | 265 | }; |
265 | 266 | ||
266 | #define bnx2fc_from_ctlr(x) \ | 267 | #define bnx2fc_from_ctlr(x) \ |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index d7029ea5d319..a1881993982c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -107,6 +107,26 @@ MODULE_PARM_DESC(debug_logging, | |||
107 | "\t\t0x10 - fcoe L2 fame related logs.\n" | 107 | "\t\t0x10 - fcoe L2 fame related logs.\n" |
108 | "\t\t0xff - LOG all messages."); | 108 | "\t\t0xff - LOG all messages."); |
109 | 109 | ||
110 | uint bnx2fc_devloss_tmo; | ||
111 | module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO); | ||
112 | MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports " | ||
113 | "attached via bnx2fc."); | ||
114 | |||
115 | uint bnx2fc_max_luns = BNX2FC_MAX_LUN; | ||
116 | module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO); | ||
117 | MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default " | ||
118 | "0xffff."); | ||
119 | |||
120 | uint bnx2fc_queue_depth; | ||
121 | module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO); | ||
122 | MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices " | ||
123 | "attached via bnx2fc."); | ||
124 | |||
125 | uint bnx2fc_log_fka; | ||
126 | module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); | ||
127 | MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " | ||
128 | "initiating a FIP keep alive when debug logging is enabled."); | ||
129 | |||
110 | static int bnx2fc_cpu_callback(struct notifier_block *nfb, | 130 | static int bnx2fc_cpu_callback(struct notifier_block *nfb, |
111 | unsigned long action, void *hcpu); | 131 | unsigned long action, void *hcpu); |
112 | /* notification function for CPU hotplug events */ | 132 | /* notification function for CPU hotplug events */ |
@@ -692,7 +712,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
692 | int rc = 0; | 712 | int rc = 0; |
693 | 713 | ||
694 | shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; | 714 | shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; |
695 | shost->max_lun = BNX2FC_MAX_LUN; | 715 | shost->max_lun = bnx2fc_max_luns; |
696 | shost->max_id = BNX2FC_MAX_FCP_TGT; | 716 | shost->max_id = BNX2FC_MAX_FCP_TGT; |
697 | shost->max_channel = 0; | 717 | shost->max_channel = 0; |
698 | if (lport->vport) | 718 | if (lport->vport) |
@@ -1061,6 +1081,20 @@ static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) | |||
1061 | */ | 1081 | */ |
1062 | static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | 1082 | static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) |
1063 | { | 1083 | { |
1084 | struct fip_header *fiph; | ||
1085 | struct ethhdr *eth_hdr; | ||
1086 | u16 op; | ||
1087 | u8 sub; | ||
1088 | |||
1089 | fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); | ||
1090 | eth_hdr = (struct ethhdr *)skb_mac_header(skb); | ||
1091 | op = ntohs(fiph->fip_op); | ||
1092 | sub = fiph->fip_subcode; | ||
1093 | |||
1094 | if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) | ||
1095 | BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n", | ||
1096 | eth_hdr->h_source, eth_hdr->h_dest); | ||
1097 | |||
1064 | skb->dev = bnx2fc_from_ctlr(fip)->netdev; | 1098 | skb->dev = bnx2fc_from_ctlr(fip)->netdev; |
1065 | dev_queue_xmit(skb); | 1099 | dev_queue_xmit(skb); |
1066 | } | 1100 | } |
@@ -1102,6 +1136,9 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) | |||
1102 | return -EIO; | 1136 | return -EIO; |
1103 | } | 1137 | } |
1104 | 1138 | ||
1139 | if (bnx2fc_devloss_tmo) | ||
1140 | fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; | ||
1141 | |||
1105 | if (disabled) { | 1142 | if (disabled) { |
1106 | fc_vport_set_state(vport, FC_VPORT_DISABLED); | 1143 | fc_vport_set_state(vport, FC_VPORT_DISABLED); |
1107 | } else { | 1144 | } else { |
@@ -1495,6 +1532,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, | |||
1495 | } | 1532 | } |
1496 | fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; | 1533 | fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; |
1497 | 1534 | ||
1535 | if (bnx2fc_devloss_tmo) | ||
1536 | fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; | ||
1537 | |||
1498 | /* Allocate exchange manager */ | 1538 | /* Allocate exchange manager */ |
1499 | if (!npiv) | 1539 | if (!npiv) |
1500 | rc = bnx2fc_em_config(lport, hba); | 1540 | rc = bnx2fc_em_config(lport, hba); |
@@ -1999,6 +2039,8 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) | |||
1999 | return; | 2039 | return; |
2000 | } | 2040 | } |
2001 | 2041 | ||
2042 | pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); | ||
2043 | |||
2002 | /* Add HBA to the adapter list */ | 2044 | /* Add HBA to the adapter list */ |
2003 | mutex_lock(&bnx2fc_dev_lock); | 2045 | mutex_lock(&bnx2fc_dev_lock); |
2004 | list_add_tail(&hba->list, &adapter_list); | 2046 | list_add_tail(&hba->list, &adapter_list); |
@@ -2293,6 +2335,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2293 | ctlr = bnx2fc_to_ctlr(interface); | 2335 | ctlr = bnx2fc_to_ctlr(interface); |
2294 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | 2336 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); |
2295 | interface->vlan_id = vlan_id; | 2337 | interface->vlan_id = vlan_id; |
2338 | interface->tm_timeout = BNX2FC_TM_TIMEOUT; | ||
2296 | 2339 | ||
2297 | interface->timer_work_queue = | 2340 | interface->timer_work_queue = |
2298 | create_singlethread_workqueue("bnx2fc_timer_wq"); | 2341 | create_singlethread_workqueue("bnx2fc_timer_wq"); |
@@ -2612,6 +2655,15 @@ static int bnx2fc_cpu_callback(struct notifier_block *nfb, | |||
2612 | return NOTIFY_OK; | 2655 | return NOTIFY_OK; |
2613 | } | 2656 | } |
2614 | 2657 | ||
2658 | static int bnx2fc_slave_configure(struct scsi_device *sdev) | ||
2659 | { | ||
2660 | if (!bnx2fc_queue_depth) | ||
2661 | return 0; | ||
2662 | |||
2663 | scsi_change_queue_depth(sdev, bnx2fc_queue_depth); | ||
2664 | return 0; | ||
2665 | } | ||
2666 | |||
2615 | /** | 2667 | /** |
2616 | * bnx2fc_mod_init - module init entry point | 2668 | * bnx2fc_mod_init - module init entry point |
2617 | * | 2669 | * |
@@ -2858,6 +2910,50 @@ static struct fc_function_template bnx2fc_vport_xport_function = { | |||
2858 | .bsg_request = fc_lport_bsg_request, | 2910 | .bsg_request = fc_lport_bsg_request, |
2859 | }; | 2911 | }; |
2860 | 2912 | ||
2913 | /* | ||
2914 | * Additional scsi_host attributes. | ||
2915 | */ | ||
2916 | static ssize_t | ||
2917 | bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, | ||
2918 | char *buf) | ||
2919 | { | ||
2920 | struct Scsi_Host *shost = class_to_shost(dev); | ||
2921 | struct fc_lport *lport = shost_priv(shost); | ||
2922 | struct fcoe_port *port = lport_priv(lport); | ||
2923 | struct bnx2fc_interface *interface = port->priv; | ||
2924 | |||
2925 | sprintf(buf, "%u\n", interface->tm_timeout); | ||
2926 | return strlen(buf); | ||
2927 | } | ||
2928 | |||
2929 | static ssize_t | ||
2930 | bnx2fc_tm_timeout_store(struct device *dev, | ||
2931 | struct device_attribute *attr, const char *buf, size_t count) | ||
2932 | { | ||
2933 | struct Scsi_Host *shost = class_to_shost(dev); | ||
2934 | struct fc_lport *lport = shost_priv(shost); | ||
2935 | struct fcoe_port *port = lport_priv(lport); | ||
2936 | struct bnx2fc_interface *interface = port->priv; | ||
2937 | int rval, val; | ||
2938 | |||
2939 | rval = kstrtouint(buf, 10, &val); | ||
2940 | if (rval) | ||
2941 | return rval; | ||
2942 | if (val > 255) | ||
2943 | return -ERANGE; | ||
2944 | |||
2945 | interface->tm_timeout = (u8)val; | ||
2946 | return strlen(buf); | ||
2947 | } | ||
2948 | |||
2949 | static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, | ||
2950 | bnx2fc_tm_timeout_store); | ||
2951 | |||
2952 | static struct device_attribute *bnx2fc_host_attrs[] = { | ||
2953 | &dev_attr_tm_timeout, | ||
2954 | NULL, | ||
2955 | }; | ||
2956 | |||
2861 | /** | 2957 | /** |
2862 | * scsi_host_template structure used while registering with SCSI-ml | 2958 | * scsi_host_template structure used while registering with SCSI-ml |
2863 | */ | 2959 | */ |
@@ -2877,6 +2973,8 @@ static struct scsi_host_template bnx2fc_shost_template = { | |||
2877 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, | 2973 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, |
2878 | .max_sectors = 1024, | 2974 | .max_sectors = 1024, |
2879 | .track_queue_depth = 1, | 2975 | .track_queue_depth = 1, |
2976 | .slave_configure = bnx2fc_slave_configure, | ||
2977 | .shost_attrs = bnx2fc_host_attrs, | ||
2880 | }; | 2978 | }; |
2881 | 2979 | ||
2882 | static struct libfc_function_template bnx2fc_libfc_fcn_templ = { | 2980 | static struct libfc_function_template bnx2fc_libfc_fcn_templ = { |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 2230dab67ca5..026f394a3851 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -179,12 +179,24 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) | |||
179 | 179 | ||
180 | bnx2fc_unmap_sg_list(io_req); | 180 | bnx2fc_unmap_sg_list(io_req); |
181 | io_req->sc_cmd = NULL; | 181 | io_req->sc_cmd = NULL; |
182 | |||
183 | /* Sanity checks before returning command to mid-layer */ | ||
182 | if (!sc_cmd) { | 184 | if (!sc_cmd) { |
183 | printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. " | 185 | printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. " |
184 | "IO(0x%x) already cleaned up\n", | 186 | "IO(0x%x) already cleaned up\n", |
185 | io_req->xid); | 187 | io_req->xid); |
186 | return; | 188 | return; |
187 | } | 189 | } |
190 | if (!sc_cmd->device) { | ||
191 | pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); | ||
192 | return; | ||
193 | } | ||
194 | if (!sc_cmd->device->host) { | ||
195 | pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", | ||
196 | io_req->xid); | ||
197 | return; | ||
198 | } | ||
199 | |||
188 | sc_cmd->result = err_code << 16; | 200 | sc_cmd->result = err_code << 16; |
189 | 201 | ||
190 | BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", | 202 | BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", |
@@ -770,7 +782,7 @@ retry_tmf: | |||
770 | spin_unlock_bh(&tgt->tgt_lock); | 782 | spin_unlock_bh(&tgt->tgt_lock); |
771 | 783 | ||
772 | rc = wait_for_completion_timeout(&io_req->tm_done, | 784 | rc = wait_for_completion_timeout(&io_req->tm_done, |
773 | BNX2FC_TM_TIMEOUT * HZ); | 785 | interface->tm_timeout * HZ); |
774 | spin_lock_bh(&tgt->tgt_lock); | 786 | spin_lock_bh(&tgt->tgt_lock); |
775 | 787 | ||
776 | io_req->wait_for_comp = 0; | 788 | io_req->wait_for_comp = 0; |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 72894378ffcf..133901fd3e35 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -675,7 +675,7 @@ bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) | |||
675 | { | 675 | { |
676 | struct list_head *list; | 676 | struct list_head *list; |
677 | struct list_head *tmp; | 677 | struct list_head *tmp; |
678 | struct bnx2i_endpoint *ep; | 678 | struct bnx2i_endpoint *ep = NULL; |
679 | 679 | ||
680 | read_lock_bh(&hba->ep_rdwr_lock); | 680 | read_lock_bh(&hba->ep_rdwr_lock); |
681 | list_for_each_safe(list, tmp, &hba->ep_ofld_list) { | 681 | list_for_each_safe(list, tmp, &hba->ep_ofld_list) { |
@@ -703,7 +703,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) | |||
703 | { | 703 | { |
704 | struct list_head *list; | 704 | struct list_head *list; |
705 | struct list_head *tmp; | 705 | struct list_head *tmp; |
706 | struct bnx2i_endpoint *ep; | 706 | struct bnx2i_endpoint *ep = NULL; |
707 | 707 | ||
708 | read_lock_bh(&hba->ep_rdwr_lock); | 708 | read_lock_bh(&hba->ep_rdwr_lock); |
709 | list_for_each_safe(list, tmp, &hba->ep_destroy_list) { | 709 | list_for_each_safe(list, tmp, &hba->ep_destroy_list) { |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index fa09d4be2b53..83458f7a2824 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -292,850 +292,30 @@ bool scsi_opcode_sa_name(int opcode, int service_action, | |||
292 | 292 | ||
293 | struct error_info { | 293 | struct error_info { |
294 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ | 294 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ |
295 | const char * text; | 295 | unsigned short size; |
296 | }; | 296 | }; |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * The canonical list of T10 Additional Sense Codes is available at: | 299 | * There are 700+ entries in this table. To save space, we don't store |
300 | * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] | 300 | * (code, pointer) pairs, which would make sizeof(struct |
301 | * error_info)==16 on 64 bits. Rather, the second element just stores | ||
302 | * the size (including \0) of the corresponding string, and we use the | ||
303 | * sum of these to get the appropriate offset into additional_text | ||
304 | * defined below. This approach saves 12 bytes per entry. | ||
301 | */ | 305 | */ |
302 | |||
303 | static const struct error_info additional[] = | 306 | static const struct error_info additional[] = |
304 | { | 307 | { |
305 | {0x0000, "No additional sense information"}, | 308 | #define SENSE_CODE(c, s) {c, sizeof(s)}, |
306 | {0x0001, "Filemark detected"}, | 309 | #include "sense_codes.h" |
307 | {0x0002, "End-of-partition/medium detected"}, | 310 | #undef SENSE_CODE |
308 | {0x0003, "Setmark detected"}, | ||
309 | {0x0004, "Beginning-of-partition/medium detected"}, | ||
310 | {0x0005, "End-of-data detected"}, | ||
311 | {0x0006, "I/O process terminated"}, | ||
312 | {0x0007, "Programmable early warning detected"}, | ||
313 | {0x0011, "Audio play operation in progress"}, | ||
314 | {0x0012, "Audio play operation paused"}, | ||
315 | {0x0013, "Audio play operation successfully completed"}, | ||
316 | {0x0014, "Audio play operation stopped due to error"}, | ||
317 | {0x0015, "No current audio status to return"}, | ||
318 | {0x0016, "Operation in progress"}, | ||
319 | {0x0017, "Cleaning requested"}, | ||
320 | {0x0018, "Erase operation in progress"}, | ||
321 | {0x0019, "Locate operation in progress"}, | ||
322 | {0x001A, "Rewind operation in progress"}, | ||
323 | {0x001B, "Set capacity operation in progress"}, | ||
324 | {0x001C, "Verify operation in progress"}, | ||
325 | {0x001D, "ATA pass through information available"}, | ||
326 | {0x001E, "Conflicting SA creation request"}, | ||
327 | {0x001F, "Logical unit transitioning to another power condition"}, | ||
328 | {0x0020, "Extended copy information available"}, | ||
329 | {0x0021, "Atomic command aborted due to ACA"}, | ||
330 | |||
331 | {0x0100, "No index/sector signal"}, | ||
332 | |||
333 | {0x0200, "No seek complete"}, | ||
334 | |||
335 | {0x0300, "Peripheral device write fault"}, | ||
336 | {0x0301, "No write current"}, | ||
337 | {0x0302, "Excessive write errors"}, | ||
338 | |||
339 | {0x0400, "Logical unit not ready, cause not reportable"}, | ||
340 | {0x0401, "Logical unit is in process of becoming ready"}, | ||
341 | {0x0402, "Logical unit not ready, initializing command required"}, | ||
342 | {0x0403, "Logical unit not ready, manual intervention required"}, | ||
343 | {0x0404, "Logical unit not ready, format in progress"}, | ||
344 | {0x0405, "Logical unit not ready, rebuild in progress"}, | ||
345 | {0x0406, "Logical unit not ready, recalculation in progress"}, | ||
346 | {0x0407, "Logical unit not ready, operation in progress"}, | ||
347 | {0x0408, "Logical unit not ready, long write in progress"}, | ||
348 | {0x0409, "Logical unit not ready, self-test in progress"}, | ||
349 | {0x040A, "Logical unit not accessible, asymmetric access state " | ||
350 | "transition"}, | ||
351 | {0x040B, "Logical unit not accessible, target port in standby state"}, | ||
352 | {0x040C, "Logical unit not accessible, target port in unavailable " | ||
353 | "state"}, | ||
354 | {0x040D, "Logical unit not ready, structure check required"}, | ||
355 | {0x040E, "Logical unit not ready, security session in progress"}, | ||
356 | {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, | ||
357 | {0x0411, "Logical unit not ready, notify (enable spinup) required"}, | ||
358 | {0x0412, "Logical unit not ready, offline"}, | ||
359 | {0x0413, "Logical unit not ready, SA creation in progress"}, | ||
360 | {0x0414, "Logical unit not ready, space allocation in progress"}, | ||
361 | {0x0415, "Logical unit not ready, robotics disabled"}, | ||
362 | {0x0416, "Logical unit not ready, configuration required"}, | ||
363 | {0x0417, "Logical unit not ready, calibration required"}, | ||
364 | {0x0418, "Logical unit not ready, a door is open"}, | ||
365 | {0x0419, "Logical unit not ready, operating in sequential mode"}, | ||
366 | {0x041A, "Logical unit not ready, start stop unit command in " | ||
367 | "progress"}, | ||
368 | {0x041B, "Logical unit not ready, sanitize in progress"}, | ||
369 | {0x041C, "Logical unit not ready, additional power use not yet " | ||
370 | "granted"}, | ||
371 | {0x041D, "Logical unit not ready, configuration in progress"}, | ||
372 | {0x041E, "Logical unit not ready, microcode activation required"}, | ||
373 | {0x041F, "Logical unit not ready, microcode download required"}, | ||
374 | {0x0420, "Logical unit not ready, logical unit reset required"}, | ||
375 | {0x0421, "Logical unit not ready, hard reset required"}, | ||
376 | {0x0422, "Logical unit not ready, power cycle required"}, | ||
377 | |||
378 | {0x0500, "Logical unit does not respond to selection"}, | ||
379 | |||
380 | {0x0600, "No reference position found"}, | ||
381 | |||
382 | {0x0700, "Multiple peripheral devices selected"}, | ||
383 | |||
384 | {0x0800, "Logical unit communication failure"}, | ||
385 | {0x0801, "Logical unit communication time-out"}, | ||
386 | {0x0802, "Logical unit communication parity error"}, | ||
387 | {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"}, | ||
388 | {0x0804, "Unreachable copy target"}, | ||
389 | |||
390 | {0x0900, "Track following error"}, | ||
391 | {0x0901, "Tracking servo failure"}, | ||
392 | {0x0902, "Focus servo failure"}, | ||
393 | {0x0903, "Spindle servo failure"}, | ||
394 | {0x0904, "Head select fault"}, | ||
395 | {0x0905, "Vibration induced tracking error"}, | ||
396 | |||
397 | {0x0A00, "Error log overflow"}, | ||
398 | |||
399 | {0x0B00, "Warning"}, | ||
400 | {0x0B01, "Warning - specified temperature exceeded"}, | ||
401 | {0x0B02, "Warning - enclosure degraded"}, | ||
402 | {0x0B03, "Warning - background self-test failed"}, | ||
403 | {0x0B04, "Warning - background pre-scan detected medium error"}, | ||
404 | {0x0B05, "Warning - background medium scan detected medium error"}, | ||
405 | {0x0B06, "Warning - non-volatile cache now volatile"}, | ||
406 | {0x0B07, "Warning - degraded power to non-volatile cache"}, | ||
407 | {0x0B08, "Warning - power loss expected"}, | ||
408 | {0x0B09, "Warning - device statistics notification active"}, | ||
409 | |||
410 | {0x0C00, "Write error"}, | ||
411 | {0x0C01, "Write error - recovered with auto reallocation"}, | ||
412 | {0x0C02, "Write error - auto reallocation failed"}, | ||
413 | {0x0C03, "Write error - recommend reassignment"}, | ||
414 | {0x0C04, "Compression check miscompare error"}, | ||
415 | {0x0C05, "Data expansion occurred during compression"}, | ||
416 | {0x0C06, "Block not compressible"}, | ||
417 | {0x0C07, "Write error - recovery needed"}, | ||
418 | {0x0C08, "Write error - recovery failed"}, | ||
419 | {0x0C09, "Write error - loss of streaming"}, | ||
420 | {0x0C0A, "Write error - padding blocks added"}, | ||
421 | {0x0C0B, "Auxiliary memory write error"}, | ||
422 | {0x0C0C, "Write error - unexpected unsolicited data"}, | ||
423 | {0x0C0D, "Write error - not enough unsolicited data"}, | ||
424 | {0x0C0E, "Multiple write errors"}, | ||
425 | {0x0C0F, "Defects in error window"}, | ||
426 | {0x0C10, "Incomplete multiple atomic write operations"}, | ||
427 | |||
428 | {0x0D00, "Error detected by third party temporary initiator"}, | ||
429 | {0x0D01, "Third party device failure"}, | ||
430 | {0x0D02, "Copy target device not reachable"}, | ||
431 | {0x0D03, "Incorrect copy target device type"}, | ||
432 | {0x0D04, "Copy target device data underrun"}, | ||
433 | {0x0D05, "Copy target device data overrun"}, | ||
434 | |||
435 | {0x0E00, "Invalid information unit"}, | ||
436 | {0x0E01, "Information unit too short"}, | ||
437 | {0x0E02, "Information unit too long"}, | ||
438 | {0x0E03, "Invalid field in command information unit"}, | ||
439 | |||
440 | {0x1000, "Id CRC or ECC error"}, | ||
441 | {0x1001, "Logical block guard check failed"}, | ||
442 | {0x1002, "Logical block application tag check failed"}, | ||
443 | {0x1003, "Logical block reference tag check failed"}, | ||
444 | {0x1004, "Logical block protection error on recover buffered data"}, | ||
445 | {0x1005, "Logical block protection method error"}, | ||
446 | |||
447 | {0x1100, "Unrecovered read error"}, | ||
448 | {0x1101, "Read retries exhausted"}, | ||
449 | {0x1102, "Error too long to correct"}, | ||
450 | {0x1103, "Multiple read errors"}, | ||
451 | {0x1104, "Unrecovered read error - auto reallocate failed"}, | ||
452 | {0x1105, "L-EC uncorrectable error"}, | ||
453 | {0x1106, "CIRC unrecovered error"}, | ||
454 | {0x1107, "Data re-synchronization error"}, | ||
455 | {0x1108, "Incomplete block read"}, | ||
456 | {0x1109, "No gap found"}, | ||
457 | {0x110A, "Miscorrected error"}, | ||
458 | {0x110B, "Unrecovered read error - recommend reassignment"}, | ||
459 | {0x110C, "Unrecovered read error - recommend rewrite the data"}, | ||
460 | {0x110D, "De-compression CRC error"}, | ||
461 | {0x110E, "Cannot decompress using declared algorithm"}, | ||
462 | {0x110F, "Error reading UPC/EAN number"}, | ||
463 | {0x1110, "Error reading ISRC number"}, | ||
464 | {0x1111, "Read error - loss of streaming"}, | ||
465 | {0x1112, "Auxiliary memory read error"}, | ||
466 | {0x1113, "Read error - failed retransmission request"}, | ||
467 | {0x1114, "Read error - lba marked bad by application client"}, | ||
468 | {0x1115, "Write after sanitize required"}, | ||
469 | |||
470 | {0x1200, "Address mark not found for id field"}, | ||
471 | |||
472 | {0x1300, "Address mark not found for data field"}, | ||
473 | |||
474 | {0x1400, "Recorded entity not found"}, | ||
475 | {0x1401, "Record not found"}, | ||
476 | {0x1402, "Filemark or setmark not found"}, | ||
477 | {0x1403, "End-of-data not found"}, | ||
478 | {0x1404, "Block sequence error"}, | ||
479 | {0x1405, "Record not found - recommend reassignment"}, | ||
480 | {0x1406, "Record not found - data auto-reallocated"}, | ||
481 | {0x1407, "Locate operation failure"}, | ||
482 | |||
483 | {0x1500, "Random positioning error"}, | ||
484 | {0x1501, "Mechanical positioning error"}, | ||
485 | {0x1502, "Positioning error detected by read of medium"}, | ||
486 | |||
487 | {0x1600, "Data synchronization mark error"}, | ||
488 | {0x1601, "Data sync error - data rewritten"}, | ||
489 | {0x1602, "Data sync error - recommend rewrite"}, | ||
490 | {0x1603, "Data sync error - data auto-reallocated"}, | ||
491 | {0x1604, "Data sync error - recommend reassignment"}, | ||
492 | |||
493 | {0x1700, "Recovered data with no error correction applied"}, | ||
494 | {0x1701, "Recovered data with retries"}, | ||
495 | {0x1702, "Recovered data with positive head offset"}, | ||
496 | {0x1703, "Recovered data with negative head offset"}, | ||
497 | {0x1704, "Recovered data with retries and/or circ applied"}, | ||
498 | {0x1705, "Recovered data using previous sector id"}, | ||
499 | {0x1706, "Recovered data without ECC - data auto-reallocated"}, | ||
500 | {0x1707, "Recovered data without ECC - recommend reassignment"}, | ||
501 | {0x1708, "Recovered data without ECC - recommend rewrite"}, | ||
502 | {0x1709, "Recovered data without ECC - data rewritten"}, | ||
503 | |||
504 | {0x1800, "Recovered data with error correction applied"}, | ||
505 | {0x1801, "Recovered data with error corr. & retries applied"}, | ||
506 | {0x1802, "Recovered data - data auto-reallocated"}, | ||
507 | {0x1803, "Recovered data with CIRC"}, | ||
508 | {0x1804, "Recovered data with L-EC"}, | ||
509 | {0x1805, "Recovered data - recommend reassignment"}, | ||
510 | {0x1806, "Recovered data - recommend rewrite"}, | ||
511 | {0x1807, "Recovered data with ECC - data rewritten"}, | ||
512 | {0x1808, "Recovered data with linking"}, | ||
513 | |||
514 | {0x1900, "Defect list error"}, | ||
515 | {0x1901, "Defect list not available"}, | ||
516 | {0x1902, "Defect list error in primary list"}, | ||
517 | {0x1903, "Defect list error in grown list"}, | ||
518 | |||
519 | {0x1A00, "Parameter list length error"}, | ||
520 | |||
521 | {0x1B00, "Synchronous data transfer error"}, | ||
522 | |||
523 | {0x1C00, "Defect list not found"}, | ||
524 | {0x1C01, "Primary defect list not found"}, | ||
525 | {0x1C02, "Grown defect list not found"}, | ||
526 | |||
527 | {0x1D00, "Miscompare during verify operation"}, | ||
528 | {0x1D01, "Miscompare verify of unmapped LBA"}, | ||
529 | |||
530 | {0x1E00, "Recovered id with ECC correction"}, | ||
531 | |||
532 | {0x1F00, "Partial defect list transfer"}, | ||
533 | |||
534 | {0x2000, "Invalid command operation code"}, | ||
535 | {0x2001, "Access denied - initiator pending-enrolled"}, | ||
536 | {0x2002, "Access denied - no access rights"}, | ||
537 | {0x2003, "Access denied - invalid mgmt id key"}, | ||
538 | {0x2004, "Illegal command while in write capable state"}, | ||
539 | {0x2005, "Obsolete"}, | ||
540 | {0x2006, "Illegal command while in explicit address mode"}, | ||
541 | {0x2007, "Illegal command while in implicit address mode"}, | ||
542 | {0x2008, "Access denied - enrollment conflict"}, | ||
543 | {0x2009, "Access denied - invalid LU identifier"}, | ||
544 | {0x200A, "Access denied - invalid proxy token"}, | ||
545 | {0x200B, "Access denied - ACL LUN conflict"}, | ||
546 | {0x200C, "Illegal command when not in append-only mode"}, | ||
547 | |||
548 | {0x2100, "Logical block address out of range"}, | ||
549 | {0x2101, "Invalid element address"}, | ||
550 | {0x2102, "Invalid address for write"}, | ||
551 | {0x2103, "Invalid write crossing layer jump"}, | ||
552 | {0x2104, "Unaligned write command"}, | ||
553 | {0x2105, "Write boundary violation"}, | ||
554 | {0x2106, "Attempt to read invalid data"}, | ||
555 | {0x2107, "Read boundary violation"}, | ||
556 | |||
557 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, | ||
558 | |||
559 | {0x2300, "Invalid token operation, cause not reportable"}, | ||
560 | {0x2301, "Invalid token operation, unsupported token type"}, | ||
561 | {0x2302, "Invalid token operation, remote token usage not supported"}, | ||
562 | {0x2303, "Invalid token operation, remote rod token creation not " | ||
563 | "supported"}, | ||
564 | {0x2304, "Invalid token operation, token unknown"}, | ||
565 | {0x2305, "Invalid token operation, token corrupt"}, | ||
566 | {0x2306, "Invalid token operation, token revoked"}, | ||
567 | {0x2307, "Invalid token operation, token expired"}, | ||
568 | {0x2308, "Invalid token operation, token cancelled"}, | ||
569 | {0x2309, "Invalid token operation, token deleted"}, | ||
570 | {0x230A, "Invalid token operation, invalid token length"}, | ||
571 | |||
572 | {0x2400, "Invalid field in cdb"}, | ||
573 | {0x2401, "CDB decryption error"}, | ||
574 | {0x2402, "Obsolete"}, | ||
575 | {0x2403, "Obsolete"}, | ||
576 | {0x2404, "Security audit value frozen"}, | ||
577 | {0x2405, "Security working key frozen"}, | ||
578 | {0x2406, "Nonce not unique"}, | ||
579 | {0x2407, "Nonce timestamp out of range"}, | ||
580 | {0x2408, "Invalid XCDB"}, | ||
581 | |||
582 | {0x2500, "Logical unit not supported"}, | ||
583 | |||
584 | {0x2600, "Invalid field in parameter list"}, | ||
585 | {0x2601, "Parameter not supported"}, | ||
586 | {0x2602, "Parameter value invalid"}, | ||
587 | {0x2603, "Threshold parameters not supported"}, | ||
588 | {0x2604, "Invalid release of persistent reservation"}, | ||
589 | {0x2605, "Data decryption error"}, | ||
590 | {0x2606, "Too many target descriptors"}, | ||
591 | {0x2607, "Unsupported target descriptor type code"}, | ||
592 | {0x2608, "Too many segment descriptors"}, | ||
593 | {0x2609, "Unsupported segment descriptor type code"}, | ||
594 | {0x260A, "Unexpected inexact segment"}, | ||
595 | {0x260B, "Inline data length exceeded"}, | ||
596 | {0x260C, "Invalid operation for copy source or destination"}, | ||
597 | {0x260D, "Copy segment granularity violation"}, | ||
598 | {0x260E, "Invalid parameter while port is enabled"}, | ||
599 | {0x260F, "Invalid data-out buffer integrity check value"}, | ||
600 | {0x2610, "Data decryption key fail limit reached"}, | ||
601 | {0x2611, "Incomplete key-associated data set"}, | ||
602 | {0x2612, "Vendor specific key reference not found"}, | ||
603 | |||
604 | {0x2700, "Write protected"}, | ||
605 | {0x2701, "Hardware write protected"}, | ||
606 | {0x2702, "Logical unit software write protected"}, | ||
607 | {0x2703, "Associated write protect"}, | ||
608 | {0x2704, "Persistent write protect"}, | ||
609 | {0x2705, "Permanent write protect"}, | ||
610 | {0x2706, "Conditional write protect"}, | ||
611 | {0x2707, "Space allocation failed write protect"}, | ||
612 | {0x2708, "Zone is read only"}, | ||
613 | |||
614 | {0x2800, "Not ready to ready change, medium may have changed"}, | ||
615 | {0x2801, "Import or export element accessed"}, | ||
616 | {0x2802, "Format-layer may have changed"}, | ||
617 | {0x2803, "Import/export element accessed, medium changed"}, | ||
618 | |||
619 | {0x2900, "Power on, reset, or bus device reset occurred"}, | ||
620 | {0x2901, "Power on occurred"}, | ||
621 | {0x2902, "Scsi bus reset occurred"}, | ||
622 | {0x2903, "Bus device reset function occurred"}, | ||
623 | {0x2904, "Device internal reset"}, | ||
624 | {0x2905, "Transceiver mode changed to single-ended"}, | ||
625 | {0x2906, "Transceiver mode changed to lvd"}, | ||
626 | {0x2907, "I_T nexus loss occurred"}, | ||
627 | |||
628 | {0x2A00, "Parameters changed"}, | ||
629 | {0x2A01, "Mode parameters changed"}, | ||
630 | {0x2A02, "Log parameters changed"}, | ||
631 | {0x2A03, "Reservations preempted"}, | ||
632 | {0x2A04, "Reservations released"}, | ||
633 | {0x2A05, "Registrations preempted"}, | ||
634 | {0x2A06, "Asymmetric access state changed"}, | ||
635 | {0x2A07, "Implicit asymmetric access state transition failed"}, | ||
636 | {0x2A08, "Priority changed"}, | ||
637 | {0x2A09, "Capacity data has changed"}, | ||
638 | {0x2A0A, "Error history I_T nexus cleared"}, | ||
639 | {0x2A0B, "Error history snapshot released"}, | ||
640 | {0x2A0C, "Error recovery attributes have changed"}, | ||
641 | {0x2A0D, "Data encryption capabilities changed"}, | ||
642 | {0x2A10, "Timestamp changed"}, | ||
643 | {0x2A11, "Data encryption parameters changed by another i_t nexus"}, | ||
644 | {0x2A12, "Data encryption parameters changed by vendor specific " | ||
645 | "event"}, | ||
646 | {0x2A13, "Data encryption key instance counter has changed"}, | ||
647 | {0x2A14, "SA creation capabilities data has changed"}, | ||
648 | {0x2A15, "Medium removal prevention preempted"}, | ||
649 | |||
650 | {0x2B00, "Copy cannot execute since host cannot disconnect"}, | ||
651 | |||
652 | {0x2C00, "Command sequence error"}, | ||
653 | {0x2C01, "Too many windows specified"}, | ||
654 | {0x2C02, "Invalid combination of windows specified"}, | ||
655 | {0x2C03, "Current program area is not empty"}, | ||
656 | {0x2C04, "Current program area is empty"}, | ||
657 | {0x2C05, "Illegal power condition request"}, | ||
658 | {0x2C06, "Persistent prevent conflict"}, | ||
659 | {0x2C07, "Previous busy status"}, | ||
660 | {0x2C08, "Previous task set full status"}, | ||
661 | {0x2C09, "Previous reservation conflict status"}, | ||
662 | {0x2C0A, "Partition or collection contains user objects"}, | ||
663 | {0x2C0B, "Not reserved"}, | ||
664 | {0x2C0C, "Orwrite generation does not match"}, | ||
665 | {0x2C0D, "Reset write pointer not allowed"}, | ||
666 | {0x2C0E, "Zone is offline"}, | ||
667 | |||
668 | {0x2D00, "Overwrite error on update in place"}, | ||
669 | |||
670 | {0x2E00, "Insufficient time for operation"}, | ||
671 | {0x2E01, "Command timeout before processing"}, | ||
672 | {0x2E02, "Command timeout during processing"}, | ||
673 | {0x2E03, "Command timeout during processing due to error recovery"}, | ||
674 | |||
675 | {0x2F00, "Commands cleared by another initiator"}, | ||
676 | {0x2F01, "Commands cleared by power loss notification"}, | ||
677 | {0x2F02, "Commands cleared by device server"}, | ||
678 | {0x2F03, "Some commands cleared by queuing layer event"}, | ||
679 | |||
680 | {0x3000, "Incompatible medium installed"}, | ||
681 | {0x3001, "Cannot read medium - unknown format"}, | ||
682 | {0x3002, "Cannot read medium - incompatible format"}, | ||
683 | {0x3003, "Cleaning cartridge installed"}, | ||
684 | {0x3004, "Cannot write medium - unknown format"}, | ||
685 | {0x3005, "Cannot write medium - incompatible format"}, | ||
686 | {0x3006, "Cannot format medium - incompatible medium"}, | ||
687 | {0x3007, "Cleaning failure"}, | ||
688 | {0x3008, "Cannot write - application code mismatch"}, | ||
689 | {0x3009, "Current session not fixated for append"}, | ||
690 | {0x300A, "Cleaning request rejected"}, | ||
691 | {0x300C, "WORM medium - overwrite attempted"}, | ||
692 | {0x300D, "WORM medium - integrity check"}, | ||
693 | {0x3010, "Medium not formatted"}, | ||
694 | {0x3011, "Incompatible volume type"}, | ||
695 | {0x3012, "Incompatible volume qualifier"}, | ||
696 | {0x3013, "Cleaning volume expired"}, | ||
697 | |||
698 | {0x3100, "Medium format corrupted"}, | ||
699 | {0x3101, "Format command failed"}, | ||
700 | {0x3102, "Zoned formatting failed due to spare linking"}, | ||
701 | {0x3103, "Sanitize command failed"}, | ||
702 | |||
703 | {0x3200, "No defect spare location available"}, | ||
704 | {0x3201, "Defect list update failure"}, | ||
705 | |||
706 | {0x3300, "Tape length error"}, | ||
707 | |||
708 | {0x3400, "Enclosure failure"}, | ||
709 | |||
710 | {0x3500, "Enclosure services failure"}, | ||
711 | {0x3501, "Unsupported enclosure function"}, | ||
712 | {0x3502, "Enclosure services unavailable"}, | ||
713 | {0x3503, "Enclosure services transfer failure"}, | ||
714 | {0x3504, "Enclosure services transfer refused"}, | ||
715 | {0x3505, "Enclosure services checksum error"}, | ||
716 | |||
717 | {0x3600, "Ribbon, ink, or toner failure"}, | ||
718 | |||
719 | {0x3700, "Rounded parameter"}, | ||
720 | |||
721 | {0x3800, "Event status notification"}, | ||
722 | {0x3802, "Esn - power management class event"}, | ||
723 | {0x3804, "Esn - media class event"}, | ||
724 | {0x3806, "Esn - device busy class event"}, | ||
725 | {0x3807, "Thin Provisioning soft threshold reached"}, | ||
726 | |||
727 | {0x3900, "Saving parameters not supported"}, | ||
728 | |||
729 | {0x3A00, "Medium not present"}, | ||
730 | {0x3A01, "Medium not present - tray closed"}, | ||
731 | {0x3A02, "Medium not present - tray open"}, | ||
732 | {0x3A03, "Medium not present - loadable"}, | ||
733 | {0x3A04, "Medium not present - medium auxiliary memory accessible"}, | ||
734 | |||
735 | {0x3B00, "Sequential positioning error"}, | ||
736 | {0x3B01, "Tape position error at beginning-of-medium"}, | ||
737 | {0x3B02, "Tape position error at end-of-medium"}, | ||
738 | {0x3B03, "Tape or electronic vertical forms unit not ready"}, | ||
739 | {0x3B04, "Slew failure"}, | ||
740 | {0x3B05, "Paper jam"}, | ||
741 | {0x3B06, "Failed to sense top-of-form"}, | ||
742 | {0x3B07, "Failed to sense bottom-of-form"}, | ||
743 | {0x3B08, "Reposition error"}, | ||
744 | {0x3B09, "Read past end of medium"}, | ||
745 | {0x3B0A, "Read past beginning of medium"}, | ||
746 | {0x3B0B, "Position past end of medium"}, | ||
747 | {0x3B0C, "Position past beginning of medium"}, | ||
748 | {0x3B0D, "Medium destination element full"}, | ||
749 | {0x3B0E, "Medium source element empty"}, | ||
750 | {0x3B0F, "End of medium reached"}, | ||
751 | {0x3B11, "Medium magazine not accessible"}, | ||
752 | {0x3B12, "Medium magazine removed"}, | ||
753 | {0x3B13, "Medium magazine inserted"}, | ||
754 | {0x3B14, "Medium magazine locked"}, | ||
755 | {0x3B15, "Medium magazine unlocked"}, | ||
756 | {0x3B16, "Mechanical positioning or changer error"}, | ||
757 | {0x3B17, "Read past end of user object"}, | ||
758 | {0x3B18, "Element disabled"}, | ||
759 | {0x3B19, "Element enabled"}, | ||
760 | {0x3B1A, "Data transfer device removed"}, | ||
761 | {0x3B1B, "Data transfer device inserted"}, | ||
762 | {0x3B1C, "Too many logical objects on partition to support " | ||
763 | "operation"}, | ||
764 | |||
765 | {0x3D00, "Invalid bits in identify message"}, | ||
766 | |||
767 | {0x3E00, "Logical unit has not self-configured yet"}, | ||
768 | {0x3E01, "Logical unit failure"}, | ||
769 | {0x3E02, "Timeout on logical unit"}, | ||
770 | {0x3E03, "Logical unit failed self-test"}, | ||
771 | {0x3E04, "Logical unit unable to update self-test log"}, | ||
772 | |||
773 | {0x3F00, "Target operating conditions have changed"}, | ||
774 | {0x3F01, "Microcode has been changed"}, | ||
775 | {0x3F02, "Changed operating definition"}, | ||
776 | {0x3F03, "Inquiry data has changed"}, | ||
777 | {0x3F04, "Component device attached"}, | ||
778 | {0x3F05, "Device identifier changed"}, | ||
779 | {0x3F06, "Redundancy group created or modified"}, | ||
780 | {0x3F07, "Redundancy group deleted"}, | ||
781 | {0x3F08, "Spare created or modified"}, | ||
782 | {0x3F09, "Spare deleted"}, | ||
783 | {0x3F0A, "Volume set created or modified"}, | ||
784 | {0x3F0B, "Volume set deleted"}, | ||
785 | {0x3F0C, "Volume set deassigned"}, | ||
786 | {0x3F0D, "Volume set reassigned"}, | ||
787 | {0x3F0E, "Reported luns data has changed"}, | ||
788 | {0x3F0F, "Echo buffer overwritten"}, | ||
789 | {0x3F10, "Medium loadable"}, | ||
790 | {0x3F11, "Medium auxiliary memory accessible"}, | ||
791 | {0x3F12, "iSCSI IP address added"}, | ||
792 | {0x3F13, "iSCSI IP address removed"}, | ||
793 | {0x3F14, "iSCSI IP address changed"}, | ||
794 | {0x3F15, "Inspect referrals sense descriptors"}, | ||
795 | {0x3F16, "Microcode has been changed without reset"}, | ||
796 | /* | ||
797 | * {0x40NN, "Ram failure"}, | ||
798 | * {0x40NN, "Diagnostic failure on component nn"}, | ||
799 | * {0x41NN, "Data path failure"}, | ||
800 | * {0x42NN, "Power-on or self-test failure"}, | ||
801 | */ | ||
802 | {0x4300, "Message error"}, | ||
803 | |||
804 | {0x4400, "Internal target failure"}, | ||
805 | {0x4401, "Persistent reservation information lost"}, | ||
806 | {0x4471, "ATA device failed set features"}, | ||
807 | |||
808 | {0x4500, "Select or reselect failure"}, | ||
809 | |||
810 | {0x4600, "Unsuccessful soft reset"}, | ||
811 | |||
812 | {0x4700, "Scsi parity error"}, | ||
813 | {0x4701, "Data phase CRC error detected"}, | ||
814 | {0x4702, "Scsi parity error detected during st data phase"}, | ||
815 | {0x4703, "Information unit iuCRC error detected"}, | ||
816 | {0x4704, "Asynchronous information protection error detected"}, | ||
817 | {0x4705, "Protocol service CRC error"}, | ||
818 | {0x4706, "Phy test function in progress"}, | ||
819 | {0x477f, "Some commands cleared by iSCSI Protocol event"}, | ||
820 | |||
821 | {0x4800, "Initiator detected error message received"}, | ||
822 | |||
823 | {0x4900, "Invalid message error"}, | ||
824 | |||
825 | {0x4A00, "Command phase error"}, | ||
826 | |||
827 | {0x4B00, "Data phase error"}, | ||
828 | {0x4B01, "Invalid target port transfer tag received"}, | ||
829 | {0x4B02, "Too much write data"}, | ||
830 | {0x4B03, "Ack/nak timeout"}, | ||
831 | {0x4B04, "Nak received"}, | ||
832 | {0x4B05, "Data offset error"}, | ||
833 | {0x4B06, "Initiator response timeout"}, | ||
834 | {0x4B07, "Connection lost"}, | ||
835 | {0x4B08, "Data-in buffer overflow - data buffer size"}, | ||
836 | {0x4B09, "Data-in buffer overflow - data buffer descriptor area"}, | ||
837 | {0x4B0A, "Data-in buffer error"}, | ||
838 | {0x4B0B, "Data-out buffer overflow - data buffer size"}, | ||
839 | {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"}, | ||
840 | {0x4B0D, "Data-out buffer error"}, | ||
841 | {0x4B0E, "PCIe fabric error"}, | ||
842 | {0x4B0F, "PCIe completion timeout"}, | ||
843 | {0x4B10, "PCIe completer abort"}, | ||
844 | {0x4B11, "PCIe poisoned tlp received"}, | ||
845 | {0x4B12, "PCIe eCRC check failed"}, | ||
846 | {0x4B13, "PCIe unsupported request"}, | ||
847 | {0x4B14, "PCIe acs violation"}, | ||
848 | {0x4B15, "PCIe tlp prefix blocked"}, | ||
849 | |||
850 | {0x4C00, "Logical unit failed self-configuration"}, | ||
851 | /* | ||
852 | * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"}, | ||
853 | */ | ||
854 | {0x4E00, "Overlapped commands attempted"}, | ||
855 | |||
856 | {0x5000, "Write append error"}, | ||
857 | {0x5001, "Write append position error"}, | ||
858 | {0x5002, "Position error related to timing"}, | ||
859 | |||
860 | {0x5100, "Erase failure"}, | ||
861 | {0x5101, "Erase failure - incomplete erase operation detected"}, | ||
862 | |||
863 | {0x5200, "Cartridge fault"}, | ||
864 | |||
865 | {0x5300, "Media load or eject failed"}, | ||
866 | {0x5301, "Unload tape failure"}, | ||
867 | {0x5302, "Medium removal prevented"}, | ||
868 | {0x5303, "Medium removal prevented by data transfer element"}, | ||
869 | {0x5304, "Medium thread or unthread failure"}, | ||
870 | {0x5305, "Volume identifier invalid"}, | ||
871 | {0x5306, "Volume identifier missing"}, | ||
872 | {0x5307, "Duplicate volume identifier"}, | ||
873 | {0x5308, "Element status unknown"}, | ||
874 | {0x5309, "Data transfer device error - load failed"}, | ||
875 | {0x530a, "Data transfer device error - unload failed"}, | ||
876 | {0x530b, "Data transfer device error - unload missing"}, | ||
877 | {0x530c, "Data transfer device error - eject failed"}, | ||
878 | {0x530d, "Data transfer device error - library communication failed"}, | ||
879 | |||
880 | {0x5400, "Scsi to host system interface failure"}, | ||
881 | |||
882 | {0x5500, "System resource failure"}, | ||
883 | {0x5501, "System buffer full"}, | ||
884 | {0x5502, "Insufficient reservation resources"}, | ||
885 | {0x5503, "Insufficient resources"}, | ||
886 | {0x5504, "Insufficient registration resources"}, | ||
887 | {0x5505, "Insufficient access control resources"}, | ||
888 | {0x5506, "Auxiliary memory out of space"}, | ||
889 | {0x5507, "Quota error"}, | ||
890 | {0x5508, "Maximum number of supplemental decryption keys exceeded"}, | ||
891 | {0x5509, "Medium auxiliary memory not accessible"}, | ||
892 | {0x550A, "Data currently unavailable"}, | ||
893 | {0x550B, "Insufficient power for operation"}, | ||
894 | {0x550C, "Insufficient resources to create rod"}, | ||
895 | {0x550D, "Insufficient resources to create rod token"}, | ||
896 | {0x550E, "Insufficient zone resources"}, | ||
897 | |||
898 | {0x5700, "Unable to recover table-of-contents"}, | ||
899 | |||
900 | {0x5800, "Generation does not exist"}, | ||
901 | |||
902 | {0x5900, "Updated block read"}, | ||
903 | |||
904 | {0x5A00, "Operator request or state change input"}, | ||
905 | {0x5A01, "Operator medium removal request"}, | ||
906 | {0x5A02, "Operator selected write protect"}, | ||
907 | {0x5A03, "Operator selected write permit"}, | ||
908 | |||
909 | {0x5B00, "Log exception"}, | ||
910 | {0x5B01, "Threshold condition met"}, | ||
911 | {0x5B02, "Log counter at maximum"}, | ||
912 | {0x5B03, "Log list codes exhausted"}, | ||
913 | |||
914 | {0x5C00, "Rpl status change"}, | ||
915 | {0x5C01, "Spindles synchronized"}, | ||
916 | {0x5C02, "Spindles not synchronized"}, | ||
917 | |||
918 | {0x5D00, "Failure prediction threshold exceeded"}, | ||
919 | {0x5D01, "Media failure prediction threshold exceeded"}, | ||
920 | {0x5D02, "Logical unit failure prediction threshold exceeded"}, | ||
921 | {0x5D03, "Spare area exhaustion prediction threshold exceeded"}, | ||
922 | {0x5D10, "Hardware impending failure general hard drive failure"}, | ||
923 | {0x5D11, "Hardware impending failure drive error rate too high"}, | ||
924 | {0x5D12, "Hardware impending failure data error rate too high"}, | ||
925 | {0x5D13, "Hardware impending failure seek error rate too high"}, | ||
926 | {0x5D14, "Hardware impending failure too many block reassigns"}, | ||
927 | {0x5D15, "Hardware impending failure access times too high"}, | ||
928 | {0x5D16, "Hardware impending failure start unit times too high"}, | ||
929 | {0x5D17, "Hardware impending failure channel parametrics"}, | ||
930 | {0x5D18, "Hardware impending failure controller detected"}, | ||
931 | {0x5D19, "Hardware impending failure throughput performance"}, | ||
932 | {0x5D1A, "Hardware impending failure seek time performance"}, | ||
933 | {0x5D1B, "Hardware impending failure spin-up retry count"}, | ||
934 | {0x5D1C, "Hardware impending failure drive calibration retry count"}, | ||
935 | {0x5D20, "Controller impending failure general hard drive failure"}, | ||
936 | {0x5D21, "Controller impending failure drive error rate too high"}, | ||
937 | {0x5D22, "Controller impending failure data error rate too high"}, | ||
938 | {0x5D23, "Controller impending failure seek error rate too high"}, | ||
939 | {0x5D24, "Controller impending failure too many block reassigns"}, | ||
940 | {0x5D25, "Controller impending failure access times too high"}, | ||
941 | {0x5D26, "Controller impending failure start unit times too high"}, | ||
942 | {0x5D27, "Controller impending failure channel parametrics"}, | ||
943 | {0x5D28, "Controller impending failure controller detected"}, | ||
944 | {0x5D29, "Controller impending failure throughput performance"}, | ||
945 | {0x5D2A, "Controller impending failure seek time performance"}, | ||
946 | {0x5D2B, "Controller impending failure spin-up retry count"}, | ||
947 | {0x5D2C, "Controller impending failure drive calibration retry count"}, | ||
948 | {0x5D30, "Data channel impending failure general hard drive failure"}, | ||
949 | {0x5D31, "Data channel impending failure drive error rate too high"}, | ||
950 | {0x5D32, "Data channel impending failure data error rate too high"}, | ||
951 | {0x5D33, "Data channel impending failure seek error rate too high"}, | ||
952 | {0x5D34, "Data channel impending failure too many block reassigns"}, | ||
953 | {0x5D35, "Data channel impending failure access times too high"}, | ||
954 | {0x5D36, "Data channel impending failure start unit times too high"}, | ||
955 | {0x5D37, "Data channel impending failure channel parametrics"}, | ||
956 | {0x5D38, "Data channel impending failure controller detected"}, | ||
957 | {0x5D39, "Data channel impending failure throughput performance"}, | ||
958 | {0x5D3A, "Data channel impending failure seek time performance"}, | ||
959 | {0x5D3B, "Data channel impending failure spin-up retry count"}, | ||
960 | {0x5D3C, "Data channel impending failure drive calibration retry " | ||
961 | "count"}, | ||
962 | {0x5D40, "Servo impending failure general hard drive failure"}, | ||
963 | {0x5D41, "Servo impending failure drive error rate too high"}, | ||
964 | {0x5D42, "Servo impending failure data error rate too high"}, | ||
965 | {0x5D43, "Servo impending failure seek error rate too high"}, | ||
966 | {0x5D44, "Servo impending failure too many block reassigns"}, | ||
967 | {0x5D45, "Servo impending failure access times too high"}, | ||
968 | {0x5D46, "Servo impending failure start unit times too high"}, | ||
969 | {0x5D47, "Servo impending failure channel parametrics"}, | ||
970 | {0x5D48, "Servo impending failure controller detected"}, | ||
971 | {0x5D49, "Servo impending failure throughput performance"}, | ||
972 | {0x5D4A, "Servo impending failure seek time performance"}, | ||
973 | {0x5D4B, "Servo impending failure spin-up retry count"}, | ||
974 | {0x5D4C, "Servo impending failure drive calibration retry count"}, | ||
975 | {0x5D50, "Spindle impending failure general hard drive failure"}, | ||
976 | {0x5D51, "Spindle impending failure drive error rate too high"}, | ||
977 | {0x5D52, "Spindle impending failure data error rate too high"}, | ||
978 | {0x5D53, "Spindle impending failure seek error rate too high"}, | ||
979 | {0x5D54, "Spindle impending failure too many block reassigns"}, | ||
980 | {0x5D55, "Spindle impending failure access times too high"}, | ||
981 | {0x5D56, "Spindle impending failure start unit times too high"}, | ||
982 | {0x5D57, "Spindle impending failure channel parametrics"}, | ||
983 | {0x5D58, "Spindle impending failure controller detected"}, | ||
984 | {0x5D59, "Spindle impending failure throughput performance"}, | ||
985 | {0x5D5A, "Spindle impending failure seek time performance"}, | ||
986 | {0x5D5B, "Spindle impending failure spin-up retry count"}, | ||
987 | {0x5D5C, "Spindle impending failure drive calibration retry count"}, | ||
988 | {0x5D60, "Firmware impending failure general hard drive failure"}, | ||
989 | {0x5D61, "Firmware impending failure drive error rate too high"}, | ||
990 | {0x5D62, "Firmware impending failure data error rate too high"}, | ||
991 | {0x5D63, "Firmware impending failure seek error rate too high"}, | ||
992 | {0x5D64, "Firmware impending failure too many block reassigns"}, | ||
993 | {0x5D65, "Firmware impending failure access times too high"}, | ||
994 | {0x5D66, "Firmware impending failure start unit times too high"}, | ||
995 | {0x5D67, "Firmware impending failure channel parametrics"}, | ||
996 | {0x5D68, "Firmware impending failure controller detected"}, | ||
997 | {0x5D69, "Firmware impending failure throughput performance"}, | ||
998 | {0x5D6A, "Firmware impending failure seek time performance"}, | ||
999 | {0x5D6B, "Firmware impending failure spin-up retry count"}, | ||
1000 | {0x5D6C, "Firmware impending failure drive calibration retry count"}, | ||
1001 | {0x5DFF, "Failure prediction threshold exceeded (false)"}, | ||
1002 | |||
1003 | {0x5E00, "Low power condition on"}, | ||
1004 | {0x5E01, "Idle condition activated by timer"}, | ||
1005 | {0x5E02, "Standby condition activated by timer"}, | ||
1006 | {0x5E03, "Idle condition activated by command"}, | ||
1007 | {0x5E04, "Standby condition activated by command"}, | ||
1008 | {0x5E05, "Idle_b condition activated by timer"}, | ||
1009 | {0x5E06, "Idle_b condition activated by command"}, | ||
1010 | {0x5E07, "Idle_c condition activated by timer"}, | ||
1011 | {0x5E08, "Idle_c condition activated by command"}, | ||
1012 | {0x5E09, "Standby_y condition activated by timer"}, | ||
1013 | {0x5E0A, "Standby_y condition activated by command"}, | ||
1014 | {0x5E41, "Power state change to active"}, | ||
1015 | {0x5E42, "Power state change to idle"}, | ||
1016 | {0x5E43, "Power state change to standby"}, | ||
1017 | {0x5E45, "Power state change to sleep"}, | ||
1018 | {0x5E47, "Power state change to device control"}, | ||
1019 | |||
1020 | {0x6000, "Lamp failure"}, | ||
1021 | |||
1022 | {0x6100, "Video acquisition error"}, | ||
1023 | {0x6101, "Unable to acquire video"}, | ||
1024 | {0x6102, "Out of focus"}, | ||
1025 | |||
1026 | {0x6200, "Scan head positioning error"}, | ||
1027 | |||
1028 | {0x6300, "End of user area encountered on this track"}, | ||
1029 | {0x6301, "Packet does not fit in available space"}, | ||
1030 | |||
1031 | {0x6400, "Illegal mode for this track"}, | ||
1032 | {0x6401, "Invalid packet size"}, | ||
1033 | |||
1034 | {0x6500, "Voltage fault"}, | ||
1035 | |||
1036 | {0x6600, "Automatic document feeder cover up"}, | ||
1037 | {0x6601, "Automatic document feeder lift up"}, | ||
1038 | {0x6602, "Document jam in automatic document feeder"}, | ||
1039 | {0x6603, "Document miss feed automatic in document feeder"}, | ||
1040 | |||
1041 | {0x6700, "Configuration failure"}, | ||
1042 | {0x6701, "Configuration of incapable logical units failed"}, | ||
1043 | {0x6702, "Add logical unit failed"}, | ||
1044 | {0x6703, "Modification of logical unit failed"}, | ||
1045 | {0x6704, "Exchange of logical unit failed"}, | ||
1046 | {0x6705, "Remove of logical unit failed"}, | ||
1047 | {0x6706, "Attachment of logical unit failed"}, | ||
1048 | {0x6707, "Creation of logical unit failed"}, | ||
1049 | {0x6708, "Assign failure occurred"}, | ||
1050 | {0x6709, "Multiply assigned logical unit"}, | ||
1051 | {0x670A, "Set target port groups command failed"}, | ||
1052 | {0x670B, "ATA device feature not enabled"}, | ||
1053 | |||
1054 | {0x6800, "Logical unit not configured"}, | ||
1055 | {0x6801, "Subsidiary logical unit not configured"}, | ||
1056 | |||
1057 | {0x6900, "Data loss on logical unit"}, | ||
1058 | {0x6901, "Multiple logical unit failures"}, | ||
1059 | {0x6902, "Parity/data mismatch"}, | ||
1060 | |||
1061 | {0x6A00, "Informational, refer to log"}, | ||
1062 | |||
1063 | {0x6B00, "State change has occurred"}, | ||
1064 | {0x6B01, "Redundancy level got better"}, | ||
1065 | {0x6B02, "Redundancy level got worse"}, | ||
1066 | |||
1067 | {0x6C00, "Rebuild failure occurred"}, | ||
1068 | |||
1069 | {0x6D00, "Recalculate failure occurred"}, | ||
1070 | |||
1071 | {0x6E00, "Command to logical unit failed"}, | ||
1072 | |||
1073 | {0x6F00, "Copy protection key exchange failure - authentication " | ||
1074 | "failure"}, | ||
1075 | {0x6F01, "Copy protection key exchange failure - key not present"}, | ||
1076 | {0x6F02, "Copy protection key exchange failure - key not established"}, | ||
1077 | {0x6F03, "Read of scrambled sector without authentication"}, | ||
1078 | {0x6F04, "Media region code is mismatched to logical unit region"}, | ||
1079 | {0x6F05, "Drive region must be permanent/region reset count error"}, | ||
1080 | {0x6F06, "Insufficient block count for binding nonce recording"}, | ||
1081 | {0x6F07, "Conflict in binding nonce recording"}, | ||
1082 | /* | ||
1083 | * {0x70NN, "Decompression exception short algorithm id of nn"}, | ||
1084 | */ | ||
1085 | {0x7100, "Decompression exception long algorithm id"}, | ||
1086 | |||
1087 | {0x7200, "Session fixation error"}, | ||
1088 | {0x7201, "Session fixation error writing lead-in"}, | ||
1089 | {0x7202, "Session fixation error writing lead-out"}, | ||
1090 | {0x7203, "Session fixation error - incomplete track in session"}, | ||
1091 | {0x7204, "Empty or partially written reserved track"}, | ||
1092 | {0x7205, "No more track reservations allowed"}, | ||
1093 | {0x7206, "RMZ extension is not allowed"}, | ||
1094 | {0x7207, "No more test zone extensions are allowed"}, | ||
1095 | |||
1096 | {0x7300, "Cd control error"}, | ||
1097 | {0x7301, "Power calibration area almost full"}, | ||
1098 | {0x7302, "Power calibration area is full"}, | ||
1099 | {0x7303, "Power calibration area error"}, | ||
1100 | {0x7304, "Program memory area update failure"}, | ||
1101 | {0x7305, "Program memory area is full"}, | ||
1102 | {0x7306, "RMA/PMA is almost full"}, | ||
1103 | {0x7310, "Current power calibration area almost full"}, | ||
1104 | {0x7311, "Current power calibration area is full"}, | ||
1105 | {0x7317, "RDZ is full"}, | ||
1106 | |||
1107 | {0x7400, "Security error"}, | ||
1108 | {0x7401, "Unable to decrypt data"}, | ||
1109 | {0x7402, "Unencrypted data encountered while decrypting"}, | ||
1110 | {0x7403, "Incorrect data encryption key"}, | ||
1111 | {0x7404, "Cryptographic integrity validation failed"}, | ||
1112 | {0x7405, "Error decrypting data"}, | ||
1113 | {0x7406, "Unknown signature verification key"}, | ||
1114 | {0x7407, "Encryption parameters not useable"}, | ||
1115 | {0x7408, "Digital signature validation failure"}, | ||
1116 | {0x7409, "Encryption mode mismatch on read"}, | ||
1117 | {0x740A, "Encrypted block not raw read enabled"}, | ||
1118 | {0x740B, "Incorrect Encryption parameters"}, | ||
1119 | {0x740C, "Unable to decrypt parameter list"}, | ||
1120 | {0x740D, "Encryption algorithm disabled"}, | ||
1121 | {0x7410, "SA creation parameter value invalid"}, | ||
1122 | {0x7411, "SA creation parameter value rejected"}, | ||
1123 | {0x7412, "Invalid SA usage"}, | ||
1124 | {0x7421, "Data Encryption configuration prevented"}, | ||
1125 | {0x7430, "SA creation parameter not supported"}, | ||
1126 | {0x7440, "Authentication failed"}, | ||
1127 | {0x7461, "External data encryption key manager access error"}, | ||
1128 | {0x7462, "External data encryption key manager error"}, | ||
1129 | {0x7463, "External data encryption key not found"}, | ||
1130 | {0x7464, "External data encryption request not authorized"}, | ||
1131 | {0x746E, "External data encryption control timeout"}, | ||
1132 | {0x746F, "External data encryption control error"}, | ||
1133 | {0x7471, "Logical unit access not authorized"}, | ||
1134 | {0x7479, "Security conflict in translated device"}, | ||
1135 | |||
1136 | {0, NULL} | ||
1137 | }; | 311 | }; |
1138 | 312 | ||
313 | static const char *additional_text = | ||
314 | #define SENSE_CODE(c, s) s "\0" | ||
315 | #include "sense_codes.h" | ||
316 | #undef SENSE_CODE | ||
317 | ; | ||
318 | |||
1139 | struct error_info2 { | 319 | struct error_info2 { |
1140 | unsigned char code1, code2_min, code2_max; | 320 | unsigned char code1, code2_min, code2_max; |
1141 | const char * str; | 321 | const char * str; |
@@ -1197,11 +377,14 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) | |||
1197 | { | 377 | { |
1198 | int i; | 378 | int i; |
1199 | unsigned short code = ((asc << 8) | ascq); | 379 | unsigned short code = ((asc << 8) | ascq); |
380 | unsigned offset = 0; | ||
1200 | 381 | ||
1201 | *fmt = NULL; | 382 | *fmt = NULL; |
1202 | for (i = 0; additional[i].text; i++) | 383 | for (i = 0; i < ARRAY_SIZE(additional); i++) { |
1203 | if (additional[i].code12 == code) | 384 | if (additional[i].code12 == code) |
1204 | return additional[i].text; | 385 | return additional_text + offset; |
386 | offset += additional[i].size; | ||
387 | } | ||
1205 | for (i = 0; additional2[i].fmt; i++) { | 388 | for (i = 0; additional2[i].fmt; i++) { |
1206 | if (additional2[i].code1 == asc && | 389 | if (additional2[i].code1 == asc && |
1207 | ascq >= additional2[i].code2_min && | 390 | ascq >= additional2[i].code2_min && |
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index d8a5cb3cd2bd..ce1507023132 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c | |||
@@ -1615,6 +1615,13 @@ err1: | |||
1615 | * place at the same time and the failure was due to CXL services being | 1615 | * place at the same time and the failure was due to CXL services being |
1616 | * unable to keep up. | 1616 | * unable to keep up. |
1617 | * | 1617 | * |
1618 | * As this routine is called on ioctl context, it holds the ioctl r/w | ||
1619 | * semaphore that is used to drain ioctls in recovery scenarios. The | ||
1620 | * implementation to achieve the pacing described above (a local mutex) | ||
1621 | * requires that the ioctl r/w semaphore be dropped and reacquired to | ||
1622 | * avoid a 3-way deadlock when multiple process recoveries operate in | ||
1623 | * parallel. | ||
1624 | * | ||
1618 | * Because a user can detect an error condition before the kernel, it is | 1625 | * Because a user can detect an error condition before the kernel, it is |
1619 | * quite possible for this routine to act as the kernel's EEH detection | 1626 | * quite possible for this routine to act as the kernel's EEH detection |
1620 | * source (MMIO read of mbox_r). Because of this, there is a window of | 1627 | * source (MMIO read of mbox_r). Because of this, there is a window of |
@@ -1642,9 +1649,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, | |||
1642 | int rc = 0; | 1649 | int rc = 0; |
1643 | 1650 | ||
1644 | atomic_inc(&cfg->recovery_threads); | 1651 | atomic_inc(&cfg->recovery_threads); |
1652 | up_read(&cfg->ioctl_rwsem); | ||
1645 | rc = mutex_lock_interruptible(mutex); | 1653 | rc = mutex_lock_interruptible(mutex); |
1654 | down_read(&cfg->ioctl_rwsem); | ||
1646 | if (rc) | 1655 | if (rc) |
1647 | goto out; | 1656 | goto out; |
1657 | rc = check_state(cfg); | ||
1658 | if (rc) { | ||
1659 | dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); | ||
1660 | rc = -ENODEV; | ||
1661 | goto out; | ||
1662 | } | ||
1648 | 1663 | ||
1649 | dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", | 1664 | dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", |
1650 | __func__, recover->reason, rctxid); | 1665 | __func__, recover->reason, rctxid); |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a655cf29c16f..752b5c9d1ab2 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -190,15 +190,18 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, | |||
190 | ALUA_FAILOVER_RETRIES, NULL, req_flags); | 190 | ALUA_FAILOVER_RETRIES, NULL, req_flags); |
191 | } | 191 | } |
192 | 192 | ||
193 | struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, | 193 | static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, |
194 | int group_id) | 194 | int group_id) |
195 | { | 195 | { |
196 | struct alua_port_group *pg; | 196 | struct alua_port_group *pg; |
197 | 197 | ||
198 | if (!id_str || !id_size || !strlen(id_str)) | ||
199 | return NULL; | ||
200 | |||
198 | list_for_each_entry(pg, &port_group_list, node) { | 201 | list_for_each_entry(pg, &port_group_list, node) { |
199 | if (pg->group_id != group_id) | 202 | if (pg->group_id != group_id) |
200 | continue; | 203 | continue; |
201 | if (pg->device_id_len != id_size) | 204 | if (!pg->device_id_len || pg->device_id_len != id_size) |
202 | continue; | 205 | continue; |
203 | if (strncmp(pg->device_id_str, id_str, id_size)) | 206 | if (strncmp(pg->device_id_str, id_str, id_size)) |
204 | continue; | 207 | continue; |
@@ -219,8 +222,8 @@ struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, | |||
219 | * Allocate a new port_group structure for a given | 222 | * Allocate a new port_group structure for a given |
220 | * device. | 223 | * device. |
221 | */ | 224 | */ |
222 | struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, | 225 | static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, |
223 | int group_id, int tpgs) | 226 | int group_id, int tpgs) |
224 | { | 227 | { |
225 | struct alua_port_group *pg, *tmp_pg; | 228 | struct alua_port_group *pg, *tmp_pg; |
226 | 229 | ||
@@ -232,14 +235,14 @@ struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, | |||
232 | sizeof(pg->device_id_str)); | 235 | sizeof(pg->device_id_str)); |
233 | if (pg->device_id_len <= 0) { | 236 | if (pg->device_id_len <= 0) { |
234 | /* | 237 | /* |
235 | * Internal error: TPGS supported but no device | 238 | * TPGS supported but no device identification found. |
236 | * identifcation found. Disable ALUA support. | 239 | * Generate private device identification. |
237 | */ | 240 | */ |
238 | kfree(pg); | ||
239 | sdev_printk(KERN_INFO, sdev, | 241 | sdev_printk(KERN_INFO, sdev, |
240 | "%s: No device descriptors found\n", | 242 | "%s: No device descriptors found\n", |
241 | ALUA_DH_NAME); | 243 | ALUA_DH_NAME); |
242 | return ERR_PTR(-ENXIO); | 244 | pg->device_id_str[0] = '\0'; |
245 | pg->device_id_len = 0; | ||
243 | } | 246 | } |
244 | pg->group_id = group_id; | 247 | pg->group_id = group_id; |
245 | pg->tpgs = tpgs; | 248 | pg->tpgs = tpgs; |
@@ -354,9 +357,15 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, | |||
354 | return SCSI_DH_NOMEM; | 357 | return SCSI_DH_NOMEM; |
355 | return SCSI_DH_DEV_UNSUPP; | 358 | return SCSI_DH_DEV_UNSUPP; |
356 | } | 359 | } |
357 | sdev_printk(KERN_INFO, sdev, | 360 | if (pg->device_id_len) |
358 | "%s: device %s port group %x rel port %x\n", | 361 | sdev_printk(KERN_INFO, sdev, |
359 | ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); | 362 | "%s: device %s port group %x rel port %x\n", |
363 | ALUA_DH_NAME, pg->device_id_str, | ||
364 | group_id, rel_port); | ||
365 | else | ||
366 | sdev_printk(KERN_INFO, sdev, | ||
367 | "%s: port group %x rel port %x\n", | ||
368 | ALUA_DH_NAME, group_id, rel_port); | ||
360 | 369 | ||
361 | /* Check for existing port group references */ | 370 | /* Check for existing port group references */ |
362 | spin_lock(&h->pg_lock); | 371 | spin_lock(&h->pg_lock); |
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 6c14e68b9e1a..9b5a457d4bca 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c | |||
@@ -34,11 +34,14 @@ | |||
34 | * Definitions for the generic 5380 driver. | 34 | * Definitions for the generic 5380 driver. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define DONT_USE_INTR | ||
38 | |||
39 | #define NCR5380_read(reg) inb(instance->io_port + reg) | 37 | #define NCR5380_read(reg) inb(instance->io_port + reg) |
40 | #define NCR5380_write(reg, value) outb(value, instance->io_port + reg) | 38 | #define NCR5380_write(reg, value) outb(value, instance->io_port + reg) |
41 | 39 | ||
40 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (0) | ||
41 | #define NCR5380_dma_recv_setup(instance, dst, len) (0) | ||
42 | #define NCR5380_dma_send_setup(instance, src, len) (0) | ||
43 | #define NCR5380_dma_residual(instance) (0) | ||
44 | |||
42 | #define NCR5380_implementation_fields /* none */ | 45 | #define NCR5380_implementation_fields /* none */ |
43 | 46 | ||
44 | #include "NCR5380.h" | 47 | #include "NCR5380.h" |
@@ -62,7 +65,6 @@ static struct scsi_host_template dmx3191d_driver_template = { | |||
62 | .cmd_per_lun = 2, | 65 | .cmd_per_lun = 2, |
63 | .use_clustering = DISABLE_CLUSTERING, | 66 | .use_clustering = DISABLE_CLUSTERING, |
64 | .cmd_size = NCR5380_CMD_SIZE, | 67 | .cmd_size = NCR5380_CMD_SIZE, |
65 | .max_sectors = 128, | ||
66 | }; | 68 | }; |
67 | 69 | ||
68 | static int dmx3191d_probe_one(struct pci_dev *pdev, | 70 | static int dmx3191d_probe_one(struct pci_dev *pdev, |
@@ -93,7 +95,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, | |||
93 | */ | 95 | */ |
94 | shost->irq = NO_IRQ; | 96 | shost->irq = NO_IRQ; |
95 | 97 | ||
96 | error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA); | 98 | error = NCR5380_init(shost, 0); |
97 | if (error) | 99 | if (error) |
98 | goto out_host_put; | 100 | goto out_host_put; |
99 | 101 | ||
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 6c736b071cf4..459863f94e46 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c | |||
@@ -1,6 +1,3 @@ | |||
1 | #define PSEUDO_DMA | ||
2 | #define DONT_USE_INTR | ||
3 | |||
4 | /* | 1 | /* |
5 | * DTC 3180/3280 driver, by | 2 | * DTC 3180/3280 driver, by |
6 | * Ray Van Tassle rayvt@comm.mot.com | 3 | * Ray Van Tassle rayvt@comm.mot.com |
@@ -54,7 +51,6 @@ | |||
54 | #include <scsi/scsi_host.h> | 51 | #include <scsi/scsi_host.h> |
55 | 52 | ||
56 | #include "dtc.h" | 53 | #include "dtc.h" |
57 | #define AUTOPROBE_IRQ | ||
58 | #include "NCR5380.h" | 54 | #include "NCR5380.h" |
59 | 55 | ||
60 | /* | 56 | /* |
@@ -229,7 +225,7 @@ found: | |||
229 | instance->base = addr; | 225 | instance->base = addr; |
230 | ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base; | 226 | ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base; |
231 | 227 | ||
232 | if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP)) | 228 | if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP)) |
233 | goto out_unregister; | 229 | goto out_unregister; |
234 | 230 | ||
235 | NCR5380_maybe_reset_bus(instance); | 231 | NCR5380_maybe_reset_bus(instance); |
@@ -244,9 +240,10 @@ found: | |||
244 | if (instance->irq == 255) | 240 | if (instance->irq == 255) |
245 | instance->irq = NO_IRQ; | 241 | instance->irq = NO_IRQ; |
246 | 242 | ||
247 | #ifndef DONT_USE_INTR | ||
248 | /* With interrupts enabled, it will sometimes hang when doing heavy | 243 | /* With interrupts enabled, it will sometimes hang when doing heavy |
249 | * reads. So better not enable them until I finger it out. */ | 244 | * reads. So better not enable them until I finger it out. */ |
245 | instance->irq = NO_IRQ; | ||
246 | |||
250 | if (instance->irq != NO_IRQ) | 247 | if (instance->irq != NO_IRQ) |
251 | if (request_irq(instance->irq, dtc_intr, 0, | 248 | if (request_irq(instance->irq, dtc_intr, 0, |
252 | "dtc", instance)) { | 249 | "dtc", instance)) { |
@@ -258,11 +255,7 @@ found: | |||
258 | printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); | 255 | printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); |
259 | printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); | 256 | printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); |
260 | } | 257 | } |
261 | #else | 258 | |
262 | if (instance->irq != NO_IRQ) | ||
263 | printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); | ||
264 | instance->irq = NO_IRQ; | ||
265 | #endif | ||
266 | dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", | 259 | dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", |
267 | instance->host_no, instance->irq); | 260 | instance->host_no, instance->irq); |
268 | 261 | ||
@@ -323,7 +316,8 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev, | |||
323 | * timeout. | 316 | * timeout. |
324 | */ | 317 | */ |
325 | 318 | ||
326 | static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) | 319 | static inline int dtc_pread(struct Scsi_Host *instance, |
320 | unsigned char *dst, int len) | ||
327 | { | 321 | { |
328 | unsigned char *d = dst; | 322 | unsigned char *d = dst; |
329 | int i; /* For counting time spent in the poll-loop */ | 323 | int i; /* For counting time spent in the poll-loop */ |
@@ -352,8 +346,6 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, | |||
352 | while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS)) | 346 | while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS)) |
353 | ++i; | 347 | ++i; |
354 | rtrc(0); | 348 | rtrc(0); |
355 | if (i > hostdata->spin_max_r) | ||
356 | hostdata->spin_max_r = i; | ||
357 | return (0); | 349 | return (0); |
358 | } | 350 | } |
359 | 351 | ||
@@ -370,7 +362,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, | |||
370 | * timeout. | 362 | * timeout. |
371 | */ | 363 | */ |
372 | 364 | ||
373 | static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) | 365 | static inline int dtc_pwrite(struct Scsi_Host *instance, |
366 | unsigned char *src, int len) | ||
374 | { | 367 | { |
375 | int i; | 368 | int i; |
376 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 369 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
@@ -400,8 +393,6 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, | |||
400 | rtrc(7); | 393 | rtrc(7); |
401 | /* Check for parity error here. fixme. */ | 394 | /* Check for parity error here. fixme. */ |
402 | rtrc(0); | 395 | rtrc(0); |
403 | if (i > hostdata->spin_max_w) | ||
404 | hostdata->spin_max_w = i; | ||
405 | return (0); | 396 | return (0); |
406 | } | 397 | } |
407 | 398 | ||
@@ -440,8 +431,6 @@ static struct scsi_host_template driver_template = { | |||
440 | .detect = dtc_detect, | 431 | .detect = dtc_detect, |
441 | .release = dtc_release, | 432 | .release = dtc_release, |
442 | .proc_name = "dtc3x80", | 433 | .proc_name = "dtc3x80", |
443 | .show_info = dtc_show_info, | ||
444 | .write_info = dtc_write_info, | ||
445 | .info = dtc_info, | 434 | .info = dtc_info, |
446 | .queuecommand = dtc_queue_command, | 435 | .queuecommand = dtc_queue_command, |
447 | .eh_abort_handler = dtc_abort, | 436 | .eh_abort_handler = dtc_abort, |
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 56732cba8aba..fcb0a8ea7bda 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h | |||
@@ -21,14 +21,17 @@ | |||
21 | 21 | ||
22 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ | 22 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ |
23 | dtc_dma_xfer_len(cmd) | 23 | dtc_dma_xfer_len(cmd) |
24 | #define NCR5380_dma_recv_setup dtc_pread | ||
25 | #define NCR5380_dma_send_setup dtc_pwrite | ||
26 | #define NCR5380_dma_residual(instance) (0) | ||
24 | 27 | ||
25 | #define NCR5380_intr dtc_intr | 28 | #define NCR5380_intr dtc_intr |
26 | #define NCR5380_queue_command dtc_queue_command | 29 | #define NCR5380_queue_command dtc_queue_command |
27 | #define NCR5380_abort dtc_abort | 30 | #define NCR5380_abort dtc_abort |
28 | #define NCR5380_bus_reset dtc_bus_reset | 31 | #define NCR5380_bus_reset dtc_bus_reset |
29 | #define NCR5380_info dtc_info | 32 | #define NCR5380_info dtc_info |
30 | #define NCR5380_show_info dtc_show_info | 33 | |
31 | #define NCR5380_write_info dtc_write_info | 34 | #define NCR5380_io_delay(x) udelay(x) |
32 | 35 | ||
33 | /* 15 12 11 10 | 36 | /* 15 12 11 10 |
34 | 1001 1100 0000 0000 */ | 37 | 1001 1100 0000 0000 */ |
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index ca8003f0d8a3..4299fa485622 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -729,6 +729,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev | |||
729 | break; | 729 | break; |
730 | case 0x24: | 730 | case 0x24: |
731 | SD(sh)->EATA_revision = 'z'; | 731 | SD(sh)->EATA_revision = 'z'; |
732 | break; | ||
732 | default: | 733 | default: |
733 | SD(sh)->EATA_revision = '?'; | 734 | SD(sh)->EATA_revision = '?'; |
734 | } | 735 | } |
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 33581ba4386e..2aca4d16f39e 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c | |||
@@ -246,7 +246,7 @@ static struct scsi_host_template driver_template = { | |||
246 | .eh_target_reset_handler = esas2r_target_reset, | 246 | .eh_target_reset_handler = esas2r_target_reset, |
247 | .can_queue = 128, | 247 | .can_queue = 128, |
248 | .this_id = -1, | 248 | .this_id = -1, |
249 | .sg_tablesize = SCSI_MAX_SG_SEGMENTS, | 249 | .sg_tablesize = SG_CHUNK_SIZE, |
250 | .cmd_per_lun = | 250 | .cmd_per_lun = |
251 | ESAS2R_DEFAULT_CMD_PER_LUN, | 251 | ESAS2R_DEFAULT_CMD_PER_LUN, |
252 | .present = 0, | 252 | .present = 0, |
@@ -271,7 +271,7 @@ module_param(num_sg_lists, int, 0); | |||
271 | MODULE_PARM_DESC(num_sg_lists, | 271 | MODULE_PARM_DESC(num_sg_lists, |
272 | "Number of scatter/gather lists. Default 1024."); | 272 | "Number of scatter/gather lists. Default 1024."); |
273 | 273 | ||
274 | int sg_tablesize = SCSI_MAX_SG_SEGMENTS; | 274 | int sg_tablesize = SG_CHUNK_SIZE; |
275 | module_param(sg_tablesize, int, 0); | 275 | module_param(sg_tablesize, int, 0); |
276 | MODULE_PARM_DESC(sg_tablesize, | 276 | MODULE_PARM_DESC(sg_tablesize, |
277 | "Maximum number of entries in a scatter/gather table."); | 277 | "Maximum number of entries in a scatter/gather table."); |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index ce129e595b55..9ddc9200e0a4 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | #define DRV_NAME "fnic" | 40 | #define DRV_NAME "fnic" |
41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
42 | #define DRV_VERSION "1.6.0.17a" | 42 | #define DRV_VERSION "1.6.0.21" |
43 | #define PFX DRV_NAME ": " | 43 | #define PFX DRV_NAME ": " |
44 | #define DFX DRV_NAME "%d: " | 44 | #define DFX DRV_NAME "%d: " |
45 | 45 | ||
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index f3032ca5051b..d9fd2f841585 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -439,7 +439,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
439 | int sg_count = 0; | 439 | int sg_count = 0; |
440 | unsigned long flags = 0; | 440 | unsigned long flags = 0; |
441 | unsigned long ptr; | 441 | unsigned long ptr; |
442 | struct fc_rport_priv *rdata; | ||
443 | spinlock_t *io_lock = NULL; | 442 | spinlock_t *io_lock = NULL; |
444 | int io_lock_acquired = 0; | 443 | int io_lock_acquired = 0; |
445 | 444 | ||
@@ -455,14 +454,17 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
455 | return 0; | 454 | return 0; |
456 | } | 455 | } |
457 | 456 | ||
458 | rdata = lp->tt.rport_lookup(lp, rport->port_id); | 457 | if (rport) { |
459 | if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) { | 458 | struct fc_rport_libfc_priv *rp = rport->dd_data; |
460 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 459 | |
461 | "returning IO as rport is removed\n"); | 460 | if (!rp || rp->rp_state != RPORT_ST_READY) { |
462 | atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); | 461 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
463 | sc->result = DID_NO_CONNECT; | 462 | "returning DID_NO_CONNECT for IO as rport is removed\n"); |
464 | done(sc); | 463 | atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); |
465 | return 0; | 464 | sc->result = DID_NO_CONNECT<<16; |
465 | done(sc); | ||
466 | return 0; | ||
467 | } | ||
466 | } | 468 | } |
467 | 469 | ||
468 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | 470 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
@@ -1091,6 +1093,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
1091 | atomic64_inc( | 1093 | atomic64_inc( |
1092 | &term_stats->terminate_fw_timeouts); | 1094 | &term_stats->terminate_fw_timeouts); |
1093 | break; | 1095 | break; |
1096 | case FCPIO_ITMF_REJECTED: | ||
1097 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
1098 | "abort reject recd. id %d\n", | ||
1099 | (int)(id & FNIC_TAG_MASK)); | ||
1100 | break; | ||
1094 | case FCPIO_IO_NOT_FOUND: | 1101 | case FCPIO_IO_NOT_FOUND: |
1095 | if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) | 1102 | if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) |
1096 | atomic64_inc(&abts_stats->abort_io_not_found); | 1103 | atomic64_inc(&abts_stats->abort_io_not_found); |
@@ -1111,9 +1118,15 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
1111 | spin_unlock_irqrestore(io_lock, flags); | 1118 | spin_unlock_irqrestore(io_lock, flags); |
1112 | return; | 1119 | return; |
1113 | } | 1120 | } |
1114 | CMD_ABTS_STATUS(sc) = hdr_status; | 1121 | |
1115 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; | 1122 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; |
1116 | 1123 | ||
1124 | /* If the status is IO not found consider it as success */ | ||
1125 | if (hdr_status == FCPIO_IO_NOT_FOUND) | ||
1126 | CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; | ||
1127 | else | ||
1128 | CMD_ABTS_STATUS(sc) = hdr_status; | ||
1129 | |||
1117 | atomic64_dec(&fnic_stats->io_stats.active_ios); | 1130 | atomic64_dec(&fnic_stats->io_stats.active_ios); |
1118 | if (atomic64_read(&fnic->io_cmpl_skip)) | 1131 | if (atomic64_read(&fnic->io_cmpl_skip)) |
1119 | atomic64_dec(&fnic->io_cmpl_skip); | 1132 | atomic64_dec(&fnic->io_cmpl_skip); |
@@ -1926,21 +1939,31 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1926 | 1939 | ||
1927 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | 1940 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; |
1928 | 1941 | ||
1942 | start_time = io_req->start_time; | ||
1929 | /* | 1943 | /* |
1930 | * firmware completed the abort, check the status, | 1944 | * firmware completed the abort, check the status, |
1931 | * free the io_req irrespective of failure or success | 1945 | * free the io_req if successful. If abort fails, |
1946 | * Device reset will clean the I/O. | ||
1932 | */ | 1947 | */ |
1933 | if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) | 1948 | if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) |
1949 | CMD_SP(sc) = NULL; | ||
1950 | else { | ||
1934 | ret = FAILED; | 1951 | ret = FAILED; |
1935 | 1952 | spin_unlock_irqrestore(io_lock, flags); | |
1936 | CMD_SP(sc) = NULL; | 1953 | goto fnic_abort_cmd_end; |
1954 | } | ||
1937 | 1955 | ||
1938 | spin_unlock_irqrestore(io_lock, flags); | 1956 | spin_unlock_irqrestore(io_lock, flags); |
1939 | 1957 | ||
1940 | start_time = io_req->start_time; | ||
1941 | fnic_release_ioreq_buf(fnic, io_req, sc); | 1958 | fnic_release_ioreq_buf(fnic, io_req, sc); |
1942 | mempool_free(io_req, fnic->io_req_pool); | 1959 | mempool_free(io_req, fnic->io_req_pool); |
1943 | 1960 | ||
1961 | if (sc->scsi_done) { | ||
1962 | /* Call SCSI completion function to complete the IO */ | ||
1963 | sc->result = (DID_ABORT << 16); | ||
1964 | sc->scsi_done(sc); | ||
1965 | } | ||
1966 | |||
1944 | fnic_abort_cmd_end: | 1967 | fnic_abort_cmd_end: |
1945 | FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, | 1968 | FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, |
1946 | sc->request->tag, sc, | 1969 | sc->request->tag, sc, |
@@ -2018,7 +2041,9 @@ lr_io_req_end: | |||
2018 | * successfully aborted, 1 otherwise | 2041 | * successfully aborted, 1 otherwise |
2019 | */ | 2042 | */ |
2020 | static int fnic_clean_pending_aborts(struct fnic *fnic, | 2043 | static int fnic_clean_pending_aborts(struct fnic *fnic, |
2021 | struct scsi_cmnd *lr_sc) | 2044 | struct scsi_cmnd *lr_sc, |
2045 | bool new_sc) | ||
2046 | |||
2022 | { | 2047 | { |
2023 | int tag, abt_tag; | 2048 | int tag, abt_tag; |
2024 | struct fnic_io_req *io_req; | 2049 | struct fnic_io_req *io_req; |
@@ -2036,10 +2061,10 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
2036 | spin_lock_irqsave(io_lock, flags); | 2061 | spin_lock_irqsave(io_lock, flags); |
2037 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 2062 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
2038 | /* | 2063 | /* |
2039 | * ignore this lun reset cmd or cmds that do not belong to | 2064 | * ignore this lun reset cmd if issued using new SC |
2040 | * this lun | 2065 | * or cmds that do not belong to this lun |
2041 | */ | 2066 | */ |
2042 | if (!sc || sc == lr_sc || sc->device != lun_dev) { | 2067 | if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) { |
2043 | spin_unlock_irqrestore(io_lock, flags); | 2068 | spin_unlock_irqrestore(io_lock, flags); |
2044 | continue; | 2069 | continue; |
2045 | } | 2070 | } |
@@ -2145,11 +2170,27 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
2145 | goto clean_pending_aborts_end; | 2170 | goto clean_pending_aborts_end; |
2146 | } | 2171 | } |
2147 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | 2172 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; |
2148 | CMD_SP(sc) = NULL; | 2173 | |
2174 | /* original sc used for lr is handled by dev reset code */ | ||
2175 | if (sc != lr_sc) | ||
2176 | CMD_SP(sc) = NULL; | ||
2149 | spin_unlock_irqrestore(io_lock, flags); | 2177 | spin_unlock_irqrestore(io_lock, flags); |
2150 | 2178 | ||
2151 | fnic_release_ioreq_buf(fnic, io_req, sc); | 2179 | /* original sc used for lr is handled by dev reset code */ |
2152 | mempool_free(io_req, fnic->io_req_pool); | 2180 | if (sc != lr_sc) { |
2181 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
2182 | mempool_free(io_req, fnic->io_req_pool); | ||
2183 | } | ||
2184 | |||
2185 | /* | ||
2186 | * Any IO is returned during reset, it needs to call scsi_done | ||
2187 | * to return the scsi_cmnd to upper layer. | ||
2188 | */ | ||
2189 | if (sc->scsi_done) { | ||
2190 | /* Set result to let upper SCSI layer retry */ | ||
2191 | sc->result = DID_RESET << 16; | ||
2192 | sc->scsi_done(sc); | ||
2193 | } | ||
2153 | } | 2194 | } |
2154 | 2195 | ||
2155 | schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); | 2196 | schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); |
@@ -2243,6 +2284,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
2243 | int tag = 0; | 2284 | int tag = 0; |
2244 | DECLARE_COMPLETION_ONSTACK(tm_done); | 2285 | DECLARE_COMPLETION_ONSTACK(tm_done); |
2245 | int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ | 2286 | int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ |
2287 | bool new_sc = 0; | ||
2246 | 2288 | ||
2247 | /* Wait for rport to unblock */ | 2289 | /* Wait for rport to unblock */ |
2248 | fc_block_scsi_eh(sc); | 2290 | fc_block_scsi_eh(sc); |
@@ -2288,13 +2330,12 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
2288 | * fix the way the EH ioctls work for real, but until | 2330 | * fix the way the EH ioctls work for real, but until |
2289 | * that happens we fail these explicit requests here. | 2331 | * that happens we fail these explicit requests here. |
2290 | */ | 2332 | */ |
2291 | if (shost_use_blk_mq(sc->device->host)) | ||
2292 | goto fnic_device_reset_end; | ||
2293 | 2333 | ||
2294 | tag = fnic_scsi_host_start_tag(fnic, sc); | 2334 | tag = fnic_scsi_host_start_tag(fnic, sc); |
2295 | if (unlikely(tag == SCSI_NO_TAG)) | 2335 | if (unlikely(tag == SCSI_NO_TAG)) |
2296 | goto fnic_device_reset_end; | 2336 | goto fnic_device_reset_end; |
2297 | tag_gen_flag = 1; | 2337 | tag_gen_flag = 1; |
2338 | new_sc = 1; | ||
2298 | } | 2339 | } |
2299 | io_lock = fnic_io_lock_hash(fnic, sc); | 2340 | io_lock = fnic_io_lock_hash(fnic, sc); |
2300 | spin_lock_irqsave(io_lock, flags); | 2341 | spin_lock_irqsave(io_lock, flags); |
@@ -2429,7 +2470,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
2429 | * the lun reset cmd. If all cmds get cleaned, the lun reset | 2470 | * the lun reset cmd. If all cmds get cleaned, the lun reset |
2430 | * succeeds | 2471 | * succeeds |
2431 | */ | 2472 | */ |
2432 | if (fnic_clean_pending_aborts(fnic, sc)) { | 2473 | if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { |
2433 | spin_lock_irqsave(io_lock, flags); | 2474 | spin_lock_irqsave(io_lock, flags); |
2434 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 2475 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
2435 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 2476 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 90091e693020..516bd6c4f442 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c | |||
@@ -18,50 +18,10 @@ | |||
18 | * | 18 | * |
19 | * Added ISAPNP support for DTC436 adapters, | 19 | * Added ISAPNP support for DTC436 adapters, |
20 | * Thomas Sailer, sailer@ife.ee.ethz.ch | 20 | * Thomas Sailer, sailer@ife.ee.ethz.ch |
21 | */ | ||
22 | |||
23 | /* | ||
24 | * TODO : flesh out DMA support, find some one actually using this (I have | ||
25 | * a memory mapped Trantor board that works fine) | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * The card is detected and initialized in one of several ways : | ||
30 | * 1. With command line overrides - NCR5380=port,irq may be | ||
31 | * used on the LILO command line to override the defaults. | ||
32 | * | ||
33 | * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is | ||
34 | * specified as an array of address, irq, dma, board tuples. Ie, for | ||
35 | * one board at 0x350, IRQ5, no dma, I could say | ||
36 | * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}} | ||
37 | * | ||
38 | * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an | ||
39 | * IRQ line if overridden on the command line. | ||
40 | * | 21 | * |
41 | * 3. When included as a module, with arguments passed on the command line: | 22 | * See Documentation/scsi/g_NCR5380.txt for more info. |
42 | * ncr_irq=xx the interrupt | ||
43 | * ncr_addr=xx the port or base address (for port or memory | ||
44 | * mapped, resp.) | ||
45 | * ncr_dma=xx the DMA | ||
46 | * ncr_5380=1 to set up for a NCR5380 board | ||
47 | * ncr_53c400=1 to set up for a NCR53C400 board | ||
48 | * e.g. | ||
49 | * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 | ||
50 | * for a port mapped NCR5380 board or | ||
51 | * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 | ||
52 | * for a memory mapped NCR53C400 board with interrupts disabled. | ||
53 | * | ||
54 | * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an | ||
55 | * IRQ line if overridden on the command line. | ||
56 | * | ||
57 | */ | 23 | */ |
58 | 24 | ||
59 | #define AUTOPROBE_IRQ | ||
60 | |||
61 | #ifdef CONFIG_SCSI_GENERIC_NCR53C400 | ||
62 | #define PSEUDO_DMA | ||
63 | #endif | ||
64 | |||
65 | #include <asm/io.h> | 25 | #include <asm/io.h> |
66 | #include <linux/blkdev.h> | 26 | #include <linux/blkdev.h> |
67 | #include <linux/module.h> | 27 | #include <linux/module.h> |
@@ -270,7 +230,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
270 | #ifndef SCSI_G_NCR5380_MEM | 230 | #ifndef SCSI_G_NCR5380_MEM |
271 | int i; | 231 | int i; |
272 | int port_idx = -1; | 232 | int port_idx = -1; |
273 | unsigned long region_size = 16; | 233 | unsigned long region_size; |
274 | #endif | 234 | #endif |
275 | static unsigned int __initdata ncr_53c400a_ports[] = { | 235 | static unsigned int __initdata ncr_53c400a_ports[] = { |
276 | 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 | 236 | 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 |
@@ -290,6 +250,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
290 | #ifdef SCSI_G_NCR5380_MEM | 250 | #ifdef SCSI_G_NCR5380_MEM |
291 | unsigned long base; | 251 | unsigned long base; |
292 | void __iomem *iomem; | 252 | void __iomem *iomem; |
253 | resource_size_t iomem_size; | ||
293 | #endif | 254 | #endif |
294 | 255 | ||
295 | if (ncr_irq) | 256 | if (ncr_irq) |
@@ -350,25 +311,17 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
350 | flags = 0; | 311 | flags = 0; |
351 | switch (overrides[current_override].board) { | 312 | switch (overrides[current_override].board) { |
352 | case BOARD_NCR5380: | 313 | case BOARD_NCR5380: |
353 | flags = FLAG_NO_PSEUDO_DMA; | 314 | flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; |
354 | break; | ||
355 | case BOARD_NCR53C400: | ||
356 | #ifdef PSEUDO_DMA | ||
357 | flags = FLAG_NO_DMA_FIXUP; | ||
358 | #endif | ||
359 | break; | 315 | break; |
360 | case BOARD_NCR53C400A: | 316 | case BOARD_NCR53C400A: |
361 | flags = FLAG_NO_DMA_FIXUP; | ||
362 | ports = ncr_53c400a_ports; | 317 | ports = ncr_53c400a_ports; |
363 | magic = ncr_53c400a_magic; | 318 | magic = ncr_53c400a_magic; |
364 | break; | 319 | break; |
365 | case BOARD_HP_C2502: | 320 | case BOARD_HP_C2502: |
366 | flags = FLAG_NO_DMA_FIXUP; | ||
367 | ports = ncr_53c400a_ports; | 321 | ports = ncr_53c400a_ports; |
368 | magic = hp_c2502_magic; | 322 | magic = hp_c2502_magic; |
369 | break; | 323 | break; |
370 | case BOARD_DTC3181E: | 324 | case BOARD_DTC3181E: |
371 | flags = FLAG_NO_DMA_FIXUP; | ||
372 | ports = dtc_3181e_ports; | 325 | ports = dtc_3181e_ports; |
373 | magic = ncr_53c400a_magic; | 326 | magic = ncr_53c400a_magic; |
374 | break; | 327 | break; |
@@ -381,20 +334,22 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
381 | /* Disable the adapter and look for a free io port */ | 334 | /* Disable the adapter and look for a free io port */ |
382 | magic_configure(-1, 0, magic); | 335 | magic_configure(-1, 0, magic); |
383 | 336 | ||
337 | region_size = 16; | ||
338 | |||
384 | if (overrides[current_override].NCR5380_map_name != PORT_AUTO) | 339 | if (overrides[current_override].NCR5380_map_name != PORT_AUTO) |
385 | for (i = 0; ports[i]; i++) { | 340 | for (i = 0; ports[i]; i++) { |
386 | if (!request_region(ports[i], 16, "ncr53c80")) | 341 | if (!request_region(ports[i], region_size, "ncr53c80")) |
387 | continue; | 342 | continue; |
388 | if (overrides[current_override].NCR5380_map_name == ports[i]) | 343 | if (overrides[current_override].NCR5380_map_name == ports[i]) |
389 | break; | 344 | break; |
390 | release_region(ports[i], 16); | 345 | release_region(ports[i], region_size); |
391 | } else | 346 | } else |
392 | for (i = 0; ports[i]; i++) { | 347 | for (i = 0; ports[i]; i++) { |
393 | if (!request_region(ports[i], 16, "ncr53c80")) | 348 | if (!request_region(ports[i], region_size, "ncr53c80")) |
394 | continue; | 349 | continue; |
395 | if (inb(ports[i]) == 0xff) | 350 | if (inb(ports[i]) == 0xff) |
396 | break; | 351 | break; |
397 | release_region(ports[i], 16); | 352 | release_region(ports[i], region_size); |
398 | } | 353 | } |
399 | if (ports[i]) { | 354 | if (ports[i]) { |
400 | /* At this point we have our region reserved */ | 355 | /* At this point we have our region reserved */ |
@@ -410,17 +365,19 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
410 | else | 365 | else |
411 | { | 366 | { |
412 | /* Not a 53C400A style setup - just grab */ | 367 | /* Not a 53C400A style setup - just grab */ |
413 | if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380"))) | 368 | region_size = 8; |
369 | if (!request_region(overrides[current_override].NCR5380_map_name, | ||
370 | region_size, "ncr5380")) | ||
414 | continue; | 371 | continue; |
415 | region_size = NCR5380_region_size; | ||
416 | } | 372 | } |
417 | #else | 373 | #else |
418 | base = overrides[current_override].NCR5380_map_name; | 374 | base = overrides[current_override].NCR5380_map_name; |
419 | if (!request_mem_region(base, NCR5380_region_size, "ncr5380")) | 375 | iomem_size = NCR53C400_region_size; |
376 | if (!request_mem_region(base, iomem_size, "ncr5380")) | ||
420 | continue; | 377 | continue; |
421 | iomem = ioremap(base, NCR5380_region_size); | 378 | iomem = ioremap(base, iomem_size); |
422 | if (!iomem) { | 379 | if (!iomem) { |
423 | release_mem_region(base, NCR5380_region_size); | 380 | release_mem_region(base, iomem_size); |
424 | continue; | 381 | continue; |
425 | } | 382 | } |
426 | #endif | 383 | #endif |
@@ -458,6 +415,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
458 | #else | 415 | #else |
459 | instance->base = overrides[current_override].NCR5380_map_name; | 416 | instance->base = overrides[current_override].NCR5380_map_name; |
460 | hostdata->iomem = iomem; | 417 | hostdata->iomem = iomem; |
418 | hostdata->iomem_size = iomem_size; | ||
461 | switch (overrides[current_override].board) { | 419 | switch (overrides[current_override].board) { |
462 | case BOARD_NCR53C400: | 420 | case BOARD_NCR53C400: |
463 | hostdata->c400_ctl_status = 0x100; | 421 | hostdata->c400_ctl_status = 0x100; |
@@ -472,7 +430,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) | |||
472 | } | 430 | } |
473 | #endif | 431 | #endif |
474 | 432 | ||
475 | if (NCR5380_init(instance, flags)) | 433 | if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) |
476 | goto out_unregister; | 434 | goto out_unregister; |
477 | 435 | ||
478 | switch (overrides[current_override].board) { | 436 | switch (overrides[current_override].board) { |
@@ -524,7 +482,7 @@ out_release: | |||
524 | release_region(overrides[current_override].NCR5380_map_name, region_size); | 482 | release_region(overrides[current_override].NCR5380_map_name, region_size); |
525 | #else | 483 | #else |
526 | iounmap(iomem); | 484 | iounmap(iomem); |
527 | release_mem_region(base, NCR5380_region_size); | 485 | release_mem_region(base, iomem_size); |
528 | #endif | 486 | #endif |
529 | return count; | 487 | return count; |
530 | } | 488 | } |
@@ -546,45 +504,18 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance) | |||
546 | #ifndef SCSI_G_NCR5380_MEM | 504 | #ifndef SCSI_G_NCR5380_MEM |
547 | release_region(instance->io_port, instance->n_io_port); | 505 | release_region(instance->io_port, instance->n_io_port); |
548 | #else | 506 | #else |
549 | iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem); | 507 | { |
550 | release_mem_region(instance->base, NCR5380_region_size); | 508 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
551 | #endif | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | #ifdef BIOSPARAM | ||
556 | /** | ||
557 | * generic_NCR5380_biosparam | ||
558 | * @disk: disk to compute geometry for | ||
559 | * @dev: device identifier for this disk | ||
560 | * @ip: sizes to fill in | ||
561 | * | ||
562 | * Generates a BIOS / DOS compatible H-C-S mapping for the specified | ||
563 | * device / size. | ||
564 | * | ||
565 | * XXX Most SCSI boards use this mapping, I could be incorrect. Someone | ||
566 | * using hard disks on a trantor should verify that this mapping | ||
567 | * corresponds to that used by the BIOS / ASPI driver by running the linux | ||
568 | * fdisk program and matching the H_C_S coordinates to what DOS uses. | ||
569 | * | ||
570 | * Locks: none | ||
571 | */ | ||
572 | 509 | ||
573 | static int | 510 | iounmap(hostdata->iomem); |
574 | generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, | 511 | release_mem_region(instance->base, hostdata->iomem_size); |
575 | sector_t capacity, int *ip) | 512 | } |
576 | { | 513 | #endif |
577 | ip[0] = 64; | ||
578 | ip[1] = 32; | ||
579 | ip[2] = capacity >> 11; | ||
580 | return 0; | 514 | return 0; |
581 | } | 515 | } |
582 | #endif | ||
583 | |||
584 | #ifdef PSEUDO_DMA | ||
585 | 516 | ||
586 | /** | 517 | /** |
587 | * NCR5380_pread - pseudo DMA read | 518 | * generic_NCR5380_pread - pseudo DMA read |
588 | * @instance: adapter to read from | 519 | * @instance: adapter to read from |
589 | * @dst: buffer to read into | 520 | * @dst: buffer to read into |
590 | * @len: buffer length | 521 | * @len: buffer length |
@@ -593,7 +524,8 @@ generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, | |||
593 | * controller | 524 | * controller |
594 | */ | 525 | */ |
595 | 526 | ||
596 | static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) | 527 | static inline int generic_NCR5380_pread(struct Scsi_Host *instance, |
528 | unsigned char *dst, int len) | ||
597 | { | 529 | { |
598 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 530 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
599 | int blocks = len / 128; | 531 | int blocks = len / 128; |
@@ -661,7 +593,7 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, | |||
661 | } | 593 | } |
662 | 594 | ||
663 | /** | 595 | /** |
664 | * NCR5380_write - pseudo DMA write | 596 | * generic_NCR5380_pwrite - pseudo DMA write |
665 | * @instance: adapter to read from | 597 | * @instance: adapter to read from |
666 | * @dst: buffer to read into | 598 | * @dst: buffer to read into |
667 | * @len: buffer length | 599 | * @len: buffer length |
@@ -670,7 +602,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, | |||
670 | * controller | 602 | * controller |
671 | */ | 603 | */ |
672 | 604 | ||
673 | static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) | 605 | static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, |
606 | unsigned char *src, int len) | ||
674 | { | 607 | { |
675 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 608 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
676 | int blocks = len / 128; | 609 | int blocks = len / 128; |
@@ -738,10 +671,15 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, | |||
738 | return 0; | 671 | return 0; |
739 | } | 672 | } |
740 | 673 | ||
741 | static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) | 674 | static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, |
675 | struct scsi_cmnd *cmd) | ||
742 | { | 676 | { |
677 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
743 | int transfersize = cmd->transfersize; | 678 | int transfersize = cmd->transfersize; |
744 | 679 | ||
680 | if (hostdata->flags & FLAG_NO_PSEUDO_DMA) | ||
681 | return 0; | ||
682 | |||
745 | /* Limit transfers to 32K, for xx400 & xx406 | 683 | /* Limit transfers to 32K, for xx400 & xx406 |
746 | * pseudoDMA that transfers in 128 bytes blocks. | 684 | * pseudoDMA that transfers in 128 bytes blocks. |
747 | */ | 685 | */ |
@@ -756,8 +694,6 @@ static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) | |||
756 | return transfersize; | 694 | return transfersize; |
757 | } | 695 | } |
758 | 696 | ||
759 | #endif /* PSEUDO_DMA */ | ||
760 | |||
761 | /* | 697 | /* |
762 | * Include the NCR5380 core code that we build our driver around | 698 | * Include the NCR5380 core code that we build our driver around |
763 | */ | 699 | */ |
@@ -773,7 +709,6 @@ static struct scsi_host_template driver_template = { | |||
773 | .queuecommand = generic_NCR5380_queue_command, | 709 | .queuecommand = generic_NCR5380_queue_command, |
774 | .eh_abort_handler = generic_NCR5380_abort, | 710 | .eh_abort_handler = generic_NCR5380_abort, |
775 | .eh_bus_reset_handler = generic_NCR5380_bus_reset, | 711 | .eh_bus_reset_handler = generic_NCR5380_bus_reset, |
776 | .bios_param = NCR5380_BIOSPARAM, | ||
777 | .can_queue = 16, | 712 | .can_queue = 16, |
778 | .this_id = 7, | 713 | .this_id = 7, |
779 | .sg_tablesize = SG_ALL, | 714 | .sg_tablesize = SG_ALL, |
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 6f3d2ac4f185..595177428d76 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h | |||
@@ -14,13 +14,6 @@ | |||
14 | #ifndef GENERIC_NCR5380_H | 14 | #ifndef GENERIC_NCR5380_H |
15 | #define GENERIC_NCR5380_H | 15 | #define GENERIC_NCR5380_H |
16 | 16 | ||
17 | #ifdef CONFIG_SCSI_GENERIC_NCR53C400 | ||
18 | #define BIOSPARAM | ||
19 | #define NCR5380_BIOSPARAM generic_NCR5380_biosparam | ||
20 | #else | ||
21 | #define NCR5380_BIOSPARAM NULL | ||
22 | #endif | ||
23 | |||
24 | #define __STRVAL(x) #x | 17 | #define __STRVAL(x) #x |
25 | #define STRVAL(x) __STRVAL(x) | 18 | #define STRVAL(x) __STRVAL(x) |
26 | 19 | ||
@@ -30,12 +23,6 @@ | |||
30 | #define NCR5380_map_type int | 23 | #define NCR5380_map_type int |
31 | #define NCR5380_map_name port | 24 | #define NCR5380_map_name port |
32 | 25 | ||
33 | #ifdef CONFIG_SCSI_GENERIC_NCR53C400 | ||
34 | #define NCR5380_region_size 16 | ||
35 | #else | ||
36 | #define NCR5380_region_size 8 | ||
37 | #endif | ||
38 | |||
39 | #define NCR5380_read(reg) \ | 26 | #define NCR5380_read(reg) \ |
40 | inb(instance->io_port + (reg)) | 27 | inb(instance->io_port + (reg)) |
41 | #define NCR5380_write(reg, value) \ | 28 | #define NCR5380_write(reg, value) \ |
@@ -55,7 +42,7 @@ | |||
55 | #define NCR5380_map_name base | 42 | #define NCR5380_map_name base |
56 | #define NCR53C400_mem_base 0x3880 | 43 | #define NCR53C400_mem_base 0x3880 |
57 | #define NCR53C400_host_buffer 0x3900 | 44 | #define NCR53C400_host_buffer 0x3900 |
58 | #define NCR5380_region_size 0x3a00 | 45 | #define NCR53C400_region_size 0x3a00 |
59 | 46 | ||
60 | #define NCR5380_read(reg) \ | 47 | #define NCR5380_read(reg) \ |
61 | readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ | 48 | readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ |
@@ -66,6 +53,7 @@ | |||
66 | 53 | ||
67 | #define NCR5380_implementation_fields \ | 54 | #define NCR5380_implementation_fields \ |
68 | void __iomem *iomem; \ | 55 | void __iomem *iomem; \ |
56 | resource_size_t iomem_size; \ | ||
69 | int c400_ctl_status; \ | 57 | int c400_ctl_status; \ |
70 | int c400_blk_cnt; \ | 58 | int c400_blk_cnt; \ |
71 | int c400_host_buf; | 59 | int c400_host_buf; |
@@ -73,16 +61,18 @@ | |||
73 | #endif | 61 | #endif |
74 | 62 | ||
75 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ | 63 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ |
76 | generic_NCR5380_dma_xfer_len(cmd) | 64 | generic_NCR5380_dma_xfer_len(instance, cmd) |
65 | #define NCR5380_dma_recv_setup generic_NCR5380_pread | ||
66 | #define NCR5380_dma_send_setup generic_NCR5380_pwrite | ||
67 | #define NCR5380_dma_residual(instance) (0) | ||
77 | 68 | ||
78 | #define NCR5380_intr generic_NCR5380_intr | 69 | #define NCR5380_intr generic_NCR5380_intr |
79 | #define NCR5380_queue_command generic_NCR5380_queue_command | 70 | #define NCR5380_queue_command generic_NCR5380_queue_command |
80 | #define NCR5380_abort generic_NCR5380_abort | 71 | #define NCR5380_abort generic_NCR5380_abort |
81 | #define NCR5380_bus_reset generic_NCR5380_bus_reset | 72 | #define NCR5380_bus_reset generic_NCR5380_bus_reset |
82 | #define NCR5380_pread generic_NCR5380_pread | ||
83 | #define NCR5380_pwrite generic_NCR5380_pwrite | ||
84 | #define NCR5380_info generic_NCR5380_info | 73 | #define NCR5380_info generic_NCR5380_info |
85 | #define NCR5380_show_info generic_NCR5380_show_info | 74 | |
75 | #define NCR5380_io_delay(x) udelay(x) | ||
86 | 76 | ||
87 | #define BOARD_NCR5380 0 | 77 | #define BOARD_NCR5380 0 |
88 | #define BOARD_NCR53C400 1 | 78 | #define BOARD_NCR53C400 1 |
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 29e89f340b64..d7cab724f203 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <scsi/sas_ata.h> | 23 | #include <scsi/sas_ata.h> |
24 | #include <scsi/libsas.h> | 24 | #include <scsi/libsas.h> |
25 | 25 | ||
26 | #define DRV_VERSION "v1.3" | 26 | #define DRV_VERSION "v1.4" |
27 | 27 | ||
28 | #define HISI_SAS_MAX_PHYS 9 | 28 | #define HISI_SAS_MAX_PHYS 9 |
29 | #define HISI_SAS_MAX_QUEUES 32 | 29 | #define HISI_SAS_MAX_QUEUES 32 |
@@ -133,6 +133,9 @@ struct hisi_sas_hw { | |||
133 | int (*hw_init)(struct hisi_hba *hisi_hba); | 133 | int (*hw_init)(struct hisi_hba *hisi_hba); |
134 | void (*setup_itct)(struct hisi_hba *hisi_hba, | 134 | void (*setup_itct)(struct hisi_hba *hisi_hba, |
135 | struct hisi_sas_device *device); | 135 | struct hisi_sas_device *device); |
136 | int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx, | ||
137 | struct domain_device *device); | ||
138 | struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); | ||
136 | void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); | 139 | void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); |
137 | int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); | 140 | int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); |
138 | void (*start_delivery)(struct hisi_hba *hisi_hba); | 141 | void (*start_delivery)(struct hisi_hba *hisi_hba); |
@@ -298,7 +301,7 @@ struct hisi_sas_command_table_stp { | |||
298 | u8 atapi_cdb[ATAPI_CDB_LEN]; | 301 | u8 atapi_cdb[ATAPI_CDB_LEN]; |
299 | }; | 302 | }; |
300 | 303 | ||
301 | #define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS | 304 | #define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE |
302 | struct hisi_sas_sge_page { | 305 | struct hisi_sas_sge_page { |
303 | struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; | 306 | struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; |
304 | }; | 307 | }; |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 097ab4f27a6b..18dd5ea2c721 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c | |||
@@ -227,7 +227,11 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, | |||
227 | } else | 227 | } else |
228 | n_elem = task->num_scatter; | 228 | n_elem = task->num_scatter; |
229 | 229 | ||
230 | rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); | 230 | if (hisi_hba->hw->slot_index_alloc) |
231 | rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, | ||
232 | device); | ||
233 | else | ||
234 | rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); | ||
231 | if (rc) | 235 | if (rc) |
232 | goto err_out; | 236 | goto err_out; |
233 | rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, | 237 | rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, |
@@ -417,7 +421,10 @@ static int hisi_sas_dev_found(struct domain_device *device) | |||
417 | struct hisi_sas_device *sas_dev; | 421 | struct hisi_sas_device *sas_dev; |
418 | struct device *dev = &hisi_hba->pdev->dev; | 422 | struct device *dev = &hisi_hba->pdev->dev; |
419 | 423 | ||
420 | sas_dev = hisi_sas_alloc_dev(device); | 424 | if (hisi_hba->hw->alloc_dev) |
425 | sas_dev = hisi_hba->hw->alloc_dev(device); | ||
426 | else | ||
427 | sas_dev = hisi_sas_alloc_dev(device); | ||
421 | if (!sas_dev) { | 428 | if (!sas_dev) { |
422 | dev_err(dev, "fail alloc dev: max support %d devices\n", | 429 | dev_err(dev, "fail alloc dev: max support %d devices\n", |
423 | HISI_SAS_MAX_DEVICES); | 430 | HISI_SAS_MAX_DEVICES); |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index b7337476454b..bbe98ecea0bc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | |||
@@ -465,6 +465,62 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, | |||
465 | return readl(regs); | 465 | return readl(regs); |
466 | } | 466 | } |
467 | 467 | ||
468 | /* This function needs to be protected from pre-emption. */ | ||
469 | static int | ||
470 | slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, | ||
471 | struct domain_device *device) | ||
472 | { | ||
473 | unsigned int index = 0; | ||
474 | void *bitmap = hisi_hba->slot_index_tags; | ||
475 | int sata_dev = dev_is_sata(device); | ||
476 | |||
477 | while (1) { | ||
478 | index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, | ||
479 | index); | ||
480 | if (index >= hisi_hba->slot_index_count) | ||
481 | return -SAS_QUEUE_FULL; | ||
482 | /* | ||
483 | * SAS IPTT bit0 should be 1 | ||
484 | */ | ||
485 | if (sata_dev || (index & 1)) | ||
486 | break; | ||
487 | index++; | ||
488 | } | ||
489 | |||
490 | set_bit(index, bitmap); | ||
491 | *slot_idx = index; | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static struct | ||
496 | hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) | ||
497 | { | ||
498 | struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; | ||
499 | struct hisi_sas_device *sas_dev = NULL; | ||
500 | int i, sata_dev = dev_is_sata(device); | ||
501 | |||
502 | spin_lock(&hisi_hba->lock); | ||
503 | for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { | ||
504 | /* | ||
505 | * SATA device id bit0 should be 0 | ||
506 | */ | ||
507 | if (sata_dev && (i & 1)) | ||
508 | continue; | ||
509 | if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { | ||
510 | hisi_hba->devices[i].device_id = i; | ||
511 | sas_dev = &hisi_hba->devices[i]; | ||
512 | sas_dev->dev_status = HISI_SAS_DEV_NORMAL; | ||
513 | sas_dev->dev_type = device->dev_type; | ||
514 | sas_dev->hisi_hba = hisi_hba; | ||
515 | sas_dev->sas_device = device; | ||
516 | break; | ||
517 | } | ||
518 | } | ||
519 | spin_unlock(&hisi_hba->lock); | ||
520 | |||
521 | return sas_dev; | ||
522 | } | ||
523 | |||
468 | static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) | 524 | static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) |
469 | { | 525 | { |
470 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | 526 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); |
@@ -544,7 +600,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, | |||
544 | } | 600 | } |
545 | 601 | ||
546 | qw0 |= ((1 << ITCT_HDR_VALID_OFF) | | 602 | qw0 |= ((1 << ITCT_HDR_VALID_OFF) | |
547 | (device->max_linkrate << ITCT_HDR_MCR_OFF) | | 603 | (device->linkrate << ITCT_HDR_MCR_OFF) | |
548 | (1 << ITCT_HDR_VLN_OFF) | | 604 | (1 << ITCT_HDR_VLN_OFF) | |
549 | (port->id << ITCT_HDR_PORT_ID_OFF)); | 605 | (port->id << ITCT_HDR_PORT_ID_OFF)); |
550 | itct->qw0 = cpu_to_le64(qw0); | 606 | itct->qw0 = cpu_to_le64(qw0); |
@@ -554,10 +610,11 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, | |||
554 | itct->sas_addr = __swab64(itct->sas_addr); | 610 | itct->sas_addr = __swab64(itct->sas_addr); |
555 | 611 | ||
556 | /* qw2 */ | 612 | /* qw2 */ |
557 | itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | | 613 | if (!dev_is_sata(device)) |
558 | (0xff00ULL << ITCT_HDR_BITLT_OFF) | | 614 | itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | |
559 | (0xff00ULL << ITCT_HDR_MCTLT_OFF) | | 615 | (0x1ULL << ITCT_HDR_BITLT_OFF) | |
560 | (0xff00ULL << ITCT_HDR_RTOLT_OFF)); | 616 | (0x32ULL << ITCT_HDR_MCTLT_OFF) | |
617 | (0x1ULL << ITCT_HDR_RTOLT_OFF)); | ||
561 | } | 618 | } |
562 | 619 | ||
563 | static void free_device_v2_hw(struct hisi_hba *hisi_hba, | 620 | static void free_device_v2_hw(struct hisi_hba *hisi_hba, |
@@ -715,7 +772,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) | |||
715 | hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); | 772 | hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); |
716 | hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); | 773 | hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); |
717 | hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); | 774 | hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); |
718 | hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20); | 775 | hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); |
719 | hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); | 776 | hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); |
720 | hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); | 777 | hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); |
721 | hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); | 778 | hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); |
@@ -1993,22 +2050,23 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) | |||
1993 | u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; | 2050 | u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; |
1994 | irqreturn_t res = IRQ_HANDLED; | 2051 | irqreturn_t res = IRQ_HANDLED; |
1995 | u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; | 2052 | u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; |
1996 | int phy_no; | 2053 | int phy_no, offset; |
1997 | 2054 | ||
1998 | phy_no = sas_phy->id; | 2055 | phy_no = sas_phy->id; |
1999 | initial_fis = &hisi_hba->initial_fis[phy_no]; | 2056 | initial_fis = &hisi_hba->initial_fis[phy_no]; |
2000 | fis = &initial_fis->fis; | 2057 | fis = &initial_fis->fis; |
2001 | 2058 | ||
2002 | ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1); | 2059 | offset = 4 * (phy_no / 4); |
2003 | hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no); | 2060 | ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); |
2061 | hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, | ||
2062 | ent_msk | 1 << ((phy_no % 4) * 8)); | ||
2004 | 2063 | ||
2005 | ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1); | 2064 | ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); |
2006 | ent_tmp = ent_int; | 2065 | ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * |
2066 | (phy_no % 4))); | ||
2007 | ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); | 2067 | ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); |
2008 | if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { | 2068 | if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { |
2009 | dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); | 2069 | dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); |
2010 | hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); | ||
2011 | hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); | ||
2012 | res = IRQ_NONE; | 2070 | res = IRQ_NONE; |
2013 | goto end; | 2071 | goto end; |
2014 | } | 2072 | } |
@@ -2056,8 +2114,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) | |||
2056 | queue_work(hisi_hba->wq, &phy->phyup_ws); | 2114 | queue_work(hisi_hba->wq, &phy->phyup_ws); |
2057 | 2115 | ||
2058 | end: | 2116 | end: |
2059 | hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); | 2117 | hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); |
2060 | hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); | 2118 | hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); |
2061 | 2119 | ||
2062 | return res; | 2120 | return res; |
2063 | } | 2121 | } |
@@ -2165,6 +2223,8 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) | |||
2165 | static const struct hisi_sas_hw hisi_sas_v2_hw = { | 2223 | static const struct hisi_sas_hw hisi_sas_v2_hw = { |
2166 | .hw_init = hisi_sas_v2_init, | 2224 | .hw_init = hisi_sas_v2_init, |
2167 | .setup_itct = setup_itct_v2_hw, | 2225 | .setup_itct = setup_itct_v2_hw, |
2226 | .slot_index_alloc = slot_index_alloc_quirk_v2_hw, | ||
2227 | .alloc_dev = alloc_dev_quirk_v2_hw, | ||
2168 | .sl_notify = sl_notify_v2_hw, | 2228 | .sl_notify = sl_notify_v2_hw, |
2169 | .get_wideport_bitmap = get_wideport_bitmap_v2_hw, | 2229 | .get_wideport_bitmap = get_wideport_bitmap_v2_hw, |
2170 | .free_device = free_device_v2_hw, | 2230 | .free_device = free_device_v2_hw, |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5be944c8b71c..ff8dcd5b0631 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -60,7 +60,7 @@ | |||
60 | * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' | 60 | * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' |
61 | * with an optional trailing '-' followed by a byte value (0-255). | 61 | * with an optional trailing '-' followed by a byte value (0-255). |
62 | */ | 62 | */ |
63 | #define HPSA_DRIVER_VERSION "3.4.14-0" | 63 | #define HPSA_DRIVER_VERSION "3.4.16-0" |
64 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 64 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
65 | #define HPSA "hpsa" | 65 | #define HPSA "hpsa" |
66 | 66 | ||
@@ -294,6 +294,9 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h); | |||
294 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, | 294 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
295 | struct ReportExtendedLUNdata *buf, int bufsize); | 295 | struct ReportExtendedLUNdata *buf, int bufsize); |
296 | static int hpsa_luns_changed(struct ctlr_info *h); | 296 | static int hpsa_luns_changed(struct ctlr_info *h); |
297 | static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, | ||
298 | struct hpsa_scsi_dev_t *dev, | ||
299 | unsigned char *scsi3addr); | ||
297 | 300 | ||
298 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) | 301 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
299 | { | 302 | { |
@@ -728,6 +731,29 @@ static ssize_t unique_id_show(struct device *dev, | |||
728 | sn[12], sn[13], sn[14], sn[15]); | 731 | sn[12], sn[13], sn[14], sn[15]); |
729 | } | 732 | } |
730 | 733 | ||
734 | static ssize_t sas_address_show(struct device *dev, | ||
735 | struct device_attribute *attr, char *buf) | ||
736 | { | ||
737 | struct ctlr_info *h; | ||
738 | struct scsi_device *sdev; | ||
739 | struct hpsa_scsi_dev_t *hdev; | ||
740 | unsigned long flags; | ||
741 | u64 sas_address; | ||
742 | |||
743 | sdev = to_scsi_device(dev); | ||
744 | h = sdev_to_hba(sdev); | ||
745 | spin_lock_irqsave(&h->lock, flags); | ||
746 | hdev = sdev->hostdata; | ||
747 | if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { | ||
748 | spin_unlock_irqrestore(&h->lock, flags); | ||
749 | return -ENODEV; | ||
750 | } | ||
751 | sas_address = hdev->sas_address; | ||
752 | spin_unlock_irqrestore(&h->lock, flags); | ||
753 | |||
754 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); | ||
755 | } | ||
756 | |||
731 | static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, | 757 | static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, |
732 | struct device_attribute *attr, char *buf) | 758 | struct device_attribute *attr, char *buf) |
733 | { | 759 | { |
@@ -840,6 +866,7 @@ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); | |||
840 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); | 866 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); |
841 | static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); | 867 | static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); |
842 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); | 868 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); |
869 | static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL); | ||
843 | static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, | 870 | static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, |
844 | host_show_hp_ssd_smart_path_enabled, NULL); | 871 | host_show_hp_ssd_smart_path_enabled, NULL); |
845 | static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); | 872 | static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); |
@@ -865,6 +892,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = { | |||
865 | &dev_attr_unique_id, | 892 | &dev_attr_unique_id, |
866 | &dev_attr_hp_ssd_smart_path_enabled, | 893 | &dev_attr_hp_ssd_smart_path_enabled, |
867 | &dev_attr_path_info, | 894 | &dev_attr_path_info, |
895 | &dev_attr_sas_address, | ||
868 | NULL, | 896 | NULL, |
869 | }; | 897 | }; |
870 | 898 | ||
@@ -1637,9 +1665,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, | |||
1637 | for (j = 0; j < ndevices; j++) { | 1665 | for (j = 0; j < ndevices; j++) { |
1638 | if (dev[j] == NULL) | 1666 | if (dev[j] == NULL) |
1639 | continue; | 1667 | continue; |
1640 | if (dev[j]->devtype != TYPE_DISK) | 1668 | if (dev[j]->devtype != TYPE_DISK && |
1641 | continue; | 1669 | dev[j]->devtype != TYPE_ZBC) |
1642 | if (dev[j]->devtype != TYPE_ZBC) | ||
1643 | continue; | 1670 | continue; |
1644 | if (is_logical_device(dev[j])) | 1671 | if (is_logical_device(dev[j])) |
1645 | continue; | 1672 | continue; |
@@ -1684,9 +1711,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, | |||
1684 | for (i = 0; i < ndevices; i++) { | 1711 | for (i = 0; i < ndevices; i++) { |
1685 | if (dev[i] == NULL) | 1712 | if (dev[i] == NULL) |
1686 | continue; | 1713 | continue; |
1687 | if (dev[i]->devtype != TYPE_DISK) | 1714 | if (dev[i]->devtype != TYPE_DISK && |
1688 | continue; | 1715 | dev[i]->devtype != TYPE_ZBC) |
1689 | if (dev[i]->devtype != TYPE_ZBC) | ||
1690 | continue; | 1716 | continue; |
1691 | if (!is_logical_device(dev[i])) | 1717 | if (!is_logical_device(dev[i])) |
1692 | continue; | 1718 | continue; |
@@ -1720,6 +1746,51 @@ static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) | |||
1720 | return rc; | 1746 | return rc; |
1721 | } | 1747 | } |
1722 | 1748 | ||
1749 | static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, | ||
1750 | struct hpsa_scsi_dev_t *dev) | ||
1751 | { | ||
1752 | int i; | ||
1753 | int count = 0; | ||
1754 | |||
1755 | for (i = 0; i < h->nr_cmds; i++) { | ||
1756 | struct CommandList *c = h->cmd_pool + i; | ||
1757 | int refcount = atomic_inc_return(&c->refcount); | ||
1758 | |||
1759 | if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, | ||
1760 | dev->scsi3addr)) { | ||
1761 | unsigned long flags; | ||
1762 | |||
1763 | spin_lock_irqsave(&h->lock, flags); /* Implied MB */ | ||
1764 | if (!hpsa_is_cmd_idle(c)) | ||
1765 | ++count; | ||
1766 | spin_unlock_irqrestore(&h->lock, flags); | ||
1767 | } | ||
1768 | |||
1769 | cmd_free(h, c); | ||
1770 | } | ||
1771 | |||
1772 | return count; | ||
1773 | } | ||
1774 | |||
1775 | static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, | ||
1776 | struct hpsa_scsi_dev_t *device) | ||
1777 | { | ||
1778 | int cmds = 0; | ||
1779 | int waits = 0; | ||
1780 | |||
1781 | while (1) { | ||
1782 | cmds = hpsa_find_outstanding_commands_for_dev(h, device); | ||
1783 | if (cmds == 0) | ||
1784 | break; | ||
1785 | if (++waits > 20) | ||
1786 | break; | ||
1787 | dev_warn(&h->pdev->dev, | ||
1788 | "%s: removing device with %d outstanding commands!\n", | ||
1789 | __func__, cmds); | ||
1790 | msleep(1000); | ||
1791 | } | ||
1792 | } | ||
1793 | |||
1723 | static void hpsa_remove_device(struct ctlr_info *h, | 1794 | static void hpsa_remove_device(struct ctlr_info *h, |
1724 | struct hpsa_scsi_dev_t *device) | 1795 | struct hpsa_scsi_dev_t *device) |
1725 | { | 1796 | { |
@@ -1743,8 +1814,13 @@ static void hpsa_remove_device(struct ctlr_info *h, | |||
1743 | hpsa_show_dev_msg(KERN_WARNING, h, device, | 1814 | hpsa_show_dev_msg(KERN_WARNING, h, device, |
1744 | "didn't find device for removal."); | 1815 | "didn't find device for removal."); |
1745 | } | 1816 | } |
1746 | } else /* HBA */ | 1817 | } else { /* HBA */ |
1818 | |||
1819 | device->removed = 1; | ||
1820 | hpsa_wait_for_outstanding_commands_for_dev(h, device); | ||
1821 | |||
1747 | hpsa_remove_sas_device(device); | 1822 | hpsa_remove_sas_device(device); |
1823 | } | ||
1748 | } | 1824 | } |
1749 | 1825 | ||
1750 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, | 1826 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, |
@@ -2146,7 +2222,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, | |||
2146 | static int handle_ioaccel_mode2_error(struct ctlr_info *h, | 2222 | static int handle_ioaccel_mode2_error(struct ctlr_info *h, |
2147 | struct CommandList *c, | 2223 | struct CommandList *c, |
2148 | struct scsi_cmnd *cmd, | 2224 | struct scsi_cmnd *cmd, |
2149 | struct io_accel2_cmd *c2) | 2225 | struct io_accel2_cmd *c2, |
2226 | struct hpsa_scsi_dev_t *dev) | ||
2150 | { | 2227 | { |
2151 | int data_len; | 2228 | int data_len; |
2152 | int retry = 0; | 2229 | int retry = 0; |
@@ -2210,8 +2287,27 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, | |||
2210 | case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: | 2287 | case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: |
2211 | case IOACCEL2_STATUS_SR_INVALID_DEVICE: | 2288 | case IOACCEL2_STATUS_SR_INVALID_DEVICE: |
2212 | case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: | 2289 | case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: |
2213 | /* We will get an event from ctlr to trigger rescan */ | 2290 | /* |
2214 | retry = 1; | 2291 | * Did an HBA disk disappear? We will eventually |
2292 | * get a state change event from the controller but | ||
2293 | * in the meantime, we need to tell the OS that the | ||
2294 | * HBA disk is no longer there and stop I/O | ||
2295 | * from going down. This allows the potential re-insert | ||
2296 | * of the disk to get the same device node. | ||
2297 | */ | ||
2298 | if (dev->physical_device && dev->expose_device) { | ||
2299 | cmd->result = DID_NO_CONNECT << 16; | ||
2300 | dev->removed = 1; | ||
2301 | h->drv_req_rescan = 1; | ||
2302 | dev_warn(&h->pdev->dev, | ||
2303 | "%s: device is gone!\n", __func__); | ||
2304 | } else | ||
2305 | /* | ||
2306 | * Retry by sending down the RAID path. | ||
2307 | * We will get an event from ctlr to | ||
2308 | * trigger rescan regardless. | ||
2309 | */ | ||
2310 | retry = 1; | ||
2215 | break; | 2311 | break; |
2216 | default: | 2312 | default: |
2217 | retry = 1; | 2313 | retry = 1; |
@@ -2335,13 +2431,15 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
2335 | c2->error_data.serv_response == | 2431 | c2->error_data.serv_response == |
2336 | IOACCEL2_SERV_RESPONSE_FAILURE) { | 2432 | IOACCEL2_SERV_RESPONSE_FAILURE) { |
2337 | if (c2->error_data.status == | 2433 | if (c2->error_data.status == |
2338 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) | 2434 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { |
2339 | dev->offload_enabled = 0; | 2435 | dev->offload_enabled = 0; |
2436 | dev->offload_to_be_enabled = 0; | ||
2437 | } | ||
2340 | 2438 | ||
2341 | return hpsa_retry_cmd(h, c); | 2439 | return hpsa_retry_cmd(h, c); |
2342 | } | 2440 | } |
2343 | 2441 | ||
2344 | if (handle_ioaccel_mode2_error(h, c, cmd, c2)) | 2442 | if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) |
2345 | return hpsa_retry_cmd(h, c); | 2443 | return hpsa_retry_cmd(h, c); |
2346 | 2444 | ||
2347 | return hpsa_cmd_free_and_done(h, c, cmd); | 2445 | return hpsa_cmd_free_and_done(h, c, cmd); |
@@ -2806,7 +2904,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2806 | goto out; | 2904 | goto out; |
2807 | } | 2905 | } |
2808 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 2906 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
2809 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 2907 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
2810 | if (rc) | 2908 | if (rc) |
2811 | goto out; | 2909 | goto out; |
2812 | ei = c->err_info; | 2910 | ei = c->err_info; |
@@ -2832,7 +2930,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2832 | /* fill_cmd can't fail here, no data buffer to map. */ | 2930 | /* fill_cmd can't fail here, no data buffer to map. */ |
2833 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, | 2931 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, |
2834 | scsi3addr, TYPE_MSG); | 2932 | scsi3addr, TYPE_MSG); |
2835 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | 2933 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); |
2836 | if (rc) { | 2934 | if (rc) { |
2837 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); | 2935 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); |
2838 | goto out; | 2936 | goto out; |
@@ -3080,7 +3178,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h, | |||
3080 | return -1; | 3178 | return -1; |
3081 | } | 3179 | } |
3082 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 3180 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3083 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 3181 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
3084 | if (rc) | 3182 | if (rc) |
3085 | goto out; | 3183 | goto out; |
3086 | ei = c->err_info; | 3184 | ei = c->err_info; |
@@ -3123,7 +3221,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, | |||
3123 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | 3221 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; |
3124 | 3222 | ||
3125 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 3223 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3126 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 3224 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
3127 | if (rc) | 3225 | if (rc) |
3128 | goto out; | 3226 | goto out; |
3129 | ei = c->err_info; | 3227 | ei = c->err_info; |
@@ -3151,7 +3249,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h, | |||
3151 | goto out; | 3249 | goto out; |
3152 | 3250 | ||
3153 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 3251 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3154 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 3252 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
3155 | if (rc) | 3253 | if (rc) |
3156 | goto out; | 3254 | goto out; |
3157 | ei = c->err_info; | 3255 | ei = c->err_info; |
@@ -3182,7 +3280,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h, | |||
3182 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | 3280 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; |
3183 | 3281 | ||
3184 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, | 3282 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
3185 | NO_TIMEOUT); | 3283 | DEFAULT_TIMEOUT); |
3186 | ei = c->err_info; | 3284 | ei = c->err_info; |
3187 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | 3285 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
3188 | hpsa_scsi_interpret_error(h, c); | 3286 | hpsa_scsi_interpret_error(h, c); |
@@ -3250,7 +3348,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, | |||
3250 | c->Request.CDB[5] = 0; | 3348 | c->Request.CDB[5] = 0; |
3251 | 3349 | ||
3252 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, | 3350 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
3253 | NO_TIMEOUT); | 3351 | DEFAULT_TIMEOUT); |
3254 | if (rc) | 3352 | if (rc) |
3255 | goto out; | 3353 | goto out; |
3256 | 3354 | ||
@@ -3462,7 +3560,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
3462 | if (extended_response) | 3560 | if (extended_response) |
3463 | c->Request.CDB[1] = extended_response; | 3561 | c->Request.CDB[1] = extended_response; |
3464 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 3562 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3465 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 3563 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
3466 | if (rc) | 3564 | if (rc) |
3467 | goto out; | 3565 | goto out; |
3468 | ei = c->err_info; | 3566 | ei = c->err_info; |
@@ -3569,7 +3667,8 @@ static int hpsa_volume_offline(struct ctlr_info *h, | |||
3569 | c = cmd_alloc(h); | 3667 | c = cmd_alloc(h); |
3570 | 3668 | ||
3571 | (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); | 3669 | (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); |
3572 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); | 3670 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
3671 | DEFAULT_TIMEOUT); | ||
3573 | if (rc) { | 3672 | if (rc) { |
3574 | cmd_free(h, c); | 3673 | cmd_free(h, c); |
3575 | return 0; | 3674 | return 0; |
@@ -3644,7 +3743,8 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h, | |||
3644 | c = cmd_alloc(h); | 3743 | c = cmd_alloc(h); |
3645 | 3744 | ||
3646 | (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); | 3745 | (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); |
3647 | (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); | 3746 | (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
3747 | DEFAULT_TIMEOUT); | ||
3648 | /* no unmap needed here because no data xfer. */ | 3748 | /* no unmap needed here because no data xfer. */ |
3649 | ei = c->err_info; | 3749 | ei = c->err_info; |
3650 | switch (ei->CommandStatus) { | 3750 | switch (ei->CommandStatus) { |
@@ -5234,6 +5334,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5234 | 5334 | ||
5235 | dev = cmd->device->hostdata; | 5335 | dev = cmd->device->hostdata; |
5236 | if (!dev) { | 5336 | if (!dev) { |
5337 | cmd->result = NOT_READY << 16; /* host byte */ | ||
5338 | cmd->scsi_done(cmd); | ||
5339 | return 0; | ||
5340 | } | ||
5341 | |||
5342 | if (dev->removed) { | ||
5237 | cmd->result = DID_NO_CONNECT << 16; | 5343 | cmd->result = DID_NO_CONNECT << 16; |
5238 | cmd->scsi_done(cmd); | 5344 | cmd->scsi_done(cmd); |
5239 | return 0; | 5345 | return 0; |
@@ -5414,7 +5520,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h, | |||
5414 | /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ | 5520 | /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ |
5415 | (void) fill_cmd(c, TEST_UNIT_READY, h, | 5521 | (void) fill_cmd(c, TEST_UNIT_READY, h, |
5416 | NULL, 0, 0, lunaddr, TYPE_CMD); | 5522 | NULL, 0, 0, lunaddr, TYPE_CMD); |
5417 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | 5523 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); |
5418 | if (rc) | 5524 | if (rc) |
5419 | return rc; | 5525 | return rc; |
5420 | /* no unmap needed here because no data xfer. */ | 5526 | /* no unmap needed here because no data xfer. */ |
@@ -5638,7 +5744,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
5638 | 0, 0, scsi3addr, TYPE_MSG); | 5744 | 0, 0, scsi3addr, TYPE_MSG); |
5639 | if (h->needs_abort_tags_swizzled) | 5745 | if (h->needs_abort_tags_swizzled) |
5640 | swizzle_abort_tag(&c->Request.CDB[4]); | 5746 | swizzle_abort_tag(&c->Request.CDB[4]); |
5641 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | 5747 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); |
5642 | hpsa_get_tag(h, abort, &taglower, &tagupper); | 5748 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
5643 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", | 5749 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", |
5644 | __func__, tagupper, taglower); | 5750 | __func__, tagupper, taglower); |
@@ -5803,7 +5909,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, | |||
5803 | c = cmd_alloc(h); | 5909 | c = cmd_alloc(h); |
5804 | setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); | 5910 | setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); |
5805 | c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | 5911 | c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
5806 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | 5912 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); |
5807 | hpsa_get_tag(h, abort, &taglower, &tagupper); | 5913 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
5808 | dev_dbg(&h->pdev->dev, | 5914 | dev_dbg(&h->pdev->dev, |
5809 | "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", | 5915 | "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", |
@@ -6348,7 +6454,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
6348 | c->SG[0].Len = cpu_to_le32(iocommand.buf_size); | 6454 | c->SG[0].Len = cpu_to_le32(iocommand.buf_size); |
6349 | c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ | 6455 | c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ |
6350 | } | 6456 | } |
6351 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); | 6457 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
6458 | DEFAULT_TIMEOUT); | ||
6352 | if (iocommand.buf_size > 0) | 6459 | if (iocommand.buf_size > 0) |
6353 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); | 6460 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); |
6354 | check_ioctl_unit_attention(h, c); | 6461 | check_ioctl_unit_attention(h, c); |
@@ -6480,7 +6587,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
6480 | } | 6587 | } |
6481 | c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); | 6588 | c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); |
6482 | } | 6589 | } |
6483 | status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); | 6590 | status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
6591 | DEFAULT_TIMEOUT); | ||
6484 | if (sg_used) | 6592 | if (sg_used) |
6485 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); | 6593 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); |
6486 | check_ioctl_unit_attention(h, c); | 6594 | check_ioctl_unit_attention(h, c); |
@@ -8254,8 +8362,10 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) | |||
8254 | event_type = "configuration change"; | 8362 | event_type = "configuration change"; |
8255 | /* Stop sending new RAID offload reqs via the IO accelerator */ | 8363 | /* Stop sending new RAID offload reqs via the IO accelerator */ |
8256 | scsi_block_requests(h->scsi_host); | 8364 | scsi_block_requests(h->scsi_host); |
8257 | for (i = 0; i < h->ndevices; i++) | 8365 | for (i = 0; i < h->ndevices; i++) { |
8258 | h->dev[i]->offload_enabled = 0; | 8366 | h->dev[i]->offload_enabled = 0; |
8367 | h->dev[i]->offload_to_be_enabled = 0; | ||
8368 | } | ||
8259 | hpsa_drain_accel_commands(h); | 8369 | hpsa_drain_accel_commands(h); |
8260 | /* Set 'accelerator path config change' bit */ | 8370 | /* Set 'accelerator path config change' bit */ |
8261 | dev_warn(&h->pdev->dev, | 8371 | dev_warn(&h->pdev->dev, |
@@ -8541,11 +8651,6 @@ reinit_after_soft_reset: | |||
8541 | if (rc) | 8651 | if (rc) |
8542 | goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ | 8652 | goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ |
8543 | 8653 | ||
8544 | /* hook into SCSI subsystem */ | ||
8545 | rc = hpsa_scsi_add_host(h); | ||
8546 | if (rc) | ||
8547 | goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ | ||
8548 | |||
8549 | /* create the resubmit workqueue */ | 8654 | /* create the resubmit workqueue */ |
8550 | h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); | 8655 | h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); |
8551 | if (!h->rescan_ctlr_wq) { | 8656 | if (!h->rescan_ctlr_wq) { |
@@ -8642,6 +8747,11 @@ reinit_after_soft_reset: | |||
8642 | dev_info(&h->pdev->dev, | 8747 | dev_info(&h->pdev->dev, |
8643 | "Can't track change to report lun data\n"); | 8748 | "Can't track change to report lun data\n"); |
8644 | 8749 | ||
8750 | /* hook into SCSI subsystem */ | ||
8751 | rc = hpsa_scsi_add_host(h); | ||
8752 | if (rc) | ||
8753 | goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ | ||
8754 | |||
8645 | /* Monitor the controller for firmware lockups */ | 8755 | /* Monitor the controller for firmware lockups */ |
8646 | h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; | 8756 | h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; |
8647 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); | 8757 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); |
@@ -8703,7 +8813,7 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
8703 | goto out; | 8813 | goto out; |
8704 | } | 8814 | } |
8705 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 8815 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
8706 | PCI_DMA_TODEVICE, NO_TIMEOUT); | 8816 | PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); |
8707 | if (rc) | 8817 | if (rc) |
8708 | goto out; | 8818 | goto out; |
8709 | if (c->err_info->CommandStatus != 0) | 8819 | if (c->err_info->CommandStatus != 0) |
@@ -8742,7 +8852,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) | |||
8742 | goto errout; | 8852 | goto errout; |
8743 | 8853 | ||
8744 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 8854 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
8745 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 8855 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
8746 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | 8856 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
8747 | goto errout; | 8857 | goto errout; |
8748 | 8858 | ||
@@ -8754,7 +8864,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) | |||
8754 | goto errout; | 8864 | goto errout; |
8755 | 8865 | ||
8756 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 8866 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
8757 | PCI_DMA_TODEVICE, NO_TIMEOUT); | 8867 | PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); |
8758 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | 8868 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
8759 | goto errout; | 8869 | goto errout; |
8760 | 8870 | ||
@@ -8764,7 +8874,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) | |||
8764 | goto errout; | 8874 | goto errout; |
8765 | 8875 | ||
8766 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | 8876 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
8767 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | 8877 | PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); |
8768 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | 8878 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
8769 | goto errout; | 8879 | goto errout; |
8770 | 8880 | ||
@@ -9602,6 +9712,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy) | |||
9602 | static int | 9712 | static int |
9603 | hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) | 9713 | hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) |
9604 | { | 9714 | { |
9715 | *identifier = 0; | ||
9605 | return 0; | 9716 | return 0; |
9606 | } | 9717 | } |
9607 | 9718 | ||
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index d06bb7417e36..a1487e67f7a1 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -63,6 +63,7 @@ struct hpsa_scsi_dev_t { | |||
63 | unsigned char scsi3addr[8]; /* as presented to the HW */ | 63 | unsigned char scsi3addr[8]; /* as presented to the HW */ |
64 | u8 physical_device : 1; | 64 | u8 physical_device : 1; |
65 | u8 expose_device; | 65 | u8 expose_device; |
66 | u8 removed : 1; /* device is marked for death */ | ||
66 | #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" | 67 | #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" |
67 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ | 68 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ |
68 | u64 sas_address; | 69 | u64 sas_address; |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 6bffd91b973a..c051694bfcb0 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -2127,7 +2127,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
2127 | struct iscsi_conn *conn; | 2127 | struct iscsi_conn *conn; |
2128 | struct iscsi_task *task; | 2128 | struct iscsi_task *task; |
2129 | struct iscsi_tm *hdr; | 2129 | struct iscsi_tm *hdr; |
2130 | int rc, age; | 2130 | int age; |
2131 | 2131 | ||
2132 | cls_session = starget_to_session(scsi_target(sc->device)); | 2132 | cls_session = starget_to_session(scsi_target(sc->device)); |
2133 | session = cls_session->dd_data; | 2133 | session = cls_session->dd_data; |
@@ -2188,10 +2188,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
2188 | hdr = &conn->tmhdr; | 2188 | hdr = &conn->tmhdr; |
2189 | iscsi_prep_abort_task_pdu(task, hdr); | 2189 | iscsi_prep_abort_task_pdu(task, hdr); |
2190 | 2190 | ||
2191 | if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { | 2191 | if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) |
2192 | rc = FAILED; | ||
2193 | goto failed; | 2192 | goto failed; |
2194 | } | ||
2195 | 2193 | ||
2196 | switch (conn->tmf_state) { | 2194 | switch (conn->tmf_state) { |
2197 | case TMF_SUCCESS: | 2195 | case TMF_SUCCESS: |
@@ -2423,7 +2421,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) | |||
2423 | * | 2421 | * |
2424 | * This will attempt to send a warm target reset. | 2422 | * This will attempt to send a warm target reset. |
2425 | */ | 2423 | */ |
2426 | int iscsi_eh_target_reset(struct scsi_cmnd *sc) | 2424 | static int iscsi_eh_target_reset(struct scsi_cmnd *sc) |
2427 | { | 2425 | { |
2428 | struct iscsi_cls_session *cls_session; | 2426 | struct iscsi_cls_session *cls_session; |
2429 | struct iscsi_session *session; | 2427 | struct iscsi_session *session; |
@@ -2495,7 +2493,6 @@ done: | |||
2495 | mutex_unlock(&session->eh_mutex); | 2493 | mutex_unlock(&session->eh_mutex); |
2496 | return rc; | 2494 | return rc; |
2497 | } | 2495 | } |
2498 | EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); | ||
2499 | 2496 | ||
2500 | /** | 2497 | /** |
2501 | * iscsi_eh_recover_target - reset target and possibly the session | 2498 | * iscsi_eh_recover_target - reset target and possibly the session |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 90a3ca5a4dbd..d5bd420595c1 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -694,6 +694,7 @@ struct lpfc_hba { | |||
694 | uint8_t wwnn[8]; | 694 | uint8_t wwnn[8]; |
695 | uint8_t wwpn[8]; | 695 | uint8_t wwpn[8]; |
696 | uint32_t RandomData[7]; | 696 | uint32_t RandomData[7]; |
697 | uint32_t fcp_embed_io; | ||
697 | 698 | ||
698 | /* HBA Config Parameters */ | 699 | /* HBA Config Parameters */ |
699 | uint32_t cfg_ack0; | 700 | uint32_t cfg_ack0; |
@@ -757,7 +758,6 @@ struct lpfc_hba { | |||
757 | uint32_t cfg_fdmi_on; | 758 | uint32_t cfg_fdmi_on; |
758 | #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ | 759 | #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ |
759 | #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ | 760 | #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ |
760 | #define LPFC_FDMI_SMART_SAN 2 /* SmartSAN supported */ | ||
761 | uint32_t cfg_enable_SmartSAN; | 761 | uint32_t cfg_enable_SmartSAN; |
762 | lpfc_vpd_t vpd; /* vital product data */ | 762 | lpfc_vpd_t vpd; /* vital product data */ |
763 | 763 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 343ae9482891..cfec2eca4dd3 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -4584,15 +4584,14 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); | |||
4584 | # lpfc_fdmi_on: Controls FDMI support. | 4584 | # lpfc_fdmi_on: Controls FDMI support. |
4585 | # 0 No FDMI support (default) | 4585 | # 0 No FDMI support (default) |
4586 | # 1 Traditional FDMI support | 4586 | # 1 Traditional FDMI support |
4587 | # 2 Smart SAN support | 4587 | # Traditional FDMI support means the driver will assume FDMI-2 support; |
4588 | # If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2 | 4588 | # however, if that fails, it will fallback to FDMI-1. |
4589 | # overwriting the current value. If lpfc_enable_SmartSAN is set 0, the | 4589 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. |
4590 | # driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1. | 4590 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of |
4591 | # A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to | 4591 | # lpfc_fdmi_on. |
4592 | # set lpfc_fdmi_on back to 1. | 4592 | # Value range [0,1]. Default value is 0. |
4593 | # Value range [0,2]. Default value is 0. | ||
4594 | */ | 4593 | */ |
4595 | LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support"); | 4594 | LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); |
4596 | 4595 | ||
4597 | /* | 4596 | /* |
4598 | # Specifies the maximum number of ELS cmds we can have outstanding (for | 4597 | # Specifies the maximum number of ELS cmds we can have outstanding (for |
@@ -5150,7 +5149,6 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) | |||
5150 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); | 5149 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); |
5151 | } | 5150 | } |
5152 | 5151 | ||
5153 | |||
5154 | /* | 5152 | /* |
5155 | * Dynamic FC Host Attributes Support | 5153 | * Dynamic FC Host Attributes Support |
5156 | */ | 5154 | */ |
@@ -5857,14 +5855,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
5857 | else | 5855 | else |
5858 | phba->cfg_poll = lpfc_poll; | 5856 | phba->cfg_poll = lpfc_poll; |
5859 | 5857 | ||
5860 | /* Ensure fdmi_on and enable_SmartSAN don't conflict */ | ||
5861 | if (phba->cfg_enable_SmartSAN) { | ||
5862 | phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN; | ||
5863 | } else { | ||
5864 | if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) | ||
5865 | phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; | ||
5866 | } | ||
5867 | |||
5868 | phba->cfg_soft_wwnn = 0L; | 5858 | phba->cfg_soft_wwnn = 0L; |
5869 | phba->cfg_soft_wwpn = 0L; | 5859 | phba->cfg_soft_wwpn = 0L; |
5870 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); | 5860 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 79e261d2a0c8..a38816e96654 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -2322,7 +2322,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, | |||
2322 | ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; | 2322 | ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2323 | memset(ae, 0, 256); | 2323 | memset(ae, 0, 256); |
2324 | 2324 | ||
2325 | strncpy(ae->un.AttrString, "Smart SAN Version 1.0", | 2325 | strncpy(ae->un.AttrString, "Smart SAN Version 2.0", |
2326 | sizeof(ae->un.AttrString)); | 2326 | sizeof(ae->un.AttrString)); |
2327 | len = strnlen(ae->un.AttrString, | 2327 | len = strnlen(ae->un.AttrString, |
2328 | sizeof(ae->un.AttrString)); | 2328 | sizeof(ae->un.AttrString)); |
@@ -2397,7 +2397,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, | |||
2397 | uint32_t size; | 2397 | uint32_t size; |
2398 | 2398 | ||
2399 | ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; | 2399 | ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2400 | ae->un.AttrInt = cpu_to_be32(0); | 2400 | ae->un.AttrInt = cpu_to_be32(1); |
2401 | size = FOURBYTES + sizeof(uint32_t); | 2401 | size = FOURBYTES + sizeof(uint32_t); |
2402 | ad->AttrLen = cpu_to_be16(size); | 2402 | ad->AttrLen = cpu_to_be16(size); |
2403 | ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY); | 2403 | ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 7f5abb8f52bc..0498f5760d2b 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -690,16 +690,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
690 | fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); | 690 | fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); |
691 | if (fabric_param_changed) { | 691 | if (fabric_param_changed) { |
692 | /* Reset FDMI attribute masks based on config parameter */ | 692 | /* Reset FDMI attribute masks based on config parameter */ |
693 | if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) { | 693 | if (phba->cfg_enable_SmartSAN || |
694 | vport->fdmi_hba_mask = 0; | 694 | (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { |
695 | vport->fdmi_port_mask = 0; | ||
696 | } else { | ||
697 | /* Setup appropriate attribute masks */ | 695 | /* Setup appropriate attribute masks */ |
698 | vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; | 696 | vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; |
699 | if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) | 697 | if (phba->cfg_enable_SmartSAN) |
700 | vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; | 698 | vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; |
701 | else | 699 | else |
702 | vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; | 700 | vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; |
701 | } else { | ||
702 | vport->fdmi_hba_mask = 0; | ||
703 | vport->fdmi_port_mask = 0; | ||
703 | } | 704 | } |
704 | 705 | ||
705 | } | 706 | } |
@@ -1069,7 +1070,10 @@ stop_rr_fcf_flogi: | |||
1069 | lpfc_sli4_unreg_all_rpis(vport); | 1070 | lpfc_sli4_unreg_all_rpis(vport); |
1070 | } | 1071 | } |
1071 | } | 1072 | } |
1072 | lpfc_issue_reg_vfi(vport); | 1073 | |
1074 | /* Do not register VFI if the driver aborted FLOGI */ | ||
1075 | if (!lpfc_error_lost_link(irsp)) | ||
1076 | lpfc_issue_reg_vfi(vport); | ||
1073 | lpfc_nlp_put(ndlp); | 1077 | lpfc_nlp_put(ndlp); |
1074 | goto out; | 1078 | goto out; |
1075 | } | 1079 | } |
@@ -4705,6 +4709,144 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, | |||
4705 | desc->length = cpu_to_be32(sizeof(desc->info)); | 4709 | desc->length = cpu_to_be32(sizeof(desc->info)); |
4706 | } | 4710 | } |
4707 | 4711 | ||
4712 | void | ||
4713 | lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, | ||
4714 | struct lpfc_vport *vport) | ||
4715 | { | ||
4716 | desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); | ||
4717 | |||
4718 | desc->bbc_info.port_bbc = cpu_to_be32( | ||
4719 | vport->fc_sparam.cmn.bbCreditMsb | | ||
4720 | vport->fc_sparam.cmn.bbCreditlsb << 8); | ||
4721 | if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) | ||
4722 | desc->bbc_info.attached_port_bbc = cpu_to_be32( | ||
4723 | vport->phba->fc_fabparam.cmn.bbCreditMsb | | ||
4724 | vport->phba->fc_fabparam.cmn.bbCreditlsb << 8); | ||
4725 | else | ||
4726 | desc->bbc_info.attached_port_bbc = 0; | ||
4727 | |||
4728 | desc->bbc_info.rtt = 0; | ||
4729 | desc->length = cpu_to_be32(sizeof(desc->bbc_info)); | ||
4730 | } | ||
4731 | |||
4732 | void | ||
4733 | lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) | ||
4734 | { | ||
4735 | uint32_t flags; | ||
4736 | |||
4737 | desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); | ||
4738 | |||
4739 | desc->oed_info.hi_alarm = | ||
4740 | cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]); | ||
4741 | desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]); | ||
4742 | desc->oed_info.hi_warning = | ||
4743 | cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]); | ||
4744 | desc->oed_info.lo_warning = | ||
4745 | cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]); | ||
4746 | flags = 0xf; /* All four are valid */ | ||
4747 | flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); | ||
4748 | desc->oed_info.function_flags = cpu_to_be32(flags); | ||
4749 | desc->length = cpu_to_be32(sizeof(desc->oed_info)); | ||
4750 | } | ||
4751 | |||
4752 | void | ||
4753 | lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc, | ||
4754 | uint8_t *page_a2) | ||
4755 | { | ||
4756 | uint32_t flags; | ||
4757 | |||
4758 | desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); | ||
4759 | |||
4760 | desc->oed_info.hi_alarm = | ||
4761 | cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]); | ||
4762 | desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]); | ||
4763 | desc->oed_info.hi_warning = | ||
4764 | cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]); | ||
4765 | desc->oed_info.lo_warning = | ||
4766 | cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]); | ||
4767 | flags = 0xf; /* All four are valid */ | ||
4768 | flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); | ||
4769 | desc->oed_info.function_flags = cpu_to_be32(flags); | ||
4770 | desc->length = cpu_to_be32(sizeof(desc->oed_info)); | ||
4771 | } | ||
4772 | |||
4773 | void | ||
4774 | lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc, | ||
4775 | uint8_t *page_a2) | ||
4776 | { | ||
4777 | uint32_t flags; | ||
4778 | |||
4779 | desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); | ||
4780 | |||
4781 | desc->oed_info.hi_alarm = | ||
4782 | cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]); | ||
4783 | desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]); | ||
4784 | desc->oed_info.hi_warning = | ||
4785 | cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]); | ||
4786 | desc->oed_info.lo_warning = | ||
4787 | cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]); | ||
4788 | flags = 0xf; /* All four are valid */ | ||
4789 | flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); | ||
4790 | desc->oed_info.function_flags = cpu_to_be32(flags); | ||
4791 | desc->length = cpu_to_be32(sizeof(desc->oed_info)); | ||
4792 | } | ||
4793 | |||
4794 | void | ||
4795 | lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc, | ||
4796 | uint8_t *page_a2) | ||
4797 | { | ||
4798 | uint32_t flags; | ||
4799 | |||
4800 | desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); | ||
4801 | |||
4802 | desc->oed_info.hi_alarm = | ||
4803 | cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]); | ||
4804 | desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]); | ||
4805 | desc->oed_info.hi_warning = | ||
4806 | cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]); | ||
4807 | desc->oed_info.lo_warning = | ||
4808 | cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]); | ||
4809 | flags = 0xf; /* All four are valid */ | ||
4810 | flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); | ||
4811 | desc->oed_info.function_flags = cpu_to_be32(flags); | ||
4812 | desc->length = cpu_to_be32(sizeof(desc->oed_info)); | ||
4813 | } | ||
4814 | |||
4815 | |||
4816 | void | ||
4817 | lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc, | ||
4818 | uint8_t *page_a2) | ||
4819 | { | ||
4820 | uint32_t flags; | ||
4821 | |||
4822 | desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); | ||
4823 | |||
4824 | desc->oed_info.hi_alarm = | ||
4825 | cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]); | ||
4826 | desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]); | ||
4827 | desc->oed_info.hi_warning = | ||
4828 | cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]); | ||
4829 | desc->oed_info.lo_warning = | ||
4830 | cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]); | ||
4831 | flags = 0xf; /* All four are valid */ | ||
4832 | flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); | ||
4833 | desc->oed_info.function_flags = cpu_to_be32(flags); | ||
4834 | desc->length = cpu_to_be32(sizeof(desc->oed_info)); | ||
4835 | } | ||
4836 | |||
4837 | void | ||
4838 | lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, | ||
4839 | uint8_t *page_a0, struct lpfc_vport *vport) | ||
4840 | { | ||
4841 | desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); | ||
4842 | memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); | ||
4843 | memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); | ||
4844 | memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); | ||
4845 | memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); | ||
4846 | memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); | ||
4847 | desc->length = cpu_to_be32(sizeof(desc->opd_info)); | ||
4848 | } | ||
4849 | |||
4708 | int | 4850 | int |
4709 | lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) | 4851 | lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) |
4710 | { | 4852 | { |
@@ -4776,6 +4918,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) | |||
4776 | 4918 | ||
4777 | if (rdp_cap == 0) | 4919 | if (rdp_cap == 0) |
4778 | rdp_cap = RDP_CAP_UNKNOWN; | 4920 | rdp_cap = RDP_CAP_UNKNOWN; |
4921 | if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) | ||
4922 | rdp_cap |= RDP_CAP_USER_CONFIGURED; | ||
4779 | 4923 | ||
4780 | desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); | 4924 | desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); |
4781 | desc->length = cpu_to_be32(sizeof(desc->info)); | 4925 | desc->length = cpu_to_be32(sizeof(desc->info)); |
@@ -4875,6 +5019,19 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, | |||
4875 | lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); | 5019 | lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); |
4876 | lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, | 5020 | lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, |
4877 | vport, ndlp); | 5021 | vport, ndlp); |
5022 | lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat, | ||
5023 | vport); | ||
5024 | lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc, | ||
5025 | rdp_context->page_a2); | ||
5026 | lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc, | ||
5027 | rdp_context->page_a2); | ||
5028 | lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc, | ||
5029 | rdp_context->page_a2); | ||
5030 | lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc, | ||
5031 | rdp_context->page_a2); | ||
5032 | lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc, | ||
5033 | rdp_context->page_a2); | ||
5034 | lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport); | ||
4878 | fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, | 5035 | fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, |
4879 | &rdp_context->link_stat); | 5036 | &rdp_context->link_stat); |
4880 | rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); | 5037 | rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); |
@@ -7849,8 +8006,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
7849 | return; | 8006 | return; |
7850 | } | 8007 | } |
7851 | 8008 | ||
7852 | if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) && | 8009 | if ((phba->cfg_enable_SmartSAN || |
7853 | (vport->load_flag & FC_ALLOW_FDMI)) | 8010 | (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && |
8011 | (vport->load_flag & FC_ALLOW_FDMI)) | ||
7854 | lpfc_start_fdmi(vport); | 8012 | lpfc_start_fdmi(vport); |
7855 | } | 8013 | } |
7856 | 8014 | ||
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 25b5dcd1a5c8..ed223937798a 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -4545,7 +4545,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4545 | (!(vport->load_flag & FC_UNLOADING)) && | 4545 | (!(vport->load_flag & FC_UNLOADING)) && |
4546 | (bf_get(lpfc_sli_intf_if_type, | 4546 | (bf_get(lpfc_sli_intf_if_type, |
4547 | &phba->sli4_hba.sli_intf) == | 4547 | &phba->sli4_hba.sli_intf) == |
4548 | LPFC_SLI_INTF_IF_TYPE_2)) { | 4548 | LPFC_SLI_INTF_IF_TYPE_2) && |
4549 | (atomic_read(&ndlp->kref.refcount) > 0)) { | ||
4549 | mbox->context1 = lpfc_nlp_get(ndlp); | 4550 | mbox->context1 = lpfc_nlp_get(ndlp); |
4550 | mbox->mbox_cmpl = | 4551 | mbox->mbox_cmpl = |
4551 | lpfc_sli4_unreg_rpi_cmpl_clr; | 4552 | lpfc_sli4_unreg_rpi_cmpl_clr; |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index dd20412c7e4c..39f0fd000d2c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -1134,9 +1134,10 @@ struct fc_rdp_link_error_status_desc { | |||
1134 | #define RDP_PS_16GB 0x0400 | 1134 | #define RDP_PS_16GB 0x0400 |
1135 | #define RDP_PS_32GB 0x0200 | 1135 | #define RDP_PS_32GB 0x0200 |
1136 | 1136 | ||
1137 | #define RDP_CAP_UNKNOWN 0x0001 | 1137 | #define RDP_CAP_USER_CONFIGURED 0x0002 |
1138 | #define RDP_PS_UNKNOWN 0x0002 | 1138 | #define RDP_CAP_UNKNOWN 0x0001 |
1139 | #define RDP_PS_NOT_ESTABLISHED 0x0001 | 1139 | #define RDP_PS_UNKNOWN 0x0002 |
1140 | #define RDP_PS_NOT_ESTABLISHED 0x0001 | ||
1140 | 1141 | ||
1141 | struct fc_rdp_port_speed { | 1142 | struct fc_rdp_port_speed { |
1142 | uint16_t capabilities; | 1143 | uint16_t capabilities; |
@@ -1192,6 +1193,58 @@ struct fc_rdp_sfp_desc { | |||
1192 | struct fc_rdp_sfp_info sfp_info; | 1193 | struct fc_rdp_sfp_info sfp_info; |
1193 | }; | 1194 | }; |
1194 | 1195 | ||
1196 | /* Buffer Credit Descriptor */ | ||
1197 | struct fc_rdp_bbc_info { | ||
1198 | uint32_t port_bbc; /* FC_Port buffer-to-buffer credit */ | ||
1199 | uint32_t attached_port_bbc; | ||
1200 | uint32_t rtt; /* Round trip time */ | ||
1201 | }; | ||
1202 | #define RDP_BBC_DESC_TAG 0x00010006 | ||
1203 | struct fc_rdp_bbc_desc { | ||
1204 | uint32_t tag; | ||
1205 | uint32_t length; | ||
1206 | struct fc_rdp_bbc_info bbc_info; | ||
1207 | }; | ||
1208 | |||
1209 | #define RDP_OED_TEMPERATURE 0x1 | ||
1210 | #define RDP_OED_VOLTAGE 0x2 | ||
1211 | #define RDP_OED_TXBIAS 0x3 | ||
1212 | #define RDP_OED_TXPOWER 0x4 | ||
1213 | #define RDP_OED_RXPOWER 0x5 | ||
1214 | |||
1215 | #define RDP_OED_TYPE_SHIFT 28 | ||
1216 | /* Optical Element Data descriptor */ | ||
1217 | struct fc_rdp_oed_info { | ||
1218 | uint16_t hi_alarm; | ||
1219 | uint16_t lo_alarm; | ||
1220 | uint16_t hi_warning; | ||
1221 | uint16_t lo_warning; | ||
1222 | uint32_t function_flags; | ||
1223 | }; | ||
1224 | #define RDP_OED_DESC_TAG 0x00010007 | ||
1225 | struct fc_rdp_oed_sfp_desc { | ||
1226 | uint32_t tag; | ||
1227 | uint32_t length; | ||
1228 | struct fc_rdp_oed_info oed_info; | ||
1229 | }; | ||
1230 | |||
1231 | /* Optical Product Data descriptor */ | ||
1232 | struct fc_rdp_opd_sfp_info { | ||
1233 | uint8_t vendor_name[16]; | ||
1234 | uint8_t model_number[16]; | ||
1235 | uint8_t serial_number[16]; | ||
1236 | uint8_t reserved[2]; | ||
1237 | uint8_t revision[2]; | ||
1238 | uint8_t date[8]; | ||
1239 | }; | ||
1240 | |||
1241 | #define RDP_OPD_DESC_TAG 0x00010008 | ||
1242 | struct fc_rdp_opd_sfp_desc { | ||
1243 | uint32_t tag; | ||
1244 | uint32_t length; | ||
1245 | struct fc_rdp_opd_sfp_info opd_info; | ||
1246 | }; | ||
1247 | |||
1195 | struct fc_rdp_req_frame { | 1248 | struct fc_rdp_req_frame { |
1196 | uint32_t rdp_command; /* ELS command opcode (0x18)*/ | 1249 | uint32_t rdp_command; /* ELS command opcode (0x18)*/ |
1197 | uint32_t rdp_des_length; /* RDP Payload Word 1 */ | 1250 | uint32_t rdp_des_length; /* RDP Payload Word 1 */ |
@@ -1208,7 +1261,14 @@ struct fc_rdp_res_frame { | |||
1208 | struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ | 1261 | struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ |
1209 | struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ | 1262 | struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ |
1210 | struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ | 1263 | struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ |
1211 | struct fc_fec_rdp_desc fec_desc; /* FC Word 34 - 37 */ | 1264 | struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/ |
1265 | struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/ | ||
1266 | struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/ | ||
1267 | struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/ | ||
1268 | struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/ | ||
1269 | struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/ | ||
1270 | struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/ | ||
1271 | struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/ | ||
1212 | }; | 1272 | }; |
1213 | 1273 | ||
1214 | 1274 | ||
@@ -1216,7 +1276,10 @@ struct fc_rdp_res_frame { | |||
1216 | + sizeof(struct fc_rdp_sfp_desc) \ | 1276 | + sizeof(struct fc_rdp_sfp_desc) \ |
1217 | + sizeof(struct fc_rdp_port_speed_desc) \ | 1277 | + sizeof(struct fc_rdp_port_speed_desc) \ |
1218 | + sizeof(struct fc_rdp_link_error_status_desc) \ | 1278 | + sizeof(struct fc_rdp_link_error_status_desc) \ |
1219 | + (sizeof(struct fc_rdp_port_name_desc) * 2)) | 1279 | + (sizeof(struct fc_rdp_port_name_desc) * 2) \ |
1280 | + sizeof(struct fc_rdp_bbc_desc) \ | ||
1281 | + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \ | ||
1282 | + sizeof(struct fc_rdp_opd_sfp_desc)) | ||
1220 | 1283 | ||
1221 | 1284 | ||
1222 | /******** FDMI ********/ | 1285 | /******** FDMI ********/ |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 608f9415fb08..0c7070bf2813 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -2557,7 +2557,26 @@ struct lpfc_mbx_memory_dump_type3 { | |||
2557 | 2557 | ||
2558 | /* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ | 2558 | /* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ |
2559 | 2559 | ||
2560 | #define SSF_AW_THRESHOLDS 0 | 2560 | #define SSF_TEMP_HIGH_ALARM 0 |
2561 | #define SSF_TEMP_LOW_ALARM 2 | ||
2562 | #define SSF_TEMP_HIGH_WARNING 4 | ||
2563 | #define SSF_TEMP_LOW_WARNING 6 | ||
2564 | #define SSF_VOLTAGE_HIGH_ALARM 8 | ||
2565 | #define SSF_VOLTAGE_LOW_ALARM 10 | ||
2566 | #define SSF_VOLTAGE_HIGH_WARNING 12 | ||
2567 | #define SSF_VOLTAGE_LOW_WARNING 14 | ||
2568 | #define SSF_BIAS_HIGH_ALARM 16 | ||
2569 | #define SSF_BIAS_LOW_ALARM 18 | ||
2570 | #define SSF_BIAS_HIGH_WARNING 20 | ||
2571 | #define SSF_BIAS_LOW_WARNING 22 | ||
2572 | #define SSF_TXPOWER_HIGH_ALARM 24 | ||
2573 | #define SSF_TXPOWER_LOW_ALARM 26 | ||
2574 | #define SSF_TXPOWER_HIGH_WARNING 28 | ||
2575 | #define SSF_TXPOWER_LOW_WARNING 30 | ||
2576 | #define SSF_RXPOWER_HIGH_ALARM 32 | ||
2577 | #define SSF_RXPOWER_LOW_ALARM 34 | ||
2578 | #define SSF_RXPOWER_HIGH_WARNING 36 | ||
2579 | #define SSF_RXPOWER_LOW_WARNING 38 | ||
2561 | #define SSF_EXT_CAL_CONSTANTS 56 | 2580 | #define SSF_EXT_CAL_CONSTANTS 56 |
2562 | #define SSF_CC_DMI 95 | 2581 | #define SSF_CC_DMI 95 |
2563 | #define SFF_TEMPERATURE_B1 96 | 2582 | #define SFF_TEMPERATURE_B1 96 |
@@ -2865,6 +2884,9 @@ struct lpfc_sli4_parameters { | |||
2865 | uint32_t word17; | 2884 | uint32_t word17; |
2866 | uint32_t word18; | 2885 | uint32_t word18; |
2867 | uint32_t word19; | 2886 | uint32_t word19; |
2887 | #define cfg_ext_embed_cb_SHIFT 0 | ||
2888 | #define cfg_ext_embed_cb_MASK 0x00000001 | ||
2889 | #define cfg_ext_embed_cb_WORD word19 | ||
2868 | }; | 2890 | }; |
2869 | 2891 | ||
2870 | struct lpfc_mbx_get_sli4_parameters { | 2892 | struct lpfc_mbx_get_sli4_parameters { |
@@ -3919,6 +3941,9 @@ union lpfc_wqe { | |||
3919 | union lpfc_wqe128 { | 3941 | union lpfc_wqe128 { |
3920 | uint32_t words[32]; | 3942 | uint32_t words[32]; |
3921 | struct lpfc_wqe_generic generic; | 3943 | struct lpfc_wqe_generic generic; |
3944 | struct fcp_icmnd64_wqe fcp_icmd; | ||
3945 | struct fcp_iread64_wqe fcp_iread; | ||
3946 | struct fcp_iwrite64_wqe fcp_iwrite; | ||
3922 | struct xmit_seq64_wqe xmit_sequence; | 3947 | struct xmit_seq64_wqe xmit_sequence; |
3923 | struct gen_req64_wqe gen_req; | 3948 | struct gen_req64_wqe gen_req; |
3924 | }; | 3949 | }; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f57d02c3b6cf..b43f7ac9812c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -6158,11 +6158,12 @@ lpfc_create_shost(struct lpfc_hba *phba) | |||
6158 | * any initial discovery should be completed. | 6158 | * any initial discovery should be completed. |
6159 | */ | 6159 | */ |
6160 | vport->load_flag |= FC_ALLOW_FDMI; | 6160 | vport->load_flag |= FC_ALLOW_FDMI; |
6161 | if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { | 6161 | if (phba->cfg_enable_SmartSAN || |
6162 | (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { | ||
6162 | 6163 | ||
6163 | /* Setup appropriate attribute masks */ | 6164 | /* Setup appropriate attribute masks */ |
6164 | vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; | 6165 | vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; |
6165 | if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) | 6166 | if (phba->cfg_enable_SmartSAN) |
6166 | vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; | 6167 | vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; |
6167 | else | 6168 | else |
6168 | vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; | 6169 | vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; |
@@ -7264,8 +7265,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
7264 | phba->sli4_hba.fcp_cq[idx] = qdesc; | 7265 | phba->sli4_hba.fcp_cq[idx] = qdesc; |
7265 | 7266 | ||
7266 | /* Create Fast Path FCP WQs */ | 7267 | /* Create Fast Path FCP WQs */ |
7267 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, | 7268 | if (phba->fcp_embed_io) { |
7268 | phba->sli4_hba.wq_ecount); | 7269 | qdesc = lpfc_sli4_queue_alloc(phba, |
7270 | LPFC_WQE128_SIZE, | ||
7271 | LPFC_WQE128_DEF_COUNT); | ||
7272 | } else { | ||
7273 | qdesc = lpfc_sli4_queue_alloc(phba, | ||
7274 | phba->sli4_hba.wq_esize, | ||
7275 | phba->sli4_hba.wq_ecount); | ||
7276 | } | ||
7269 | if (!qdesc) { | 7277 | if (!qdesc) { |
7270 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7278 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
7271 | "0503 Failed allocate fast-path FCP " | 7279 | "0503 Failed allocate fast-path FCP " |
@@ -9510,6 +9518,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
9510 | if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) | 9518 | if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) |
9511 | sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; | 9519 | sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; |
9512 | 9520 | ||
9521 | /* | ||
9522 | * Issue IOs with CDB embedded in WQE to minimized the number | ||
9523 | * of DMAs the firmware has to do. Setting this to 1 also forces | ||
9524 | * the driver to use 128 bytes WQEs for FCP IOs. | ||
9525 | */ | ||
9526 | if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) | ||
9527 | phba->fcp_embed_io = 1; | ||
9528 | else | ||
9529 | phba->fcp_embed_io = 0; | ||
9513 | return 0; | 9530 | return 0; |
9514 | } | 9531 | } |
9515 | 9532 | ||
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index f87f90e9b7df..12dbe99ccc50 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) | |||
2145 | reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); | 2145 | reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); |
2146 | reg_vfi->e_d_tov = phba->fc_edtov; | 2146 | reg_vfi->e_d_tov = phba->fc_edtov; |
2147 | reg_vfi->r_a_tov = phba->fc_ratov; | 2147 | reg_vfi->r_a_tov = phba->fc_ratov; |
2148 | reg_vfi->bde.addrHigh = putPaddrHigh(phys); | 2148 | if (phys) { |
2149 | reg_vfi->bde.addrLow = putPaddrLow(phys); | 2149 | reg_vfi->bde.addrHigh = putPaddrHigh(phys); |
2150 | reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); | 2150 | reg_vfi->bde.addrLow = putPaddrLow(phys); |
2151 | reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 2151 | reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); |
2152 | reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
2153 | } | ||
2152 | bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); | 2154 | bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); |
2153 | 2155 | ||
2154 | /* Only FC supports upd bit */ | 2156 | /* Only FC supports upd bit */ |
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 4fb3581d4614..3fa65338d3f5 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -231,13 +231,15 @@ lpfc_mem_free(struct lpfc_hba *phba) | |||
231 | if (phba->lpfc_hbq_pool) | 231 | if (phba->lpfc_hbq_pool) |
232 | pci_pool_destroy(phba->lpfc_hbq_pool); | 232 | pci_pool_destroy(phba->lpfc_hbq_pool); |
233 | phba->lpfc_hbq_pool = NULL; | 233 | phba->lpfc_hbq_pool = NULL; |
234 | mempool_destroy(phba->rrq_pool); | 234 | |
235 | if (phba->rrq_pool) | ||
236 | mempool_destroy(phba->rrq_pool); | ||
235 | phba->rrq_pool = NULL; | 237 | phba->rrq_pool = NULL; |
236 | 238 | ||
237 | /* Free NLP memory pool */ | 239 | /* Free NLP memory pool */ |
238 | mempool_destroy(phba->nlp_mem_pool); | 240 | mempool_destroy(phba->nlp_mem_pool); |
239 | phba->nlp_mem_pool = NULL; | 241 | phba->nlp_mem_pool = NULL; |
240 | if (phba->sli_rev == LPFC_SLI_REV4) { | 242 | if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { |
241 | mempool_destroy(phba->active_rrq_pool); | 243 | mempool_destroy(phba->active_rrq_pool); |
242 | phba->active_rrq_pool = NULL; | 244 | phba->active_rrq_pool = NULL; |
243 | } | 245 | } |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 193733e8c823..56a3df4fddb0 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1512,6 +1512,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, | |||
1512 | if ((mb = phba->sli.mbox_active)) { | 1512 | if ((mb = phba->sli.mbox_active)) { |
1513 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && | 1513 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
1514 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1514 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
1515 | ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; | ||
1515 | lpfc_nlp_put(ndlp); | 1516 | lpfc_nlp_put(ndlp); |
1516 | mb->context2 = NULL; | 1517 | mb->context2 = NULL; |
1517 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 1518 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
@@ -1527,6 +1528,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, | |||
1527 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1528 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1528 | kfree(mp); | 1529 | kfree(mp); |
1529 | } | 1530 | } |
1531 | ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; | ||
1530 | lpfc_nlp_put(ndlp); | 1532 | lpfc_nlp_put(ndlp); |
1531 | list_del(&mb->list); | 1533 | list_del(&mb->list); |
1532 | phba->sli.mboxq_cnt--; | 1534 | phba->sli.mboxq_cnt--; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2207726b88ee..70edf21ae1b9 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -2000,10 +2000,9 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list) | |||
2000 | * @phba: Pointer to HBA context object. | 2000 | * @phba: Pointer to HBA context object. |
2001 | * @tag: Tag of the hbq buffer. | 2001 | * @tag: Tag of the hbq buffer. |
2002 | * | 2002 | * |
2003 | * This function is called with hbalock held. This function searches | 2003 | * This function searches for the hbq buffer associated with the given tag in |
2004 | * for the hbq buffer associated with the given tag in the hbq buffer | 2004 | * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer |
2005 | * list. If it finds the hbq buffer, it returns the hbq_buffer other wise | 2005 | * otherwise it returns NULL. |
2006 | * it returns NULL. | ||
2007 | **/ | 2006 | **/ |
2008 | static struct hbq_dmabuf * | 2007 | static struct hbq_dmabuf * |
2009 | lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) | 2008 | lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) |
@@ -2012,8 +2011,6 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) | |||
2012 | struct hbq_dmabuf *hbq_buf; | 2011 | struct hbq_dmabuf *hbq_buf; |
2013 | uint32_t hbqno; | 2012 | uint32_t hbqno; |
2014 | 2013 | ||
2015 | lockdep_assert_held(&phba->hbalock); | ||
2016 | |||
2017 | hbqno = tag >> 16; | 2014 | hbqno = tag >> 16; |
2018 | if (hbqno >= LPFC_MAX_HBQS) | 2015 | if (hbqno >= LPFC_MAX_HBQS) |
2019 | return NULL; | 2016 | return NULL; |
@@ -2211,6 +2208,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2211 | rpi = pmb->u.mb.un.varWords[0]; | 2208 | rpi = pmb->u.mb.un.varWords[0]; |
2212 | vpi = pmb->u.mb.un.varRegLogin.vpi; | 2209 | vpi = pmb->u.mb.un.varRegLogin.vpi; |
2213 | lpfc_unreg_login(phba, vpi, rpi, pmb); | 2210 | lpfc_unreg_login(phba, vpi, rpi, pmb); |
2211 | pmb->vport = vport; | ||
2214 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 2212 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
2215 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | 2213 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
2216 | if (rc != MBX_NOT_FINISHED) | 2214 | if (rc != MBX_NOT_FINISHED) |
@@ -4688,6 +4686,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) | |||
4688 | 4686 | ||
4689 | break; | 4687 | break; |
4690 | } | 4688 | } |
4689 | phba->fcp_embed_io = 0; /* SLI4 FC support only */ | ||
4691 | 4690 | ||
4692 | rc = lpfc_sli_config_port(phba, mode); | 4691 | rc = lpfc_sli_config_port(phba, mode); |
4693 | 4692 | ||
@@ -6320,10 +6319,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
6320 | 6319 | ||
6321 | mqe = &mboxq->u.mqe; | 6320 | mqe = &mboxq->u.mqe; |
6322 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); | 6321 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
6323 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) | 6322 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { |
6324 | phba->hba_flag |= HBA_FCOE_MODE; | 6323 | phba->hba_flag |= HBA_FCOE_MODE; |
6325 | else | 6324 | phba->fcp_embed_io = 0; /* SLI4 FC support only */ |
6325 | } else { | ||
6326 | phba->hba_flag &= ~HBA_FCOE_MODE; | 6326 | phba->hba_flag &= ~HBA_FCOE_MODE; |
6327 | } | ||
6327 | 6328 | ||
6328 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == | 6329 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == |
6329 | LPFC_DCBX_CEE_MODE) | 6330 | LPFC_DCBX_CEE_MODE) |
@@ -8218,12 +8219,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8218 | else | 8219 | else |
8219 | command_type = ELS_COMMAND_NON_FIP; | 8220 | command_type = ELS_COMMAND_NON_FIP; |
8220 | 8221 | ||
8222 | if (phba->fcp_embed_io) | ||
8223 | memset(wqe, 0, sizeof(union lpfc_wqe128)); | ||
8221 | /* Some of the fields are in the right position already */ | 8224 | /* Some of the fields are in the right position already */ |
8222 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); | 8225 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
8223 | abort_tag = (uint32_t) iocbq->iotag; | ||
8224 | xritag = iocbq->sli4_xritag; | ||
8225 | wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ | 8226 | wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ |
8226 | wqe->generic.wqe_com.word10 = 0; | 8227 | wqe->generic.wqe_com.word10 = 0; |
8228 | |||
8229 | abort_tag = (uint32_t) iocbq->iotag; | ||
8230 | xritag = iocbq->sli4_xritag; | ||
8227 | /* words0-2 bpl convert bde */ | 8231 | /* words0-2 bpl convert bde */ |
8228 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { | 8232 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { |
8229 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | 8233 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / |
@@ -8372,11 +8376,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8372 | iocbq->iocb.ulpFCP2Rcvy); | 8376 | iocbq->iocb.ulpFCP2Rcvy); |
8373 | bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); | 8377 | bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); |
8374 | /* Always open the exchange */ | 8378 | /* Always open the exchange */ |
8375 | bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); | ||
8376 | bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); | 8379 | bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); |
8377 | bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, | 8380 | bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, |
8378 | LPFC_WQE_LENLOC_WORD4); | 8381 | LPFC_WQE_LENLOC_WORD4); |
8379 | bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); | ||
8380 | bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); | 8382 | bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); |
8381 | bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); | 8383 | bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); |
8382 | if (iocbq->iocb_flag & LPFC_IO_OAS) { | 8384 | if (iocbq->iocb_flag & LPFC_IO_OAS) { |
@@ -8387,6 +8389,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8387 | (phba->cfg_XLanePriority << 1)); | 8389 | (phba->cfg_XLanePriority << 1)); |
8388 | } | 8390 | } |
8389 | } | 8391 | } |
8392 | /* Note, word 10 is already initialized to 0 */ | ||
8393 | |||
8394 | if (phba->fcp_embed_io) { | ||
8395 | struct lpfc_scsi_buf *lpfc_cmd; | ||
8396 | struct sli4_sge *sgl; | ||
8397 | union lpfc_wqe128 *wqe128; | ||
8398 | struct fcp_cmnd *fcp_cmnd; | ||
8399 | uint32_t *ptr; | ||
8400 | |||
8401 | /* 128 byte wqe support here */ | ||
8402 | wqe128 = (union lpfc_wqe128 *)wqe; | ||
8403 | |||
8404 | lpfc_cmd = iocbq->context1; | ||
8405 | sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; | ||
8406 | fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
8407 | |||
8408 | /* Word 0-2 - FCP_CMND */ | ||
8409 | wqe128->generic.bde.tus.f.bdeFlags = | ||
8410 | BUFF_TYPE_BDE_IMMED; | ||
8411 | wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; | ||
8412 | wqe128->generic.bde.addrHigh = 0; | ||
8413 | wqe128->generic.bde.addrLow = 88; /* Word 22 */ | ||
8414 | |||
8415 | bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1); | ||
8416 | |||
8417 | /* Word 22-29 FCP CMND Payload */ | ||
8418 | ptr = &wqe128->words[22]; | ||
8419 | memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); | ||
8420 | } | ||
8390 | break; | 8421 | break; |
8391 | case CMD_FCP_IREAD64_CR: | 8422 | case CMD_FCP_IREAD64_CR: |
8392 | /* word3 iocb=iotag wqe=payload_offset_len */ | 8423 | /* word3 iocb=iotag wqe=payload_offset_len */ |
@@ -8401,11 +8432,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8401 | iocbq->iocb.ulpFCP2Rcvy); | 8432 | iocbq->iocb.ulpFCP2Rcvy); |
8402 | bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); | 8433 | bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); |
8403 | /* Always open the exchange */ | 8434 | /* Always open the exchange */ |
8404 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | ||
8405 | bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); | 8435 | bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); |
8406 | bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, | 8436 | bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, |
8407 | LPFC_WQE_LENLOC_WORD4); | 8437 | LPFC_WQE_LENLOC_WORD4); |
8408 | bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); | ||
8409 | bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); | 8438 | bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); |
8410 | bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); | 8439 | bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); |
8411 | if (iocbq->iocb_flag & LPFC_IO_OAS) { | 8440 | if (iocbq->iocb_flag & LPFC_IO_OAS) { |
@@ -8416,6 +8445,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8416 | (phba->cfg_XLanePriority << 1)); | 8445 | (phba->cfg_XLanePriority << 1)); |
8417 | } | 8446 | } |
8418 | } | 8447 | } |
8448 | /* Note, word 10 is already initialized to 0 */ | ||
8449 | |||
8450 | if (phba->fcp_embed_io) { | ||
8451 | struct lpfc_scsi_buf *lpfc_cmd; | ||
8452 | struct sli4_sge *sgl; | ||
8453 | union lpfc_wqe128 *wqe128; | ||
8454 | struct fcp_cmnd *fcp_cmnd; | ||
8455 | uint32_t *ptr; | ||
8456 | |||
8457 | /* 128 byte wqe support here */ | ||
8458 | wqe128 = (union lpfc_wqe128 *)wqe; | ||
8459 | |||
8460 | lpfc_cmd = iocbq->context1; | ||
8461 | sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; | ||
8462 | fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
8463 | |||
8464 | /* Word 0-2 - FCP_CMND */ | ||
8465 | wqe128->generic.bde.tus.f.bdeFlags = | ||
8466 | BUFF_TYPE_BDE_IMMED; | ||
8467 | wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; | ||
8468 | wqe128->generic.bde.addrHigh = 0; | ||
8469 | wqe128->generic.bde.addrLow = 88; /* Word 22 */ | ||
8470 | |||
8471 | bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1); | ||
8472 | |||
8473 | /* Word 22-29 FCP CMND Payload */ | ||
8474 | ptr = &wqe128->words[22]; | ||
8475 | memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); | ||
8476 | } | ||
8419 | break; | 8477 | break; |
8420 | case CMD_FCP_ICMND64_CR: | 8478 | case CMD_FCP_ICMND64_CR: |
8421 | /* word3 iocb=iotag wqe=payload_offset_len */ | 8479 | /* word3 iocb=iotag wqe=payload_offset_len */ |
@@ -8427,13 +8485,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8427 | /* word3 iocb=IO_TAG wqe=reserved */ | 8485 | /* word3 iocb=IO_TAG wqe=reserved */ |
8428 | bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); | 8486 | bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); |
8429 | /* Always open the exchange */ | 8487 | /* Always open the exchange */ |
8430 | bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); | ||
8431 | bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); | 8488 | bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); |
8432 | bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); | 8489 | bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); |
8433 | bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); | 8490 | bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); |
8434 | bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, | 8491 | bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, |
8435 | LPFC_WQE_LENLOC_NONE); | 8492 | LPFC_WQE_LENLOC_NONE); |
8436 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); | ||
8437 | bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, | 8493 | bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, |
8438 | iocbq->iocb.ulpFCP2Rcvy); | 8494 | iocbq->iocb.ulpFCP2Rcvy); |
8439 | if (iocbq->iocb_flag & LPFC_IO_OAS) { | 8495 | if (iocbq->iocb_flag & LPFC_IO_OAS) { |
@@ -8444,6 +8500,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8444 | (phba->cfg_XLanePriority << 1)); | 8500 | (phba->cfg_XLanePriority << 1)); |
8445 | } | 8501 | } |
8446 | } | 8502 | } |
8503 | /* Note, word 10 is already initialized to 0 */ | ||
8504 | |||
8505 | if (phba->fcp_embed_io) { | ||
8506 | struct lpfc_scsi_buf *lpfc_cmd; | ||
8507 | struct sli4_sge *sgl; | ||
8508 | union lpfc_wqe128 *wqe128; | ||
8509 | struct fcp_cmnd *fcp_cmnd; | ||
8510 | uint32_t *ptr; | ||
8511 | |||
8512 | /* 128 byte wqe support here */ | ||
8513 | wqe128 = (union lpfc_wqe128 *)wqe; | ||
8514 | |||
8515 | lpfc_cmd = iocbq->context1; | ||
8516 | sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; | ||
8517 | fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
8518 | |||
8519 | /* Word 0-2 - FCP_CMND */ | ||
8520 | wqe128->generic.bde.tus.f.bdeFlags = | ||
8521 | BUFF_TYPE_BDE_IMMED; | ||
8522 | wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; | ||
8523 | wqe128->generic.bde.addrHigh = 0; | ||
8524 | wqe128->generic.bde.addrLow = 88; /* Word 22 */ | ||
8525 | |||
8526 | bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1); | ||
8527 | |||
8528 | /* Word 22-29 FCP CMND Payload */ | ||
8529 | ptr = &wqe128->words[22]; | ||
8530 | memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); | ||
8531 | } | ||
8447 | break; | 8532 | break; |
8448 | case CMD_GEN_REQUEST64_CR: | 8533 | case CMD_GEN_REQUEST64_CR: |
8449 | /* For this command calculate the xmit length of the | 8534 | /* For this command calculate the xmit length of the |
@@ -8675,12 +8760,19 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
8675 | struct lpfc_iocbq *piocb, uint32_t flag) | 8760 | struct lpfc_iocbq *piocb, uint32_t flag) |
8676 | { | 8761 | { |
8677 | struct lpfc_sglq *sglq; | 8762 | struct lpfc_sglq *sglq; |
8678 | union lpfc_wqe wqe; | 8763 | union lpfc_wqe *wqe; |
8764 | union lpfc_wqe128 wqe128; | ||
8679 | struct lpfc_queue *wq; | 8765 | struct lpfc_queue *wq; |
8680 | struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; | 8766 | struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; |
8681 | 8767 | ||
8682 | lockdep_assert_held(&phba->hbalock); | 8768 | lockdep_assert_held(&phba->hbalock); |
8683 | 8769 | ||
8770 | /* | ||
8771 | * The WQE can be either 64 or 128 bytes, | ||
8772 | * so allocate space on the stack assuming the largest. | ||
8773 | */ | ||
8774 | wqe = (union lpfc_wqe *)&wqe128; | ||
8775 | |||
8684 | if (piocb->sli4_xritag == NO_XRI) { | 8776 | if (piocb->sli4_xritag == NO_XRI) { |
8685 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || | 8777 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
8686 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) | 8778 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) |
@@ -8727,7 +8819,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
8727 | return IOCB_ERROR; | 8819 | return IOCB_ERROR; |
8728 | } | 8820 | } |
8729 | 8821 | ||
8730 | if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) | 8822 | if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) |
8731 | return IOCB_ERROR; | 8823 | return IOCB_ERROR; |
8732 | 8824 | ||
8733 | if ((piocb->iocb_flag & LPFC_IO_FCP) || | 8825 | if ((piocb->iocb_flag & LPFC_IO_FCP) || |
@@ -8737,12 +8829,12 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
8737 | } else { | 8829 | } else { |
8738 | wq = phba->sli4_hba.oas_wq; | 8830 | wq = phba->sli4_hba.oas_wq; |
8739 | } | 8831 | } |
8740 | if (lpfc_sli4_wq_put(wq, &wqe)) | 8832 | if (lpfc_sli4_wq_put(wq, wqe)) |
8741 | return IOCB_ERROR; | 8833 | return IOCB_ERROR; |
8742 | } else { | 8834 | } else { |
8743 | if (unlikely(!phba->sli4_hba.els_wq)) | 8835 | if (unlikely(!phba->sli4_hba.els_wq)) |
8744 | return IOCB_ERROR; | 8836 | return IOCB_ERROR; |
8745 | if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) | 8837 | if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) |
8746 | return IOCB_ERROR; | 8838 | return IOCB_ERROR; |
8747 | } | 8839 | } |
8748 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb); | 8840 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb); |
@@ -8757,9 +8849,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
8757 | * pointer from the lpfc_hba struct. | 8849 | * pointer from the lpfc_hba struct. |
8758 | * | 8850 | * |
8759 | * Return codes: | 8851 | * Return codes: |
8760 | * IOCB_ERROR - Error | 8852 | * IOCB_ERROR - Error |
8761 | * IOCB_SUCCESS - Success | 8853 | * IOCB_SUCCESS - Success |
8762 | * IOCB_BUSY - Busy | 8854 | * IOCB_BUSY - Busy |
8763 | **/ | 8855 | **/ |
8764 | int | 8856 | int |
8765 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, | 8857 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4dc22562aaf1..fa0d531bf351 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2015 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "11.0.0.10." | 21 | #define LPFC_DRIVER_VERSION "11.1.0.0." |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | 23 | ||
24 | /* Used for SLI 2/3 */ | 24 | /* Used for SLI 2/3 */ |
@@ -30,4 +30,4 @@ | |||
30 | 30 | ||
31 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ | 31 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ |
32 | LPFC_DRIVER_VERSION | 32 | LPFC_DRIVER_VERSION |
33 | #define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved." | 33 | #define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved." |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index b3f85def18cc..c27f4b724547 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2013 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -395,7 +395,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
395 | 395 | ||
396 | /* At this point we are fully registered with SCSI Layer. */ | 396 | /* At this point we are fully registered with SCSI Layer. */ |
397 | vport->load_flag |= FC_ALLOW_FDMI; | 397 | vport->load_flag |= FC_ALLOW_FDMI; |
398 | if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { | 398 | if (phba->cfg_enable_SmartSAN || |
399 | (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { | ||
399 | /* Setup appropriate attribute masks */ | 400 | /* Setup appropriate attribute masks */ |
400 | vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; | 401 | vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; |
401 | vport->fdmi_port_mask = phba->pport->fdmi_port_mask; | 402 | vport->fdmi_port_mask = phba->pport->fdmi_port_mask; |
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index bb2381314a2b..a590089b9397 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c | |||
@@ -28,24 +28,23 @@ | |||
28 | 28 | ||
29 | /* Definitions for the core NCR5380 driver. */ | 29 | /* Definitions for the core NCR5380 driver. */ |
30 | 30 | ||
31 | #define PSEUDO_DMA | 31 | #define NCR5380_implementation_fields unsigned char *pdma_base; \ |
32 | 32 | int pdma_residual | |
33 | #define NCR5380_implementation_fields unsigned char *pdma_base | ||
34 | 33 | ||
35 | #define NCR5380_read(reg) macscsi_read(instance, reg) | 34 | #define NCR5380_read(reg) macscsi_read(instance, reg) |
36 | #define NCR5380_write(reg, value) macscsi_write(instance, reg, value) | 35 | #define NCR5380_write(reg, value) macscsi_write(instance, reg, value) |
37 | 36 | ||
38 | #define NCR5380_pread macscsi_pread | 37 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ |
39 | #define NCR5380_pwrite macscsi_pwrite | 38 | macscsi_dma_xfer_len(instance, cmd) |
40 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) | 39 | #define NCR5380_dma_recv_setup macscsi_pread |
40 | #define NCR5380_dma_send_setup macscsi_pwrite | ||
41 | #define NCR5380_dma_residual(instance) (hostdata->pdma_residual) | ||
41 | 42 | ||
42 | #define NCR5380_intr macscsi_intr | 43 | #define NCR5380_intr macscsi_intr |
43 | #define NCR5380_queue_command macscsi_queue_command | 44 | #define NCR5380_queue_command macscsi_queue_command |
44 | #define NCR5380_abort macscsi_abort | 45 | #define NCR5380_abort macscsi_abort |
45 | #define NCR5380_bus_reset macscsi_bus_reset | 46 | #define NCR5380_bus_reset macscsi_bus_reset |
46 | #define NCR5380_info macscsi_info | 47 | #define NCR5380_info macscsi_info |
47 | #define NCR5380_show_info macscsi_show_info | ||
48 | #define NCR5380_write_info macscsi_write_info | ||
49 | 48 | ||
50 | #include "NCR5380.h" | 49 | #include "NCR5380.h" |
51 | 50 | ||
@@ -57,8 +56,6 @@ static int setup_sg_tablesize = -1; | |||
57 | module_param(setup_sg_tablesize, int, 0); | 56 | module_param(setup_sg_tablesize, int, 0); |
58 | static int setup_use_pdma = -1; | 57 | static int setup_use_pdma = -1; |
59 | module_param(setup_use_pdma, int, 0); | 58 | module_param(setup_use_pdma, int, 0); |
60 | static int setup_use_tagged_queuing = -1; | ||
61 | module_param(setup_use_tagged_queuing, int, 0); | ||
62 | static int setup_hostid = -1; | 59 | static int setup_hostid = -1; |
63 | module_param(setup_hostid, int, 0); | 60 | module_param(setup_hostid, int, 0); |
64 | static int setup_toshiba_delay = -1; | 61 | static int setup_toshiba_delay = -1; |
@@ -97,8 +94,7 @@ static int __init mac_scsi_setup(char *str) | |||
97 | setup_sg_tablesize = ints[3]; | 94 | setup_sg_tablesize = ints[3]; |
98 | if (ints[0] >= 4) | 95 | if (ints[0] >= 4) |
99 | setup_hostid = ints[4]; | 96 | setup_hostid = ints[4]; |
100 | if (ints[0] >= 5) | 97 | /* ints[5] (use_tagged_queuing) is ignored */ |
101 | setup_use_tagged_queuing = ints[5]; | ||
102 | if (ints[0] >= 6) | 98 | if (ints[0] >= 6) |
103 | setup_use_pdma = ints[6]; | 99 | setup_use_pdma = ints[6]; |
104 | if (ints[0] >= 7) | 100 | if (ints[0] >= 7) |
@@ -109,19 +105,9 @@ static int __init mac_scsi_setup(char *str) | |||
109 | __setup("mac5380=", mac_scsi_setup); | 105 | __setup("mac5380=", mac_scsi_setup); |
110 | #endif /* !MODULE */ | 106 | #endif /* !MODULE */ |
111 | 107 | ||
112 | #ifdef PSEUDO_DMA | 108 | /* Pseudo DMA asm originally by Ove Edlund */ |
113 | /* | 109 | |
114 | Pseudo-DMA: (Ove Edlund) | 110 | #define CP_IO_TO_MEM(s,d,n) \ |
115 | The code attempts to catch bus errors that occur if one for example | ||
116 | "trips over the cable". | ||
117 | XXX: Since bus errors in the PDMA routines never happen on my | ||
118 | computer, the bus error code is untested. | ||
119 | If the code works as intended, a bus error results in Pseudo-DMA | ||
120 | being disabled, meaning that the driver switches to slow handshake. | ||
121 | If bus errors are NOT extremely rare, this has to be changed. | ||
122 | */ | ||
123 | |||
124 | #define CP_IO_TO_MEM(s,d,len) \ | ||
125 | __asm__ __volatile__ \ | 111 | __asm__ __volatile__ \ |
126 | (" cmp.w #4,%2\n" \ | 112 | (" cmp.w #4,%2\n" \ |
127 | " bls 8f\n" \ | 113 | " bls 8f\n" \ |
@@ -158,61 +144,73 @@ __asm__ __volatile__ \ | |||
158 | " 9: \n" \ | 144 | " 9: \n" \ |
159 | ".section .fixup,\"ax\"\n" \ | 145 | ".section .fixup,\"ax\"\n" \ |
160 | " .even\n" \ | 146 | " .even\n" \ |
161 | "90: moveq.l #1, %2\n" \ | 147 | "91: moveq.l #1, %2\n" \ |
148 | " jra 9b\n" \ | ||
149 | "94: moveq.l #4, %2\n" \ | ||
162 | " jra 9b\n" \ | 150 | " jra 9b\n" \ |
163 | ".previous\n" \ | 151 | ".previous\n" \ |
164 | ".section __ex_table,\"a\"\n" \ | 152 | ".section __ex_table,\"a\"\n" \ |
165 | " .align 4\n" \ | 153 | " .align 4\n" \ |
166 | " .long 1b,90b\n" \ | 154 | " .long 1b,91b\n" \ |
167 | " .long 3b,90b\n" \ | 155 | " .long 3b,94b\n" \ |
168 | " .long 31b,90b\n" \ | 156 | " .long 31b,94b\n" \ |
169 | " .long 32b,90b\n" \ | 157 | " .long 32b,94b\n" \ |
170 | " .long 33b,90b\n" \ | 158 | " .long 33b,94b\n" \ |
171 | " .long 34b,90b\n" \ | 159 | " .long 34b,94b\n" \ |
172 | " .long 35b,90b\n" \ | 160 | " .long 35b,94b\n" \ |
173 | " .long 36b,90b\n" \ | 161 | " .long 36b,94b\n" \ |
174 | " .long 37b,90b\n" \ | 162 | " .long 37b,94b\n" \ |
175 | " .long 5b,90b\n" \ | 163 | " .long 5b,94b\n" \ |
176 | " .long 7b,90b\n" \ | 164 | " .long 7b,91b\n" \ |
177 | ".previous" \ | 165 | ".previous" \ |
178 | : "=a"(s), "=a"(d), "=d"(len) \ | 166 | : "=a"(s), "=a"(d), "=d"(n) \ |
179 | : "0"(s), "1"(d), "2"(len) \ | 167 | : "0"(s), "1"(d), "2"(n) \ |
180 | : "d0") | 168 | : "d0") |
181 | 169 | ||
182 | static int macscsi_pread(struct Scsi_Host *instance, | 170 | static int macscsi_pread(struct Scsi_Host *instance, |
183 | unsigned char *dst, int len) | 171 | unsigned char *dst, int len) |
184 | { | 172 | { |
185 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 173 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
186 | unsigned char *d; | 174 | unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4); |
187 | unsigned char *s; | 175 | unsigned char *d = dst; |
188 | 176 | int n = len; | |
189 | s = hostdata->pdma_base + (INPUT_DATA_REG << 4); | 177 | int transferred; |
190 | d = dst; | 178 | |
191 | 179 | while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, | |
192 | /* These conditions are derived from MacOS */ | 180 | BASR_DRQ | BASR_PHASE_MATCH, |
193 | 181 | BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { | |
194 | while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && | 182 | CP_IO_TO_MEM(s, d, n); |
195 | !(NCR5380_read(STATUS_REG) & SR_REQ)) | 183 | |
196 | ; | 184 | transferred = d - dst - n; |
197 | 185 | hostdata->pdma_residual = len - transferred; | |
198 | if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && | 186 | |
199 | (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { | 187 | /* No bus error. */ |
200 | pr_err("Error in macscsi_pread\n"); | 188 | if (n == 0) |
201 | return -1; | 189 | return 0; |
190 | |||
191 | /* Target changed phase early? */ | ||
192 | if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, | ||
193 | BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) | ||
194 | scmd_printk(KERN_ERR, hostdata->connected, | ||
195 | "%s: !REQ and !ACK\n", __func__); | ||
196 | if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) | ||
197 | return 0; | ||
198 | |||
199 | dsprintk(NDEBUG_PSEUDO_DMA, instance, | ||
200 | "%s: bus error (%d/%d)\n", __func__, transferred, len); | ||
201 | NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); | ||
202 | d = dst + transferred; | ||
203 | n = len - transferred; | ||
202 | } | 204 | } |
203 | 205 | ||
204 | CP_IO_TO_MEM(s, d, len); | 206 | scmd_printk(KERN_ERR, hostdata->connected, |
205 | 207 | "%s: phase mismatch or !DRQ\n", __func__); | |
206 | if (len != 0) { | 208 | NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); |
207 | pr_notice("Bus error in macscsi_pread\n"); | 209 | return -1; |
208 | return -1; | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | 210 | } |
213 | 211 | ||
214 | 212 | ||
215 | #define CP_MEM_TO_IO(s,d,len) \ | 213 | #define CP_MEM_TO_IO(s,d,n) \ |
216 | __asm__ __volatile__ \ | 214 | __asm__ __volatile__ \ |
217 | (" cmp.w #4,%2\n" \ | 215 | (" cmp.w #4,%2\n" \ |
218 | " bls 8f\n" \ | 216 | " bls 8f\n" \ |
@@ -249,59 +247,89 @@ __asm__ __volatile__ \ | |||
249 | " 9: \n" \ | 247 | " 9: \n" \ |
250 | ".section .fixup,\"ax\"\n" \ | 248 | ".section .fixup,\"ax\"\n" \ |
251 | " .even\n" \ | 249 | " .even\n" \ |
252 | "90: moveq.l #1, %2\n" \ | 250 | "91: moveq.l #1, %2\n" \ |
251 | " jra 9b\n" \ | ||
252 | "94: moveq.l #4, %2\n" \ | ||
253 | " jra 9b\n" \ | 253 | " jra 9b\n" \ |
254 | ".previous\n" \ | 254 | ".previous\n" \ |
255 | ".section __ex_table,\"a\"\n" \ | 255 | ".section __ex_table,\"a\"\n" \ |
256 | " .align 4\n" \ | 256 | " .align 4\n" \ |
257 | " .long 1b,90b\n" \ | 257 | " .long 1b,91b\n" \ |
258 | " .long 3b,90b\n" \ | 258 | " .long 3b,94b\n" \ |
259 | " .long 31b,90b\n" \ | 259 | " .long 31b,94b\n" \ |
260 | " .long 32b,90b\n" \ | 260 | " .long 32b,94b\n" \ |
261 | " .long 33b,90b\n" \ | 261 | " .long 33b,94b\n" \ |
262 | " .long 34b,90b\n" \ | 262 | " .long 34b,94b\n" \ |
263 | " .long 35b,90b\n" \ | 263 | " .long 35b,94b\n" \ |
264 | " .long 36b,90b\n" \ | 264 | " .long 36b,94b\n" \ |
265 | " .long 37b,90b\n" \ | 265 | " .long 37b,94b\n" \ |
266 | " .long 5b,90b\n" \ | 266 | " .long 5b,94b\n" \ |
267 | " .long 7b,90b\n" \ | 267 | " .long 7b,91b\n" \ |
268 | ".previous" \ | 268 | ".previous" \ |
269 | : "=a"(s), "=a"(d), "=d"(len) \ | 269 | : "=a"(s), "=a"(d), "=d"(n) \ |
270 | : "0"(s), "1"(d), "2"(len) \ | 270 | : "0"(s), "1"(d), "2"(n) \ |
271 | : "d0") | 271 | : "d0") |
272 | 272 | ||
273 | static int macscsi_pwrite(struct Scsi_Host *instance, | 273 | static int macscsi_pwrite(struct Scsi_Host *instance, |
274 | unsigned char *src, int len) | 274 | unsigned char *src, int len) |
275 | { | 275 | { |
276 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 276 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
277 | unsigned char *s; | 277 | unsigned char *s = src; |
278 | unsigned char *d; | 278 | unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); |
279 | 279 | int n = len; | |
280 | s = src; | 280 | int transferred; |
281 | d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); | 281 | |
282 | 282 | while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, | |
283 | /* These conditions are derived from MacOS */ | 283 | BASR_DRQ | BASR_PHASE_MATCH, |
284 | BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { | ||
285 | CP_MEM_TO_IO(s, d, n); | ||
286 | |||
287 | transferred = s - src - n; | ||
288 | hostdata->pdma_residual = len - transferred; | ||
289 | |||
290 | /* Target changed phase early? */ | ||
291 | if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, | ||
292 | BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) | ||
293 | scmd_printk(KERN_ERR, hostdata->connected, | ||
294 | "%s: !REQ and !ACK\n", __func__); | ||
295 | if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) | ||
296 | return 0; | ||
297 | |||
298 | /* No bus error. */ | ||
299 | if (n == 0) { | ||
300 | if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG, | ||
301 | TCR_LAST_BYTE_SENT, | ||
302 | TCR_LAST_BYTE_SENT, HZ / 64) < 0) | ||
303 | scmd_printk(KERN_ERR, hostdata->connected, | ||
304 | "%s: Last Byte Sent timeout\n", __func__); | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | dsprintk(NDEBUG_PSEUDO_DMA, instance, | ||
309 | "%s: bus error (%d/%d)\n", __func__, transferred, len); | ||
310 | NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); | ||
311 | s = src + transferred; | ||
312 | n = len - transferred; | ||
313 | } | ||
284 | 314 | ||
285 | while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && | 315 | scmd_printk(KERN_ERR, hostdata->connected, |
286 | (!(NCR5380_read(STATUS_REG) & SR_REQ) || | 316 | "%s: phase mismatch or !DRQ\n", __func__); |
287 | (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) | 317 | NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); |
288 | ; | ||
289 | 318 | ||
290 | if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { | 319 | return -1; |
291 | pr_err("Error in macscsi_pwrite\n"); | 320 | } |
292 | return -1; | ||
293 | } | ||
294 | 321 | ||
295 | CP_MEM_TO_IO(s, d, len); | 322 | static int macscsi_dma_xfer_len(struct Scsi_Host *instance, |
323 | struct scsi_cmnd *cmd) | ||
324 | { | ||
325 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
296 | 326 | ||
297 | if (len != 0) { | 327 | if (hostdata->flags & FLAG_NO_PSEUDO_DMA || |
298 | pr_notice("Bus error in macscsi_pwrite\n"); | 328 | cmd->SCp.this_residual < 16) |
299 | return -1; | 329 | return 0; |
300 | } | ||
301 | 330 | ||
302 | return 0; | 331 | return cmd->SCp.this_residual; |
303 | } | 332 | } |
304 | #endif | ||
305 | 333 | ||
306 | #include "NCR5380.c" | 334 | #include "NCR5380.c" |
307 | 335 | ||
@@ -311,8 +339,6 @@ static int macscsi_pwrite(struct Scsi_Host *instance, | |||
311 | static struct scsi_host_template mac_scsi_template = { | 339 | static struct scsi_host_template mac_scsi_template = { |
312 | .module = THIS_MODULE, | 340 | .module = THIS_MODULE, |
313 | .proc_name = DRV_MODULE_NAME, | 341 | .proc_name = DRV_MODULE_NAME, |
314 | .show_info = macscsi_show_info, | ||
315 | .write_info = macscsi_write_info, | ||
316 | .name = "Macintosh NCR5380 SCSI", | 342 | .name = "Macintosh NCR5380 SCSI", |
317 | .info = macscsi_info, | 343 | .info = macscsi_info, |
318 | .queuecommand = macscsi_queue_command, | 344 | .queuecommand = macscsi_queue_command, |
@@ -320,7 +346,7 @@ static struct scsi_host_template mac_scsi_template = { | |||
320 | .eh_bus_reset_handler = macscsi_bus_reset, | 346 | .eh_bus_reset_handler = macscsi_bus_reset, |
321 | .can_queue = 16, | 347 | .can_queue = 16, |
322 | .this_id = 7, | 348 | .this_id = 7, |
323 | .sg_tablesize = SG_ALL, | 349 | .sg_tablesize = 1, |
324 | .cmd_per_lun = 2, | 350 | .cmd_per_lun = 2, |
325 | .use_clustering = DISABLE_CLUSTERING, | 351 | .use_clustering = DISABLE_CLUSTERING, |
326 | .cmd_size = NCR5380_CMD_SIZE, | 352 | .cmd_size = NCR5380_CMD_SIZE, |
@@ -338,9 +364,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev) | |||
338 | if (!pio_mem) | 364 | if (!pio_mem) |
339 | return -ENODEV; | 365 | return -ENODEV; |
340 | 366 | ||
341 | #ifdef PSEUDO_DMA | ||
342 | pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 367 | pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
343 | #endif | ||
344 | 368 | ||
345 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 369 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
346 | 370 | ||
@@ -358,8 +382,6 @@ static int __init mac_scsi_probe(struct platform_device *pdev) | |||
358 | mac_scsi_template.sg_tablesize = setup_sg_tablesize; | 382 | mac_scsi_template.sg_tablesize = setup_sg_tablesize; |
359 | if (setup_hostid >= 0) | 383 | if (setup_hostid >= 0) |
360 | mac_scsi_template.this_id = setup_hostid & 7; | 384 | mac_scsi_template.this_id = setup_hostid & 7; |
361 | if (setup_use_pdma < 0) | ||
362 | setup_use_pdma = 0; | ||
363 | 385 | ||
364 | instance = scsi_host_alloc(&mac_scsi_template, | 386 | instance = scsi_host_alloc(&mac_scsi_template, |
365 | sizeof(struct NCR5380_hostdata)); | 387 | sizeof(struct NCR5380_hostdata)); |
@@ -379,12 +401,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) | |||
379 | } else | 401 | } else |
380 | host_flags |= FLAG_NO_PSEUDO_DMA; | 402 | host_flags |= FLAG_NO_PSEUDO_DMA; |
381 | 403 | ||
382 | #ifdef SUPPORT_TAGS | ||
383 | host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; | ||
384 | #endif | ||
385 | host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; | 404 | host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; |
386 | 405 | ||
387 | error = NCR5380_init(instance, host_flags); | 406 | error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); |
388 | if (error) | 407 | if (error) |
389 | goto fail_init; | 408 | goto fail_init; |
390 | 409 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fce414a2cd76..ca86c885dfaa 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -35,8 +35,8 @@ | |||
35 | /* | 35 | /* |
36 | * MegaRAID SAS Driver meta data | 36 | * MegaRAID SAS Driver meta data |
37 | */ | 37 | */ |
38 | #define MEGASAS_VERSION "06.810.09.00-rc1" | 38 | #define MEGASAS_VERSION "06.811.02.00-rc1" |
39 | #define MEGASAS_RELDATE "Jan. 28, 2016" | 39 | #define MEGASAS_RELDATE "April 12, 2016" |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Device IDs | 42 | * Device IDs |
@@ -1344,6 +1344,8 @@ struct megasas_ctrl_info { | |||
1344 | #define SCAN_PD_CHANNEL 0x1 | 1344 | #define SCAN_PD_CHANNEL 0x1 |
1345 | #define SCAN_VD_CHANNEL 0x2 | 1345 | #define SCAN_VD_CHANNEL 0x2 |
1346 | 1346 | ||
1347 | #define MEGASAS_KDUMP_QUEUE_DEPTH 100 | ||
1348 | |||
1347 | enum MR_SCSI_CMD_TYPE { | 1349 | enum MR_SCSI_CMD_TYPE { |
1348 | READ_WRITE_LDIO = 0, | 1350 | READ_WRITE_LDIO = 0, |
1349 | NON_READ_WRITE_LDIO = 1, | 1351 | NON_READ_WRITE_LDIO = 1, |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e6ebc7ae2df1..f4b0690450d2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -2670,17 +2670,6 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) | |||
2670 | } | 2670 | } |
2671 | 2671 | ||
2672 | /** | 2672 | /** |
2673 | * megasas_reset_device - Device reset handler entry point | ||
2674 | */ | ||
2675 | static int megasas_reset_device(struct scsi_cmnd *scmd) | ||
2676 | { | ||
2677 | /* | ||
2678 | * First wait for all commands to complete | ||
2679 | */ | ||
2680 | return megasas_generic_reset(scmd); | ||
2681 | } | ||
2682 | |||
2683 | /** | ||
2684 | * megasas_reset_bus_host - Bus & host reset handler entry point | 2673 | * megasas_reset_bus_host - Bus & host reset handler entry point |
2685 | */ | 2674 | */ |
2686 | static int megasas_reset_bus_host(struct scsi_cmnd *scmd) | 2675 | static int megasas_reset_bus_host(struct scsi_cmnd *scmd) |
@@ -2702,6 +2691,50 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) | |||
2702 | } | 2691 | } |
2703 | 2692 | ||
2704 | /** | 2693 | /** |
2694 | * megasas_task_abort - Issues task abort request to firmware | ||
2695 | * (supported only for fusion adapters) | ||
2696 | * @scmd: SCSI command pointer | ||
2697 | */ | ||
2698 | static int megasas_task_abort(struct scsi_cmnd *scmd) | ||
2699 | { | ||
2700 | int ret; | ||
2701 | struct megasas_instance *instance; | ||
2702 | |||
2703 | instance = (struct megasas_instance *)scmd->device->host->hostdata; | ||
2704 | |||
2705 | if (instance->ctrl_context) | ||
2706 | ret = megasas_task_abort_fusion(scmd); | ||
2707 | else { | ||
2708 | sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); | ||
2709 | ret = FAILED; | ||
2710 | } | ||
2711 | |||
2712 | return ret; | ||
2713 | } | ||
2714 | |||
2715 | /** | ||
2716 | * megasas_reset_target: Issues target reset request to firmware | ||
2717 | * (supported only for fusion adapters) | ||
2718 | * @scmd: SCSI command pointer | ||
2719 | */ | ||
2720 | static int megasas_reset_target(struct scsi_cmnd *scmd) | ||
2721 | { | ||
2722 | int ret; | ||
2723 | struct megasas_instance *instance; | ||
2724 | |||
2725 | instance = (struct megasas_instance *)scmd->device->host->hostdata; | ||
2726 | |||
2727 | if (instance->ctrl_context) | ||
2728 | ret = megasas_reset_target_fusion(scmd); | ||
2729 | else { | ||
2730 | sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); | ||
2731 | ret = FAILED; | ||
2732 | } | ||
2733 | |||
2734 | return ret; | ||
2735 | } | ||
2736 | |||
2737 | /** | ||
2705 | * megasas_bios_param - Returns disk geometry for a disk | 2738 | * megasas_bios_param - Returns disk geometry for a disk |
2706 | * @sdev: device handle | 2739 | * @sdev: device handle |
2707 | * @bdev: block device | 2740 | * @bdev: block device |
@@ -2969,8 +3002,8 @@ static struct scsi_host_template megasas_template = { | |||
2969 | .slave_alloc = megasas_slave_alloc, | 3002 | .slave_alloc = megasas_slave_alloc, |
2970 | .slave_destroy = megasas_slave_destroy, | 3003 | .slave_destroy = megasas_slave_destroy, |
2971 | .queuecommand = megasas_queue_command, | 3004 | .queuecommand = megasas_queue_command, |
2972 | .eh_device_reset_handler = megasas_reset_device, | 3005 | .eh_target_reset_handler = megasas_reset_target, |
2973 | .eh_bus_reset_handler = megasas_reset_bus_host, | 3006 | .eh_abort_handler = megasas_task_abort, |
2974 | .eh_host_reset_handler = megasas_reset_bus_host, | 3007 | .eh_host_reset_handler = megasas_reset_bus_host, |
2975 | .eh_timed_out = megasas_reset_timer, | 3008 | .eh_timed_out = megasas_reset_timer, |
2976 | .shost_attrs = megaraid_host_attrs, | 3009 | .shost_attrs = megaraid_host_attrs, |
@@ -5152,7 +5185,7 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
5152 | 5185 | ||
5153 | instance->instancet->enable_intr(instance); | 5186 | instance->instancet->enable_intr(instance); |
5154 | 5187 | ||
5155 | dev_err(&instance->pdev->dev, "INIT adapter done\n"); | 5188 | dev_info(&instance->pdev->dev, "INIT adapter done\n"); |
5156 | 5189 | ||
5157 | megasas_setup_jbod_map(instance); | 5190 | megasas_setup_jbod_map(instance); |
5158 | 5191 | ||
@@ -5598,14 +5631,6 @@ static int megasas_io_attach(struct megasas_instance *instance) | |||
5598 | host->max_lun = MEGASAS_MAX_LUN; | 5631 | host->max_lun = MEGASAS_MAX_LUN; |
5599 | host->max_cmd_len = 16; | 5632 | host->max_cmd_len = 16; |
5600 | 5633 | ||
5601 | /* Fusion only supports host reset */ | ||
5602 | if (instance->ctrl_context) { | ||
5603 | host->hostt->eh_device_reset_handler = NULL; | ||
5604 | host->hostt->eh_bus_reset_handler = NULL; | ||
5605 | host->hostt->eh_target_reset_handler = megasas_reset_target_fusion; | ||
5606 | host->hostt->eh_abort_handler = megasas_task_abort_fusion; | ||
5607 | } | ||
5608 | |||
5609 | /* | 5634 | /* |
5610 | * Notify the mid-layer about the new controller | 5635 | * Notify the mid-layer about the new controller |
5611 | */ | 5636 | */ |
@@ -5761,13 +5786,6 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5761 | break; | 5786 | break; |
5762 | } | 5787 | } |
5763 | 5788 | ||
5764 | instance->system_info_buf = pci_zalloc_consistent(pdev, | ||
5765 | sizeof(struct MR_DRV_SYSTEM_INFO), | ||
5766 | &instance->system_info_h); | ||
5767 | |||
5768 | if (!instance->system_info_buf) | ||
5769 | dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); | ||
5770 | |||
5771 | /* Crash dump feature related initialisation*/ | 5789 | /* Crash dump feature related initialisation*/ |
5772 | instance->drv_buf_index = 0; | 5790 | instance->drv_buf_index = 0; |
5773 | instance->drv_buf_alloc = 0; | 5791 | instance->drv_buf_alloc = 0; |
@@ -5777,14 +5795,6 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5777 | spin_lock_init(&instance->crashdump_lock); | 5795 | spin_lock_init(&instance->crashdump_lock); |
5778 | instance->crash_dump_buf = NULL; | 5796 | instance->crash_dump_buf = NULL; |
5779 | 5797 | ||
5780 | if (!reset_devices) | ||
5781 | instance->crash_dump_buf = pci_alloc_consistent(pdev, | ||
5782 | CRASH_DMA_BUF_SIZE, | ||
5783 | &instance->crash_dump_h); | ||
5784 | if (!instance->crash_dump_buf) | ||
5785 | dev_err(&pdev->dev, "Can't allocate Firmware " | ||
5786 | "crash dump DMA buffer\n"); | ||
5787 | |||
5788 | megasas_poll_wait_aen = 0; | 5798 | megasas_poll_wait_aen = 0; |
5789 | instance->flag_ieee = 0; | 5799 | instance->flag_ieee = 0; |
5790 | instance->ev = NULL; | 5800 | instance->ev = NULL; |
@@ -5803,11 +5813,26 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5803 | goto fail_alloc_dma_buf; | 5813 | goto fail_alloc_dma_buf; |
5804 | } | 5814 | } |
5805 | 5815 | ||
5806 | instance->pd_info = pci_alloc_consistent(pdev, | 5816 | if (!reset_devices) { |
5807 | sizeof(struct MR_PD_INFO), &instance->pd_info_h); | 5817 | instance->system_info_buf = pci_zalloc_consistent(pdev, |
5818 | sizeof(struct MR_DRV_SYSTEM_INFO), | ||
5819 | &instance->system_info_h); | ||
5820 | if (!instance->system_info_buf) | ||
5821 | dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); | ||
5822 | |||
5823 | instance->pd_info = pci_alloc_consistent(pdev, | ||
5824 | sizeof(struct MR_PD_INFO), &instance->pd_info_h); | ||
5808 | 5825 | ||
5809 | if (!instance->pd_info) | 5826 | if (!instance->pd_info) |
5810 | dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); | 5827 | dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); |
5828 | |||
5829 | instance->crash_dump_buf = pci_alloc_consistent(pdev, | ||
5830 | CRASH_DMA_BUF_SIZE, | ||
5831 | &instance->crash_dump_h); | ||
5832 | if (!instance->crash_dump_buf) | ||
5833 | dev_err(&pdev->dev, "Can't allocate Firmware " | ||
5834 | "crash dump DMA buffer\n"); | ||
5835 | } | ||
5811 | 5836 | ||
5812 | /* | 5837 | /* |
5813 | * Initialize locks and queues | 5838 | * Initialize locks and queues |
@@ -7174,6 +7199,16 @@ static int __init megasas_init(void) | |||
7174 | int rval; | 7199 | int rval; |
7175 | 7200 | ||
7176 | /* | 7201 | /* |
7202 | * Booted in kdump kernel, minimize memory footprints by | ||
7203 | * disabling few features | ||
7204 | */ | ||
7205 | if (reset_devices) { | ||
7206 | msix_vectors = 1; | ||
7207 | rdpq_enable = 0; | ||
7208 | dual_qdepth_disable = 1; | ||
7209 | } | ||
7210 | |||
7211 | /* | ||
7177 | * Announce driver version and other information | 7212 | * Announce driver version and other information |
7178 | */ | 7213 | */ |
7179 | pr_info("megasas: %s\n", MEGASAS_VERSION); | 7214 | pr_info("megasas: %s\n", MEGASAS_VERSION); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 98a848bdfdc2..ec837544f784 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -257,6 +257,9 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c | |||
257 | if (!instance->is_rdpq) | 257 | if (!instance->is_rdpq) |
258 | instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); | 258 | instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); |
259 | 259 | ||
260 | if (reset_devices) | ||
261 | instance->max_fw_cmds = min(instance->max_fw_cmds, | ||
262 | (u16)MEGASAS_KDUMP_QUEUE_DEPTH); | ||
260 | /* | 263 | /* |
261 | * Reduce the max supported cmds by 1. This is to ensure that the | 264 | * Reduce the max supported cmds by 1. This is to ensure that the |
262 | * reply_q_sz (1 more than the max cmd that driver may send) | 265 | * reply_q_sz (1 more than the max cmd that driver may send) |
@@ -851,7 +854,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
851 | ret = 1; | 854 | ret = 1; |
852 | goto fail_fw_init; | 855 | goto fail_fw_init; |
853 | } | 856 | } |
854 | dev_err(&instance->pdev->dev, "Init cmd success\n"); | 857 | dev_info(&instance->pdev->dev, "Init cmd success\n"); |
855 | 858 | ||
856 | ret = 0; | 859 | ret = 0; |
857 | 860 | ||
@@ -2759,6 +2762,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, | |||
2759 | dev_warn(&instance->pdev->dev, "Found FW in FAULT state," | 2762 | dev_warn(&instance->pdev->dev, "Found FW in FAULT state," |
2760 | " will reset adapter scsi%d.\n", | 2763 | " will reset adapter scsi%d.\n", |
2761 | instance->host->host_no); | 2764 | instance->host->host_no); |
2765 | megasas_complete_cmd_dpc_fusion((unsigned long)instance); | ||
2762 | retval = 1; | 2766 | retval = 1; |
2763 | goto out; | 2767 | goto out; |
2764 | } | 2768 | } |
@@ -2766,6 +2770,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, | |||
2766 | if (reason == MFI_IO_TIMEOUT_OCR) { | 2770 | if (reason == MFI_IO_TIMEOUT_OCR) { |
2767 | dev_info(&instance->pdev->dev, | 2771 | dev_info(&instance->pdev->dev, |
2768 | "MFI IO is timed out, initiating OCR\n"); | 2772 | "MFI IO is timed out, initiating OCR\n"); |
2773 | megasas_complete_cmd_dpc_fusion((unsigned long)instance); | ||
2769 | retval = 1; | 2774 | retval = 1; |
2770 | goto out; | 2775 | goto out; |
2771 | } | 2776 | } |
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index dfad5b8c1890..a9a659fc2812 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
10 | * | 10 | * |
11 | * mpi2.h Version: 02.00.39 | 11 | * mpi2.h Version: 02.00.42 |
12 | * | 12 | * |
13 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 | 13 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 |
14 | * prefix are for use only on MPI v2.5 products, and must not be used | 14 | * prefix are for use only on MPI v2.5 products, and must not be used |
@@ -100,6 +100,9 @@ | |||
100 | * Added MPI2_DIAG_SBR_RELOAD. | 100 | * Added MPI2_DIAG_SBR_RELOAD. |
101 | * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. | 101 | * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. |
102 | * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. | 102 | * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. |
103 | * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. | ||
104 | * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT | ||
105 | * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT | ||
103 | * -------------------------------------------------------------------------- | 106 | * -------------------------------------------------------------------------- |
104 | */ | 107 | */ |
105 | 108 | ||
@@ -139,7 +142,7 @@ | |||
139 | #define MPI2_VERSION_02_06 (0x0206) | 142 | #define MPI2_VERSION_02_06 (0x0206) |
140 | 143 | ||
141 | /*Unit and Dev versioning for this MPI header set */ | 144 | /*Unit and Dev versioning for this MPI header set */ |
142 | #define MPI2_HEADER_VERSION_UNIT (0x27) | 145 | #define MPI2_HEADER_VERSION_UNIT (0x2A) |
143 | #define MPI2_HEADER_VERSION_DEV (0x00) | 146 | #define MPI2_HEADER_VERSION_DEV (0x00) |
144 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 147 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
145 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 148 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 9cf09bf7c4a8..95356a82ee99 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
8 | * | 8 | * |
9 | * mpi2_cnfg.h Version: 02.00.33 | 9 | * mpi2_cnfg.h Version: 02.00.35 |
10 | * | 10 | * |
11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 | 11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 |
12 | * prefix are for use only on MPI v2.5 products, and must not be used | 12 | * prefix are for use only on MPI v2.5 products, and must not be used |
@@ -183,9 +183,12 @@ | |||
183 | * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. | 183 | * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. |
184 | * Added AdapterOrderAux fields to BIOS Page 3. | 184 | * Added AdapterOrderAux fields to BIOS Page 3. |
185 | * 03-16-15 02.00.31 Updated for MPI v2.6. | 185 | * 03-16-15 02.00.31 Updated for MPI v2.6. |
186 | * Added Flags field to IO Unit Page 7. | ||
186 | * Added new SAS Phy Event codes | 187 | * Added new SAS Phy Event codes |
187 | * 05-25-15 02.00.33 Added more defines for the BiosOptions field of | 188 | * 05-25-15 02.00.33 Added more defines for the BiosOptions field of |
188 | * MPI2_CONFIG_PAGE_BIOS_1. | 189 | * MPI2_CONFIG_PAGE_BIOS_1. |
190 | * 08-25-15 02.00.34 Bumped Header Version. | ||
191 | * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. | ||
189 | * -------------------------------------------------------------------------- | 192 | * -------------------------------------------------------------------------- |
190 | */ | 193 | */ |
191 | 194 | ||
@@ -958,13 +961,16 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { | |||
958 | U8 Reserved3; /*0x17 */ | 961 | U8 Reserved3; /*0x17 */ |
959 | U32 BoardPowerRequirement; /*0x18 */ | 962 | U32 BoardPowerRequirement; /*0x18 */ |
960 | U32 PCISlotPowerAllocation; /*0x1C */ | 963 | U32 PCISlotPowerAllocation; /*0x1C */ |
961 | U32 Reserved6; /* 0x20 */ | 964 | /* reserved prior to MPI v2.6 */ |
962 | U32 Reserved7; /* 0x24 */ | 965 | U8 Flags; /* 0x20 */ |
966 | U8 Reserved6; /* 0x21 */ | ||
967 | U16 Reserved7; /* 0x22 */ | ||
968 | U32 Reserved8; /* 0x24 */ | ||
963 | } MPI2_CONFIG_PAGE_IO_UNIT_7, | 969 | } MPI2_CONFIG_PAGE_IO_UNIT_7, |
964 | *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, | 970 | *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, |
965 | Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; | 971 | Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; |
966 | 972 | ||
967 | #define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) | 973 | #define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) |
968 | 974 | ||
969 | /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ | 975 | /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ |
970 | #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) | 976 | #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) |
@@ -1045,6 +1051,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { | |||
1045 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) | 1051 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) |
1046 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) | 1052 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) |
1047 | 1053 | ||
1054 | /* defines for IO Unit Page 7 Flags field */ | ||
1055 | #define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) | ||
1048 | 1056 | ||
1049 | /*IO Unit Page 8 */ | 1057 | /*IO Unit Page 8 */ |
1050 | 1058 | ||
@@ -2271,7 +2279,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { | |||
2271 | U8 | 2279 | U8 |
2272 | BootDeviceWaitTime; /*0x24 */ | 2280 | BootDeviceWaitTime; /*0x24 */ |
2273 | U8 | 2281 | U8 |
2274 | Reserved4; /*0x25 */ | 2282 | SATADeviceWaitTime; /*0x25 */ |
2275 | U16 | 2283 | U16 |
2276 | Reserved5; /*0x26 */ | 2284 | Reserved5; /*0x26 */ |
2277 | U8 | 2285 | U8 |
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index c38f624b859d..bba56b61d36c 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI SCSI initiator mode messages and structures | 6 | * Title: MPI SCSI initiator mode messages and structures |
7 | * Creation Date: June 23, 2006 | 7 | * Creation Date: June 23, 2006 |
8 | * | 8 | * |
9 | * mpi2_init.h Version: 02.00.17 | 9 | * mpi2_init.h Version: 02.00.20 |
10 | * | 10 | * |
11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 | 11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 |
12 | * prefix are for use only on MPI v2.5 products, and must not be used | 12 | * prefix are for use only on MPI v2.5 products, and must not be used |
@@ -51,6 +51,9 @@ | |||
51 | * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. | 51 | * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. |
52 | * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and | 52 | * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and |
53 | * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. | 53 | * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. |
54 | * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. | ||
55 | * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. | ||
56 | * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. | ||
54 | * -------------------------------------------------------------------------- | 57 | * -------------------------------------------------------------------------- |
55 | */ | 58 | */ |
56 | 59 | ||
@@ -359,8 +362,14 @@ typedef struct _MPI2_SCSI_IO_REPLY { | |||
359 | U16 TaskTag; /*0x20 */ | 362 | U16 TaskTag; /*0x20 */ |
360 | U16 SCSIStatusQualifier; /* 0x22 */ | 363 | U16 SCSIStatusQualifier; /* 0x22 */ |
361 | U32 BidirectionalTransferCount; /*0x24 */ | 364 | U32 BidirectionalTransferCount; /*0x24 */ |
362 | U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ | 365 | /* MPI 2.5+ only; Reserved in MPI 2.0 */ |
363 | U32 Reserved6; /*0x2C */ | 366 | U32 EEDPErrorOffset; /* 0x28 */ |
367 | /* MPI 2.5+ only; Reserved in MPI 2.0 */ | ||
368 | U16 EEDPObservedAppTag; /* 0x2C */ | ||
369 | /* MPI 2.5+ only; Reserved in MPI 2.0 */ | ||
370 | U16 EEDPObservedGuard; /* 0x2E */ | ||
371 | /* MPI 2.5+ only; Reserved in MPI 2.0 */ | ||
372 | U32 EEDPObservedRefTag; /* 0x30 */ | ||
364 | } MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, | 373 | } MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, |
365 | Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; | 374 | Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; |
366 | 375 | ||
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index cf510ed91924..8bae305bc156 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
8 | * | 8 | * |
9 | * mpi2_ioc.h Version: 02.00.26 | 9 | * mpi2_ioc.h Version: 02.00.27 |
10 | * | 10 | * |
11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 | 11 | * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 |
12 | * prefix are for use only on MPI v2.5 products, and must not be used | 12 | * prefix are for use only on MPI v2.5 products, and must not be used |
@@ -134,9 +134,13 @@ | |||
134 | * Added Encrypted Hash Extended Image. | 134 | * Added Encrypted Hash Extended Image. |
135 | * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. | 135 | * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. |
136 | * 11-18-14 02.00.25 Updated copyright information. | 136 | * 11-18-14 02.00.25 Updated copyright information. |
137 | * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and | 137 | * 03-16-15 02.00.26 Updated for MPI v2.6. |
138 | * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and | ||
139 | * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. | ||
140 | * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and | ||
138 | * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. | 141 | * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. |
139 | * Added MPI26_CTRL_OP_SHUTDOWN. | 142 | * Added MPI26_CTRL_OP_SHUTDOWN. |
143 | * 08-25-15 02.00.27 Added IC ARCH Class based signature defines | ||
140 | * -------------------------------------------------------------------------- | 144 | * -------------------------------------------------------------------------- |
141 | */ | 145 | */ |
142 | 146 | ||
@@ -168,7 +172,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST { | |||
168 | U16 MsgVersion; /*0x0C */ | 172 | U16 MsgVersion; /*0x0C */ |
169 | U16 HeaderVersion; /*0x0E */ | 173 | U16 HeaderVersion; /*0x0E */ |
170 | U32 Reserved5; /*0x10 */ | 174 | U32 Reserved5; /*0x10 */ |
171 | U16 Reserved6; /*0x14 */ | 175 | U16 ConfigurationFlags; /* 0x14 */ |
172 | U8 HostPageSize; /*0x16 */ | 176 | U8 HostPageSize; /*0x16 */ |
173 | U8 HostMSIxVectors; /*0x17 */ | 177 | U8 HostMSIxVectors; /*0x17 */ |
174 | U16 Reserved8; /*0x18 */ | 178 | U16 Reserved8; /*0x18 */ |
@@ -516,6 +520,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { | |||
516 | #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) | 520 | #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) |
517 | #define MPI2_EVENT_HOST_MESSAGE (0x0028) | 521 | #define MPI2_EVENT_HOST_MESSAGE (0x0028) |
518 | #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) | 522 | #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) |
523 | #define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) | ||
519 | #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) | 524 | #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) |
520 | #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) | 525 | #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) |
521 | 526 | ||
@@ -580,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { | |||
580 | } MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, | 585 | } MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, |
581 | Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; | 586 | Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; |
582 | 587 | ||
583 | /*Power Performance Change Event */ | 588 | /*Power Performance Change Event data */ |
584 | 589 | ||
585 | typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { | 590 | typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { |
586 | U8 CurrentPowerMode; /*0x00 */ | 591 | U8 CurrentPowerMode; /*0x00 */ |
@@ -605,6 +610,21 @@ typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { | |||
605 | #define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) | 610 | #define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) |
606 | #define MPI2_EVENT_PM_MODE_STANDBY (0x06) | 611 | #define MPI2_EVENT_PM_MODE_STANDBY (0x06) |
607 | 612 | ||
613 | /* Active Cable Exception Event data */ | ||
614 | |||
615 | typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { | ||
616 | U32 ActiveCablePowerRequirement; /* 0x00 */ | ||
617 | U8 ReasonCode; /* 0x04 */ | ||
618 | U8 ReceptacleID; /* 0x05 */ | ||
619 | U16 Reserved1; /* 0x06 */ | ||
620 | } MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, | ||
621 | *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, | ||
622 | Mpi26EventDataActiveCableExcept_t, | ||
623 | *pMpi26EventDataActiveCableExcept_t; | ||
624 | |||
625 | /* defines for ReasonCode field */ | ||
626 | #define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) | ||
627 | |||
608 | /*Hard Reset Received Event data */ | 628 | /*Hard Reset Received Event data */ |
609 | 629 | ||
610 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { | 630 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { |
@@ -1366,7 +1386,16 @@ typedef struct _MPI2_FW_IMAGE_HEADER { | |||
1366 | /*Signature0 field */ | 1386 | /*Signature0 field */ |
1367 | #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) | 1387 | #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) |
1368 | #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) | 1388 | #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) |
1369 | #define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A) | 1389 | /* Last byte is defined by architecture */ |
1390 | #define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) | ||
1391 | #define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) | ||
1392 | #define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) | ||
1393 | #define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) | ||
1394 | /* legacy (0x5AEAA55A) */ | ||
1395 | #define MPI26_FW_HEADER_SIGNATURE0 \ | ||
1396 | (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0) | ||
1397 | #define MPI26_FW_HEADER_SIGNATURE0_3516 \ | ||
1398 | (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1) | ||
1370 | 1399 | ||
1371 | /*Signature1 field */ | 1400 | /*Signature1 field */ |
1372 | #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) | 1401 | #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) |
@@ -1778,6 +1807,7 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { | |||
1778 | #define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) | 1807 | #define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) |
1779 | #define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) | 1808 | #define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) |
1780 | #define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) | 1809 | #define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) |
1810 | #define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) | ||
1781 | #define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) | 1811 | #define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) |
1782 | #define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) | 1812 | #define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) |
1783 | #define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) | 1813 | #define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 8c44b9c424af..751f13edece0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/dma-mapping.h> | 57 | #include <linux/dma-mapping.h> |
58 | #include <linux/io.h> | 58 | #include <linux/io.h> |
59 | #include <linux/time.h> | 59 | #include <linux/time.h> |
60 | #include <linux/ktime.h> | ||
60 | #include <linux/kthread.h> | 61 | #include <linux/kthread.h> |
61 | #include <linux/aer.h> | 62 | #include <linux/aer.h> |
62 | 63 | ||
@@ -654,6 +655,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, | |||
654 | case MPI2_EVENT_TEMP_THRESHOLD: | 655 | case MPI2_EVENT_TEMP_THRESHOLD: |
655 | desc = "Temperature Threshold"; | 656 | desc = "Temperature Threshold"; |
656 | break; | 657 | break; |
658 | case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: | ||
659 | desc = "Active cable exception"; | ||
660 | break; | ||
657 | } | 661 | } |
658 | 662 | ||
659 | if (!desc) | 663 | if (!desc) |
@@ -1100,18 +1104,16 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) | |||
1100 | } | 1104 | } |
1101 | 1105 | ||
1102 | /** | 1106 | /** |
1103 | * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues | 1107 | * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts |
1104 | * @ioc: per adapter object | 1108 | * @ioc: per adapter object |
1105 | * Context: ISR conext | 1109 | * Context: non ISR conext |
1106 | * | 1110 | * |
1107 | * Called when a Task Management request has completed. We want | 1111 | * Called when a Task Management request has completed. |
1108 | * to flush the other reply queues so all the outstanding IO has been | ||
1109 | * completed back to OS before we process the TM completetion. | ||
1110 | * | 1112 | * |
1111 | * Return nothing. | 1113 | * Return nothing. |
1112 | */ | 1114 | */ |
1113 | void | 1115 | void |
1114 | mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) | 1116 | mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) |
1115 | { | 1117 | { |
1116 | struct adapter_reply_queue *reply_q; | 1118 | struct adapter_reply_queue *reply_q; |
1117 | 1119 | ||
@@ -1122,12 +1124,13 @@ mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |||
1122 | return; | 1124 | return; |
1123 | 1125 | ||
1124 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | 1126 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
1125 | if (ioc->shost_recovery) | 1127 | if (ioc->shost_recovery || ioc->remove_host || |
1128 | ioc->pci_error_recovery) | ||
1126 | return; | 1129 | return; |
1127 | /* TMs are on msix_index == 0 */ | 1130 | /* TMs are on msix_index == 0 */ |
1128 | if (reply_q->msix_index == 0) | 1131 | if (reply_q->msix_index == 0) |
1129 | continue; | 1132 | continue; |
1130 | _base_interrupt(reply_q->vector, (void *)reply_q); | 1133 | synchronize_irq(reply_q->vector); |
1131 | } | 1134 | } |
1132 | } | 1135 | } |
1133 | 1136 | ||
@@ -3207,10 +3210,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |||
3207 | sg_tablesize = MPT_MIN_PHYS_SEGMENTS; | 3210 | sg_tablesize = MPT_MIN_PHYS_SEGMENTS; |
3208 | else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { | 3211 | else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { |
3209 | sg_tablesize = min_t(unsigned short, sg_tablesize, | 3212 | sg_tablesize = min_t(unsigned short, sg_tablesize, |
3210 | SCSI_MAX_SG_CHAIN_SEGMENTS); | 3213 | SG_MAX_SEGMENTS); |
3211 | pr_warn(MPT3SAS_FMT | 3214 | pr_warn(MPT3SAS_FMT |
3212 | "sg_tablesize(%u) is bigger than kernel" | 3215 | "sg_tablesize(%u) is bigger than kernel" |
3213 | " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, | 3216 | " defined SG_CHUNK_SIZE(%u)\n", ioc->name, |
3214 | sg_tablesize, MPT_MAX_PHYS_SEGMENTS); | 3217 | sg_tablesize, MPT_MAX_PHYS_SEGMENTS); |
3215 | } | 3218 | } |
3216 | ioc->shost->sg_tablesize = sg_tablesize; | 3219 | ioc->shost->sg_tablesize = sg_tablesize; |
@@ -4387,7 +4390,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |||
4387 | Mpi2IOCInitRequest_t mpi_request; | 4390 | Mpi2IOCInitRequest_t mpi_request; |
4388 | Mpi2IOCInitReply_t mpi_reply; | 4391 | Mpi2IOCInitReply_t mpi_reply; |
4389 | int i, r = 0; | 4392 | int i, r = 0; |
4390 | struct timeval current_time; | 4393 | ktime_t current_time; |
4391 | u16 ioc_status; | 4394 | u16 ioc_status; |
4392 | u32 reply_post_free_array_sz = 0; | 4395 | u32 reply_post_free_array_sz = 0; |
4393 | Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; | 4396 | Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; |
@@ -4449,9 +4452,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |||
4449 | /* This time stamp specifies number of milliseconds | 4452 | /* This time stamp specifies number of milliseconds |
4450 | * since epoch ~ midnight January 1, 1970. | 4453 | * since epoch ~ midnight January 1, 1970. |
4451 | */ | 4454 | */ |
4452 | do_gettimeofday(¤t_time); | 4455 | current_time = ktime_get_real(); |
4453 | mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + | 4456 | mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); |
4454 | (current_time.tv_usec / 1000)); | ||
4455 | 4457 | ||
4456 | if (ioc->logging_level & MPT_DEBUG_INIT) { | 4458 | if (ioc->logging_level & MPT_DEBUG_INIT) { |
4457 | __le32 *mfp; | 4459 | __le32 *mfp; |
@@ -5424,6 +5426,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
5424 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 5426 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
5425 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 5427 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
5426 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); | 5428 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); |
5429 | if (ioc->hba_mpi_version_belonged == MPI26_VERSION) | ||
5430 | _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); | ||
5427 | 5431 | ||
5428 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 5432 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
5429 | if (r) | 5433 | if (r) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 32580b514b18..892c9be008b5 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -73,8 +73,8 @@ | |||
73 | #define MPT3SAS_DRIVER_NAME "mpt3sas" | 73 | #define MPT3SAS_DRIVER_NAME "mpt3sas" |
74 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" | 74 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
75 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" | 75 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" |
76 | #define MPT3SAS_DRIVER_VERSION "12.100.00.00" | 76 | #define MPT3SAS_DRIVER_VERSION "13.100.00.00" |
77 | #define MPT3SAS_MAJOR_VERSION 12 | 77 | #define MPT3SAS_MAJOR_VERSION 13 |
78 | #define MPT3SAS_MINOR_VERSION 100 | 78 | #define MPT3SAS_MINOR_VERSION 100 |
79 | #define MPT3SAS_BUILD_VERSION 0 | 79 | #define MPT3SAS_BUILD_VERSION 0 |
80 | #define MPT3SAS_RELEASE_VERSION 00 | 80 | #define MPT3SAS_RELEASE_VERSION 00 |
@@ -90,7 +90,7 @@ | |||
90 | /* | 90 | /* |
91 | * Set MPT3SAS_SG_DEPTH value based on user input. | 91 | * Set MPT3SAS_SG_DEPTH value based on user input. |
92 | */ | 92 | */ |
93 | #define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS | 93 | #define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE |
94 | #define MPT_MIN_PHYS_SEGMENTS 16 | 94 | #define MPT_MIN_PHYS_SEGMENTS 16 |
95 | 95 | ||
96 | #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE | 96 | #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE |
@@ -112,6 +112,8 @@ | |||
112 | #define MPT3SAS_SAS_QUEUE_DEPTH 254 | 112 | #define MPT3SAS_SAS_QUEUE_DEPTH 254 |
113 | #define MPT3SAS_RAID_QUEUE_DEPTH 128 | 113 | #define MPT3SAS_RAID_QUEUE_DEPTH 128 |
114 | 114 | ||
115 | #define MPT3SAS_RAID_MAX_SECTORS 8192 | ||
116 | |||
115 | #define MPT_NAME_LENGTH 32 /* generic length of strings */ | 117 | #define MPT_NAME_LENGTH 32 /* generic length of strings */ |
116 | #define MPT_STRING_LENGTH 64 | 118 | #define MPT_STRING_LENGTH 64 |
117 | 119 | ||
@@ -1234,7 +1236,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); | |||
1234 | void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); | 1236 | void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); |
1235 | __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, | 1237 | __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, |
1236 | u16 smid); | 1238 | u16 smid); |
1237 | void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc); | 1239 | |
1240 | void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); | ||
1238 | 1241 | ||
1239 | /* hi-priority queue */ | 1242 | /* hi-priority queue */ |
1240 | u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); | 1243 | u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index e0e4920d0fa6..6a4df5a315e9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -174,13 +174,13 @@ struct sense_info { | |||
174 | * struct fw_event_work - firmware event struct | 174 | * struct fw_event_work - firmware event struct |
175 | * @list: link list framework | 175 | * @list: link list framework |
176 | * @work: work object (ioc->fault_reset_work_q) | 176 | * @work: work object (ioc->fault_reset_work_q) |
177 | * @cancel_pending_work: flag set during reset handling | ||
178 | * @ioc: per adapter object | 177 | * @ioc: per adapter object |
179 | * @device_handle: device handle | 178 | * @device_handle: device handle |
180 | * @VF_ID: virtual function id | 179 | * @VF_ID: virtual function id |
181 | * @VP_ID: virtual port id | 180 | * @VP_ID: virtual port id |
182 | * @ignore: flag meaning this event has been marked to ignore | 181 | * @ignore: flag meaning this event has been marked to ignore |
183 | * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h | 182 | * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h |
183 | * @refcount: kref for this event | ||
184 | * @event_data: reply event data payload follows | 184 | * @event_data: reply event data payload follows |
185 | * | 185 | * |
186 | * This object stored on ioc->fw_event_list. | 186 | * This object stored on ioc->fw_event_list. |
@@ -188,8 +188,6 @@ struct sense_info { | |||
188 | struct fw_event_work { | 188 | struct fw_event_work { |
189 | struct list_head list; | 189 | struct list_head list; |
190 | struct work_struct work; | 190 | struct work_struct work; |
191 | u8 cancel_pending_work; | ||
192 | struct delayed_work delayed_work; | ||
193 | 191 | ||
194 | struct MPT3SAS_ADAPTER *ioc; | 192 | struct MPT3SAS_ADAPTER *ioc; |
195 | u16 device_handle; | 193 | u16 device_handle; |
@@ -1911,6 +1909,14 @@ scsih_slave_configure(struct scsi_device *sdev) | |||
1911 | (unsigned long long)raid_device->wwid, | 1909 | (unsigned long long)raid_device->wwid, |
1912 | raid_device->num_pds, ds); | 1910 | raid_device->num_pds, ds); |
1913 | 1911 | ||
1912 | if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { | ||
1913 | blk_queue_max_hw_sectors(sdev->request_queue, | ||
1914 | MPT3SAS_RAID_MAX_SECTORS); | ||
1915 | sdev_printk(KERN_INFO, sdev, | ||
1916 | "Set queue's max_sector to: %u\n", | ||
1917 | MPT3SAS_RAID_MAX_SECTORS); | ||
1918 | } | ||
1919 | |||
1914 | scsih_change_queue_depth(sdev, qdepth); | 1920 | scsih_change_queue_depth(sdev, qdepth); |
1915 | 1921 | ||
1916 | /* raid transport support */ | 1922 | /* raid transport support */ |
@@ -2118,7 +2124,6 @@ _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
2118 | return 1; | 2124 | return 1; |
2119 | if (ioc->tm_cmds.smid != smid) | 2125 | if (ioc->tm_cmds.smid != smid) |
2120 | return 1; | 2126 | return 1; |
2121 | mpt3sas_base_flush_reply_queues(ioc); | ||
2122 | ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; | 2127 | ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; |
2123 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | 2128 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); |
2124 | if (mpi_reply) { | 2129 | if (mpi_reply) { |
@@ -2303,6 +2308,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, | |||
2303 | } | 2308 | } |
2304 | } | 2309 | } |
2305 | 2310 | ||
2311 | /* sync IRQs in case those were busy during flush. */ | ||
2312 | mpt3sas_base_sync_reply_irqs(ioc); | ||
2313 | |||
2306 | if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { | 2314 | if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { |
2307 | mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); | 2315 | mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); |
2308 | mpi_reply = ioc->tm_cmds.reply; | 2316 | mpi_reply = ioc->tm_cmds.reply; |
@@ -2804,12 +2812,12 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) | |||
2804 | /* | 2812 | /* |
2805 | * Wait on the fw_event to complete. If this returns 1, then | 2813 | * Wait on the fw_event to complete. If this returns 1, then |
2806 | * the event was never executed, and we need a put for the | 2814 | * the event was never executed, and we need a put for the |
2807 | * reference the delayed_work had on the fw_event. | 2815 | * reference the work had on the fw_event. |
2808 | * | 2816 | * |
2809 | * If it did execute, we wait for it to finish, and the put will | 2817 | * If it did execute, we wait for it to finish, and the put will |
2810 | * happen from _firmware_event_work() | 2818 | * happen from _firmware_event_work() |
2811 | */ | 2819 | */ |
2812 | if (cancel_delayed_work_sync(&fw_event->delayed_work)) | 2820 | if (cancel_work_sync(&fw_event->work)) |
2813 | fw_event_work_put(fw_event); | 2821 | fw_event_work_put(fw_event); |
2814 | 2822 | ||
2815 | fw_event_work_put(fw_event); | 2823 | fw_event_work_put(fw_event); |
@@ -3961,7 +3969,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
3961 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | | 3969 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | |
3962 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; | 3970 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; |
3963 | mpi_request->CDB.EEDP32.PrimaryReferenceTag = | 3971 | mpi_request->CDB.EEDP32.PrimaryReferenceTag = |
3964 | cpu_to_be32(scsi_get_lba(scmd)); | 3972 | cpu_to_be32(scsi_prot_ref_tag(scmd)); |
3965 | break; | 3973 | break; |
3966 | 3974 | ||
3967 | case SCSI_PROT_DIF_TYPE3: | 3975 | case SCSI_PROT_DIF_TYPE3: |
@@ -7850,6 +7858,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
7850 | Mpi2EventNotificationReply_t *mpi_reply; | 7858 | Mpi2EventNotificationReply_t *mpi_reply; |
7851 | u16 event; | 7859 | u16 event; |
7852 | u16 sz; | 7860 | u16 sz; |
7861 | Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; | ||
7853 | 7862 | ||
7854 | /* events turned off due to host reset or driver unloading */ | 7863 | /* events turned off due to host reset or driver unloading */ |
7855 | if (ioc->remove_host || ioc->pci_error_recovery) | 7864 | if (ioc->remove_host || ioc->pci_error_recovery) |
@@ -7962,6 +7971,18 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
7962 | (Mpi2EventDataTemperature_t *) | 7971 | (Mpi2EventDataTemperature_t *) |
7963 | mpi_reply->EventData); | 7972 | mpi_reply->EventData); |
7964 | break; | 7973 | break; |
7974 | case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: | ||
7975 | ActiveCableEventData = | ||
7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; | ||
7977 | if (ActiveCableEventData->ReasonCode == | ||
7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) | ||
7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", | ||
7980 | ioc->name, ActiveCableEventData->ReceptacleID); | ||
7981 | pr_info("cannot be powered and devices connected to this active cable"); | ||
7982 | pr_info("will not be seen. This active cable"); | ||
7983 | pr_info("requires %d mW of power", | ||
7984 | ActiveCableEventData->ActiveCablePowerRequirement); | ||
7985 | break; | ||
7965 | 7986 | ||
7966 | default: /* ignore the rest */ | 7987 | default: /* ignore the rest */ |
7967 | return 1; | 7988 | return 1; |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index c7c250519c4b..8280046fd1f0 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -704,24 +704,7 @@ static struct pci_device_id mvs_pci_table[] = { | |||
704 | .class_mask = 0, | 704 | .class_mask = 0, |
705 | .driver_data = chip_9445, | 705 | .driver_data = chip_9445, |
706 | }, | 706 | }, |
707 | { | 707 | { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */ |
708 | .vendor = PCI_VENDOR_ID_MARVELL_EXT, | ||
709 | .device = 0x9485, | ||
710 | .subvendor = PCI_ANY_ID, | ||
711 | .subdevice = 0x9480, | ||
712 | .class = 0, | ||
713 | .class_mask = 0, | ||
714 | .driver_data = chip_9485, | ||
715 | }, | ||
716 | { | ||
717 | .vendor = PCI_VENDOR_ID_MARVELL_EXT, | ||
718 | .device = 0x9485, | ||
719 | .subvendor = PCI_ANY_ID, | ||
720 | .subdevice = 0x9485, | ||
721 | .class = 0, | ||
722 | .class_mask = 0, | ||
723 | .driver_data = chip_9485, | ||
724 | }, | ||
725 | { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ | 708 | { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ |
726 | { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | 709 | { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ |
727 | { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | 710 | { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ |
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 512037e27783..2f689ae7a803 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c | |||
@@ -1,5 +1,3 @@ | |||
1 | #define PSEUDO_DMA | ||
2 | |||
3 | /* | 1 | /* |
4 | * This driver adapted from Drew Eckhardt's Trantor T128 driver | 2 | * This driver adapted from Drew Eckhardt's Trantor T128 driver |
5 | * | 3 | * |
@@ -77,7 +75,6 @@ | |||
77 | 75 | ||
78 | #include <scsi/scsi_host.h> | 76 | #include <scsi/scsi_host.h> |
79 | #include "pas16.h" | 77 | #include "pas16.h" |
80 | #define AUTOPROBE_IRQ | ||
81 | #include "NCR5380.h" | 78 | #include "NCR5380.h" |
82 | 79 | ||
83 | 80 | ||
@@ -377,7 +374,7 @@ static int __init pas16_detect(struct scsi_host_template *tpnt) | |||
377 | 374 | ||
378 | instance->io_port = io_port; | 375 | instance->io_port = io_port; |
379 | 376 | ||
380 | if (NCR5380_init(instance, 0)) | 377 | if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) |
381 | goto out_unregister; | 378 | goto out_unregister; |
382 | 379 | ||
383 | NCR5380_maybe_reset_bus(instance); | 380 | NCR5380_maybe_reset_bus(instance); |
@@ -460,7 +457,7 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, | |||
460 | } | 457 | } |
461 | 458 | ||
462 | /* | 459 | /* |
463 | * Function : int NCR5380_pread (struct Scsi_Host *instance, | 460 | * Function : int pas16_pread (struct Scsi_Host *instance, |
464 | * unsigned char *dst, int len) | 461 | * unsigned char *dst, int len) |
465 | * | 462 | * |
466 | * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to | 463 | * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to |
@@ -472,14 +469,14 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, | |||
472 | * timeout. | 469 | * timeout. |
473 | */ | 470 | */ |
474 | 471 | ||
475 | static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, | 472 | static inline int pas16_pread(struct Scsi_Host *instance, |
476 | int len) { | 473 | unsigned char *dst, int len) |
474 | { | ||
477 | register unsigned char *d = dst; | 475 | register unsigned char *d = dst; |
478 | register unsigned short reg = (unsigned short) (instance->io_port + | 476 | register unsigned short reg = (unsigned short) (instance->io_port + |
479 | P_DATA_REG_OFFSET); | 477 | P_DATA_REG_OFFSET); |
480 | register int i = len; | 478 | register int i = len; |
481 | int ii = 0; | 479 | int ii = 0; |
482 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
483 | 480 | ||
484 | while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) | 481 | while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) |
485 | ++ii; | 482 | ++ii; |
@@ -492,13 +489,11 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, | |||
492 | instance->host_no); | 489 | instance->host_no); |
493 | return -1; | 490 | return -1; |
494 | } | 491 | } |
495 | if (ii > hostdata->spin_max_r) | ||
496 | hostdata->spin_max_r = ii; | ||
497 | return 0; | 492 | return 0; |
498 | } | 493 | } |
499 | 494 | ||
500 | /* | 495 | /* |
501 | * Function : int NCR5380_pwrite (struct Scsi_Host *instance, | 496 | * Function : int pas16_pwrite (struct Scsi_Host *instance, |
502 | * unsigned char *src, int len) | 497 | * unsigned char *src, int len) |
503 | * | 498 | * |
504 | * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from | 499 | * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from |
@@ -510,13 +505,13 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, | |||
510 | * timeout. | 505 | * timeout. |
511 | */ | 506 | */ |
512 | 507 | ||
513 | static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, | 508 | static inline int pas16_pwrite(struct Scsi_Host *instance, |
514 | int len) { | 509 | unsigned char *src, int len) |
510 | { | ||
515 | register unsigned char *s = src; | 511 | register unsigned char *s = src; |
516 | register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); | 512 | register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); |
517 | register int i = len; | 513 | register int i = len; |
518 | int ii = 0; | 514 | int ii = 0; |
519 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | ||
520 | 515 | ||
521 | while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) | 516 | while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) |
522 | ++ii; | 517 | ++ii; |
@@ -529,8 +524,6 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src | |||
529 | instance->host_no); | 524 | instance->host_no); |
530 | return -1; | 525 | return -1; |
531 | } | 526 | } |
532 | if (ii > hostdata->spin_max_w) | ||
533 | hostdata->spin_max_w = ii; | ||
534 | return 0; | 527 | return 0; |
535 | } | 528 | } |
536 | 529 | ||
@@ -550,8 +543,6 @@ static struct scsi_host_template driver_template = { | |||
550 | .detect = pas16_detect, | 543 | .detect = pas16_detect, |
551 | .release = pas16_release, | 544 | .release = pas16_release, |
552 | .proc_name = "pas16", | 545 | .proc_name = "pas16", |
553 | .show_info = pas16_show_info, | ||
554 | .write_info = pas16_write_info, | ||
555 | .info = pas16_info, | 546 | .info = pas16_info, |
556 | .queuecommand = pas16_queue_command, | 547 | .queuecommand = pas16_queue_command, |
557 | .eh_abort_handler = pas16_abort, | 548 | .eh_abort_handler = pas16_abort, |
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index d37527717225..9fe7f33660b4 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h | |||
@@ -103,14 +103,15 @@ | |||
103 | #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) ) | 103 | #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) ) |
104 | 104 | ||
105 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) | 105 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) |
106 | #define NCR5380_dma_recv_setup pas16_pread | ||
107 | #define NCR5380_dma_send_setup pas16_pwrite | ||
108 | #define NCR5380_dma_residual(instance) (0) | ||
106 | 109 | ||
107 | #define NCR5380_intr pas16_intr | 110 | #define NCR5380_intr pas16_intr |
108 | #define NCR5380_queue_command pas16_queue_command | 111 | #define NCR5380_queue_command pas16_queue_command |
109 | #define NCR5380_abort pas16_abort | 112 | #define NCR5380_abort pas16_abort |
110 | #define NCR5380_bus_reset pas16_bus_reset | 113 | #define NCR5380_bus_reset pas16_bus_reset |
111 | #define NCR5380_info pas16_info | 114 | #define NCR5380_info pas16_info |
112 | #define NCR5380_show_info pas16_show_info | ||
113 | #define NCR5380_write_info pas16_write_info | ||
114 | 115 | ||
115 | /* 15 14 12 10 7 5 3 | 116 | /* 15 14 12 10 7 5 3 |
116 | 1101 0100 1010 1000 */ | 117 | 1101 0100 1010 1000 */ |
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 062ab34b86f8..6bd7bf4f4a81 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c | |||
@@ -418,8 +418,6 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) | |||
418 | if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { | 418 | if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { |
419 | pm8001_ha->io_mem[logicalBar].membase = | 419 | pm8001_ha->io_mem[logicalBar].membase = |
420 | pci_resource_start(pdev, bar); | 420 | pci_resource_start(pdev, bar); |
421 | pm8001_ha->io_mem[logicalBar].membase &= | ||
422 | (u32)PCI_BASE_ADDRESS_MEM_MASK; | ||
423 | pm8001_ha->io_mem[logicalBar].memsize = | 421 | pm8001_ha->io_mem[logicalBar].memsize = |
424 | pci_resource_len(pdev, bar); | 422 | pci_resource_len(pdev, bar); |
425 | pm8001_ha->io_mem[logicalBar].memvirtaddr = | 423 | pm8001_ha->io_mem[logicalBar].memvirtaddr = |
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index b5029e543b91..15dff7099955 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/ktime.h> | ||
9 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
10 | #include <linux/ratelimit.h> | 11 | #include <linux/ratelimit.h> |
11 | #include <linux/vmalloc.h> | 12 | #include <linux/vmalloc.h> |
@@ -1812,7 +1813,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) | |||
1812 | struct host_system_info *phost_info; | 1813 | struct host_system_info *phost_info; |
1813 | struct register_host_info *preg_hsi; | 1814 | struct register_host_info *preg_hsi; |
1814 | struct new_utsname *p_sysid = NULL; | 1815 | struct new_utsname *p_sysid = NULL; |
1815 | struct timeval tv; | ||
1816 | 1816 | ||
1817 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 1817 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
1818 | if (!sp) | 1818 | if (!sp) |
@@ -1886,8 +1886,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) | |||
1886 | p_sysid->domainname, DOMNAME_LENGTH); | 1886 | p_sysid->domainname, DOMNAME_LENGTH); |
1887 | strncpy(phost_info->hostdriver, | 1887 | strncpy(phost_info->hostdriver, |
1888 | QLA2XXX_VERSION, VERSION_LENGTH); | 1888 | QLA2XXX_VERSION, VERSION_LENGTH); |
1889 | do_gettimeofday(&tv); | 1889 | preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); |
1890 | preg_hsi->utc = (uint64_t)tv.tv_sec; | ||
1891 | ql_dbg(ql_dbg_init, vha, 0x0149, | 1890 | ql_dbg(ql_dbg_init, vha, 0x0149, |
1892 | "ISP%04X: Host registration with firmware\n", | 1891 | "ISP%04X: Host registration with firmware\n", |
1893 | ha->pdev->device); | 1892 | ha->pdev->device); |
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index b6b4cfdd7620..54380b434b30 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -1229,7 +1229,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1229 | if (buf == NULL) { | 1229 | if (buf == NULL) { |
1230 | ql_log(ql_log_fatal, vha, 0x010c, | 1230 | ql_log(ql_log_fatal, vha, 0x010c, |
1231 | "Unable to allocate memory.\n"); | 1231 | "Unable to allocate memory.\n"); |
1232 | return -1; | 1232 | return -ENOMEM; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | for (i = 0; i < n; i++) { | 1235 | for (i = 0; i < n; i++) { |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f3d69a98c725..0f9ba41e27d8 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -6,23 +6,15 @@ | |||
6 | * anything out of the ordinary is seen. | 6 | * anything out of the ordinary is seen. |
7 | * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | 7 | * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
8 | * | 8 | * |
9 | * This version is more generic, simulating a variable number of disk | 9 | * Copyright (C) 2001 - 2016 Douglas Gilbert |
10 | * (or disk like devices) sharing a common amount of RAM. To be more | ||
11 | * realistic, the simulated devices have the transport attributes of | ||
12 | * SAS disks. | ||
13 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
14 | * | 15 | * |
15 | * For documentation see http://sg.danny.cz/sg/sdebug26.html | 16 | * For documentation see http://sg.danny.cz/sg/sdebug26.html |
16 | * | 17 | * |
17 | * D. Gilbert (dpg) work for Magneto-Optical device test [20010421] | ||
18 | * dpg: work for devfs large number of disks [20010809] | ||
19 | * forked for lk 2.5 series [20011216, 20020101] | ||
20 | * use vmalloc() more inquiry+mode_sense [20020302] | ||
21 | * add timers for delayed responses [20020721] | ||
22 | * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031] | ||
23 | * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118] | ||
24 | * dpg: change style of boot options to "scsi_debug.num_tgts=2" and | ||
25 | * module options to "modprobe scsi_debug num_tgts=2" [20021221] | ||
26 | */ | 18 | */ |
27 | 19 | ||
28 | 20 | ||
@@ -32,7 +24,7 @@ | |||
32 | 24 | ||
33 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
34 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
35 | #include <linux/timer.h> | 27 | #include <linux/jiffies.h> |
36 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
37 | #include <linux/types.h> | 29 | #include <linux/types.h> |
38 | #include <linux/string.h> | 30 | #include <linux/string.h> |
@@ -49,6 +41,7 @@ | |||
49 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
50 | #include <linux/atomic.h> | 42 | #include <linux/atomic.h> |
51 | #include <linux/hrtimer.h> | 43 | #include <linux/hrtimer.h> |
44 | #include <linux/uuid.h> | ||
52 | 45 | ||
53 | #include <net/checksum.h> | 46 | #include <net/checksum.h> |
54 | 47 | ||
@@ -66,8 +59,9 @@ | |||
66 | #include "sd.h" | 59 | #include "sd.h" |
67 | #include "scsi_logging.h" | 60 | #include "scsi_logging.h" |
68 | 61 | ||
69 | #define SCSI_DEBUG_VERSION "1.85" | 62 | /* make sure inq_product_rev string corresponds to this version */ |
70 | static const char *scsi_debug_version_date = "20141022"; | 63 | #define SDEBUG_VERSION "1.86" |
64 | static const char *sdebug_version_date = "20160430"; | ||
71 | 65 | ||
72 | #define MY_NAME "scsi_debug" | 66 | #define MY_NAME "scsi_debug" |
73 | 67 | ||
@@ -102,7 +96,6 @@ static const char *scsi_debug_version_date = "20141022"; | |||
102 | /* Additional Sense Code Qualifier (ASCQ) */ | 96 | /* Additional Sense Code Qualifier (ASCQ) */ |
103 | #define ACK_NAK_TO 0x3 | 97 | #define ACK_NAK_TO 0x3 |
104 | 98 | ||
105 | |||
106 | /* Default values for driver parameters */ | 99 | /* Default values for driver parameters */ |
107 | #define DEF_NUM_HOST 1 | 100 | #define DEF_NUM_HOST 1 |
108 | #define DEF_NUM_TGTS 1 | 101 | #define DEF_NUM_TGTS 1 |
@@ -111,7 +104,7 @@ static const char *scsi_debug_version_date = "20141022"; | |||
111 | * (id 0) containing 1 logical unit (lun 0). That is 1 device. | 104 | * (id 0) containing 1 logical unit (lun 0). That is 1 device. |
112 | */ | 105 | */ |
113 | #define DEF_ATO 1 | 106 | #define DEF_ATO 1 |
114 | #define DEF_DELAY 1 /* if > 0 unit is a jiffy */ | 107 | #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */ |
115 | #define DEF_DEV_SIZE_MB 8 | 108 | #define DEF_DEV_SIZE_MB 8 |
116 | #define DEF_DIF 0 | 109 | #define DEF_DIF 0 |
117 | #define DEF_DIX 0 | 110 | #define DEF_DIX 0 |
@@ -131,9 +124,9 @@ static const char *scsi_debug_version_date = "20141022"; | |||
131 | #define DEF_OPTS 0 | 124 | #define DEF_OPTS 0 |
132 | #define DEF_OPT_BLKS 1024 | 125 | #define DEF_OPT_BLKS 1024 |
133 | #define DEF_PHYSBLK_EXP 0 | 126 | #define DEF_PHYSBLK_EXP 0 |
134 | #define DEF_PTYPE 0 | 127 | #define DEF_PTYPE TYPE_DISK |
135 | #define DEF_REMOVABLE false | 128 | #define DEF_REMOVABLE false |
136 | #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ | 129 | #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ |
137 | #define DEF_SECTOR_SIZE 512 | 130 | #define DEF_SECTOR_SIZE 512 |
138 | #define DEF_UNMAP_ALIGNMENT 0 | 131 | #define DEF_UNMAP_ALIGNMENT 0 |
139 | #define DEF_UNMAP_GRANULARITY 1 | 132 | #define DEF_UNMAP_GRANULARITY 1 |
@@ -143,43 +136,54 @@ static const char *scsi_debug_version_date = "20141022"; | |||
143 | #define DEF_VPD_USE_HOSTNO 1 | 136 | #define DEF_VPD_USE_HOSTNO 1 |
144 | #define DEF_WRITESAME_LENGTH 0xFFFF | 137 | #define DEF_WRITESAME_LENGTH 0xFFFF |
145 | #define DEF_STRICT 0 | 138 | #define DEF_STRICT 0 |
146 | #define DELAY_OVERRIDDEN -9999 | 139 | #define DEF_STATISTICS false |
147 | 140 | #define DEF_SUBMIT_QUEUES 1 | |
148 | /* bit mask values for scsi_debug_opts */ | 141 | #define DEF_UUID_CTL 0 |
149 | #define SCSI_DEBUG_OPT_NOISE 1 | 142 | #define JDELAY_OVERRIDDEN -9999 |
150 | #define SCSI_DEBUG_OPT_MEDIUM_ERR 2 | 143 | |
151 | #define SCSI_DEBUG_OPT_TIMEOUT 4 | 144 | #define SDEBUG_LUN_0_VAL 0 |
152 | #define SCSI_DEBUG_OPT_RECOVERED_ERR 8 | 145 | |
153 | #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 | 146 | /* bit mask values for sdebug_opts */ |
154 | #define SCSI_DEBUG_OPT_DIF_ERR 32 | 147 | #define SDEBUG_OPT_NOISE 1 |
155 | #define SCSI_DEBUG_OPT_DIX_ERR 64 | 148 | #define SDEBUG_OPT_MEDIUM_ERR 2 |
156 | #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 | 149 | #define SDEBUG_OPT_TIMEOUT 4 |
157 | #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100 | 150 | #define SDEBUG_OPT_RECOVERED_ERR 8 |
158 | #define SCSI_DEBUG_OPT_Q_NOISE 0x200 | 151 | #define SDEBUG_OPT_TRANSPORT_ERR 16 |
159 | #define SCSI_DEBUG_OPT_ALL_TSF 0x400 | 152 | #define SDEBUG_OPT_DIF_ERR 32 |
160 | #define SCSI_DEBUG_OPT_RARE_TSF 0x800 | 153 | #define SDEBUG_OPT_DIX_ERR 64 |
161 | #define SCSI_DEBUG_OPT_N_WCE 0x1000 | 154 | #define SDEBUG_OPT_MAC_TIMEOUT 128 |
162 | #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000 | 155 | #define SDEBUG_OPT_SHORT_TRANSFER 0x100 |
163 | #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000 | 156 | #define SDEBUG_OPT_Q_NOISE 0x200 |
164 | #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000) | 157 | #define SDEBUG_OPT_ALL_TSF 0x400 |
158 | #define SDEBUG_OPT_RARE_TSF 0x800 | ||
159 | #define SDEBUG_OPT_N_WCE 0x1000 | ||
160 | #define SDEBUG_OPT_RESET_NOISE 0x2000 | ||
161 | #define SDEBUG_OPT_NO_CDB_NOISE 0x4000 | ||
162 | #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ | ||
163 | SDEBUG_OPT_RESET_NOISE) | ||
164 | #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ | ||
165 | SDEBUG_OPT_TRANSPORT_ERR | \ | ||
166 | SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ | ||
167 | SDEBUG_OPT_SHORT_TRANSFER) | ||
165 | /* When "every_nth" > 0 then modulo "every_nth" commands: | 168 | /* When "every_nth" > 0 then modulo "every_nth" commands: |
166 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set | 169 | * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set |
167 | * - a RECOVERED_ERROR is simulated on successful read and write | 170 | * - a RECOVERED_ERROR is simulated on successful read and write |
168 | * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. | 171 | * commands if SDEBUG_OPT_RECOVERED_ERR is set. |
169 | * - a TRANSPORT_ERROR is simulated on successful read and write | 172 | * - a TRANSPORT_ERROR is simulated on successful read and write |
170 | * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. | 173 | * commands if SDEBUG_OPT_TRANSPORT_ERR is set. |
171 | * | 174 | * |
172 | * When "every_nth" < 0 then after "- every_nth" commands: | 175 | * When "every_nth" < 0 then after "- every_nth" commands: |
173 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set | 176 | * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set |
174 | * - a RECOVERED_ERROR is simulated on successful read and write | 177 | * - a RECOVERED_ERROR is simulated on successful read and write |
175 | * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. | 178 | * commands if SDEBUG_OPT_RECOVERED_ERR is set. |
176 | * - a TRANSPORT_ERROR is simulated on successful read and write | 179 | * - a TRANSPORT_ERROR is simulated on successful read and write |
177 | * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. | 180 | * commands if _DEBUG_OPT_TRANSPORT_ERR is set. |
178 | * This will continue until some other action occurs (e.g. the user | 181 | * This will continue on every subsequent command until some other action |
179 | * writing a new value (other than -1 or 1) to every_nth via sysfs). | 182 | * occurs (e.g. the user * writing a new value (other than -1 or 1) to |
183 | * every_nth via sysfs). | ||
180 | */ | 184 | */ |
181 | 185 | ||
182 | /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in | 186 | /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in |
183 | * priority order. In the subset implemented here lower numbers have higher | 187 | * priority order. In the subset implemented here lower numbers have higher |
184 | * priority. The UA numbers should be a sequence starting from 0 with | 188 | * priority. The UA numbers should be a sequence starting from 0 with |
185 | * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */ | 189 | * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */ |
@@ -192,11 +196,7 @@ static const char *scsi_debug_version_date = "20141022"; | |||
192 | #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 | 196 | #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 |
193 | #define SDEBUG_NUM_UAS 7 | 197 | #define SDEBUG_NUM_UAS 7 |
194 | 198 | ||
195 | /* for check_readiness() */ | 199 | /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this |
196 | #define UAS_ONLY 1 /* check for UAs only */ | ||
197 | #define UAS_TUR 0 /* if no UAs then check if media access possible */ | ||
198 | |||
199 | /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this | ||
200 | * sector on read commands: */ | 200 | * sector on read commands: */ |
201 | #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ | 201 | #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ |
202 | #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ | 202 | #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ |
@@ -205,21 +205,108 @@ static const char *scsi_debug_version_date = "20141022"; | |||
205 | * or "peripheral device" addressing (value 0) */ | 205 | * or "peripheral device" addressing (value 0) */ |
206 | #define SAM2_LUN_ADDRESS_METHOD 0 | 206 | #define SAM2_LUN_ADDRESS_METHOD 0 |
207 | 207 | ||
208 | /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued | 208 | /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued |
209 | * (for response) at one time. Can be reduced by max_queue option. Command | 209 | * (for response) per submit queue at one time. Can be reduced by max_queue |
210 | * responses are not queued when delay=0 and ndelay=0. The per-device | 210 | * option. Command responses are not queued when jdelay=0 and ndelay=0. The |
211 | * DEF_CMD_PER_LUN can be changed via sysfs: | 211 | * per-device DEF_CMD_PER_LUN can be changed via sysfs: |
212 | * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed | 212 | * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth |
213 | * SCSI_DEBUG_CANQUEUE. */ | 213 | * but cannot exceed SDEBUG_CANQUEUE . |
214 | #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */ | 214 | */ |
215 | #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG) | 215 | #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */ |
216 | #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG) | ||
216 | #define DEF_CMD_PER_LUN 255 | 217 | #define DEF_CMD_PER_LUN 255 |
217 | 218 | ||
218 | #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE | 219 | #define F_D_IN 1 |
219 | #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" | 220 | #define F_D_OUT 2 |
220 | #endif | 221 | #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ |
222 | #define F_D_UNKN 8 | ||
223 | #define F_RL_WLUN_OK 0x10 | ||
224 | #define F_SKIP_UA 0x20 | ||
225 | #define F_DELAY_OVERR 0x40 | ||
226 | #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ | ||
227 | #define F_SA_HIGH 0x100 /* as used by variable length cdbs */ | ||
228 | #define F_INV_OP 0x200 | ||
229 | #define F_FAKE_RW 0x400 | ||
230 | #define F_M_ACCESS 0x800 /* media access */ | ||
231 | |||
232 | #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) | ||
233 | #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) | ||
234 | #define FF_SA (F_SA_HIGH | F_SA_LOW) | ||
235 | |||
236 | #define SDEBUG_MAX_PARTS 4 | ||
237 | |||
238 | #define SDEBUG_MAX_CMD_LEN 32 | ||
239 | |||
240 | |||
241 | struct sdebug_dev_info { | ||
242 | struct list_head dev_list; | ||
243 | unsigned int channel; | ||
244 | unsigned int target; | ||
245 | u64 lun; | ||
246 | uuid_be lu_name; | ||
247 | struct sdebug_host_info *sdbg_host; | ||
248 | unsigned long uas_bm[1]; | ||
249 | atomic_t num_in_q; | ||
250 | atomic_t stopped; | ||
251 | bool used; | ||
252 | }; | ||
253 | |||
254 | struct sdebug_host_info { | ||
255 | struct list_head host_list; | ||
256 | struct Scsi_Host *shost; | ||
257 | struct device dev; | ||
258 | struct list_head dev_info_list; | ||
259 | }; | ||
260 | |||
261 | #define to_sdebug_host(d) \ | ||
262 | container_of(d, struct sdebug_host_info, dev) | ||
263 | |||
264 | struct sdebug_defer { | ||
265 | struct hrtimer hrt; | ||
266 | struct execute_work ew; | ||
267 | int sqa_idx; /* index of sdebug_queue array */ | ||
268 | int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */ | ||
269 | int issuing_cpu; | ||
270 | }; | ||
271 | |||
272 | struct sdebug_queued_cmd { | ||
273 | /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue | ||
274 | * instance indicates this slot is in use. | ||
275 | */ | ||
276 | struct sdebug_defer *sd_dp; | ||
277 | struct scsi_cmnd *a_cmnd; | ||
278 | unsigned int inj_recovered:1; | ||
279 | unsigned int inj_transport:1; | ||
280 | unsigned int inj_dif:1; | ||
281 | unsigned int inj_dix:1; | ||
282 | unsigned int inj_short:1; | ||
283 | }; | ||
284 | |||
285 | struct sdebug_queue { | ||
286 | struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE]; | ||
287 | unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS]; | ||
288 | spinlock_t qc_lock; | ||
289 | atomic_t blocked; /* to temporarily stop more being queued */ | ||
290 | }; | ||
291 | |||
292 | static atomic_t sdebug_cmnd_count; /* number of incoming commands */ | ||
293 | static atomic_t sdebug_completions; /* count of deferred completions */ | ||
294 | static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ | ||
295 | static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ | ||
296 | |||
297 | struct opcode_info_t { | ||
298 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */ | ||
299 | /* for terminating element */ | ||
300 | u8 opcode; /* if num_attached > 0, preferred */ | ||
301 | u16 sa; /* service action */ | ||
302 | u32 flags; /* OR-ed set of SDEB_F_* */ | ||
303 | int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); | ||
304 | const struct opcode_info_t *arrp; /* num_attached elements or NULL */ | ||
305 | u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */ | ||
306 | /* ignore cdb bytes after position 15 */ | ||
307 | }; | ||
221 | 308 | ||
222 | /* SCSI opcodes (first byte of cdb) mapped onto these indexes */ | 309 | /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */ |
223 | enum sdeb_opcode_index { | 310 | enum sdeb_opcode_index { |
224 | SDEB_I_INVALID_OPCODE = 0, | 311 | SDEB_I_INVALID_OPCODE = 0, |
225 | SDEB_I_INQUIRY = 1, | 312 | SDEB_I_INQUIRY = 1, |
@@ -254,6 +341,7 @@ enum sdeb_opcode_index { | |||
254 | SDEB_I_LAST_ELEMENT = 30, /* keep this last */ | 341 | SDEB_I_LAST_ELEMENT = 30, /* keep this last */ |
255 | }; | 342 | }; |
256 | 343 | ||
344 | |||
257 | static const unsigned char opcode_ind_arr[256] = { | 345 | static const unsigned char opcode_ind_arr[256] = { |
258 | /* 0x0; 0x0->0x1f: 6 byte cdbs */ | 346 | /* 0x0; 0x0->0x1f: 6 byte cdbs */ |
259 | SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, | 347 | SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, |
@@ -274,7 +362,7 @@ static const unsigned char opcode_ind_arr[256] = { | |||
274 | 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, | 362 | 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, |
275 | SDEB_I_RELEASE, | 363 | SDEB_I_RELEASE, |
276 | 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, | 364 | 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, |
277 | /* 0x60; 0x60->0x7d are reserved */ | 365 | /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */ |
278 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 366 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
279 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 367 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
280 | 0, SDEB_I_VARIABLE_LEN, | 368 | 0, SDEB_I_VARIABLE_LEN, |
@@ -297,24 +385,6 @@ static const unsigned char opcode_ind_arr[256] = { | |||
297 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 385 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
298 | }; | 386 | }; |
299 | 387 | ||
300 | #define F_D_IN 1 | ||
301 | #define F_D_OUT 2 | ||
302 | #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ | ||
303 | #define F_D_UNKN 8 | ||
304 | #define F_RL_WLUN_OK 0x10 | ||
305 | #define F_SKIP_UA 0x20 | ||
306 | #define F_DELAY_OVERR 0x40 | ||
307 | #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ | ||
308 | #define F_SA_HIGH 0x100 /* as used by variable length cdbs */ | ||
309 | #define F_INV_OP 0x200 | ||
310 | #define F_FAKE_RW 0x400 | ||
311 | #define F_M_ACCESS 0x800 /* media access */ | ||
312 | |||
313 | #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) | ||
314 | #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) | ||
315 | #define FF_SA (F_SA_HIGH | F_SA_LOW) | ||
316 | |||
317 | struct sdebug_dev_info; | ||
318 | static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); | 388 | static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); |
319 | static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); | 389 | static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); |
320 | static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); | 390 | static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); |
@@ -337,18 +407,6 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); | |||
337 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); | 407 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); |
338 | static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); | 408 | static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); |
339 | 409 | ||
340 | struct opcode_info_t { | ||
341 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff | ||
342 | * for terminating element */ | ||
343 | u8 opcode; /* if num_attached > 0, preferred */ | ||
344 | u16 sa; /* service action */ | ||
345 | u32 flags; /* OR-ed set of SDEB_F_* */ | ||
346 | int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); | ||
347 | const struct opcode_info_t *arrp; /* num_attached elements or NULL */ | ||
348 | u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */ | ||
349 | /* ignore cdb bytes after position 15 */ | ||
350 | }; | ||
351 | |||
352 | static const struct opcode_info_t msense_iarr[1] = { | 410 | static const struct opcode_info_t msense_iarr[1] = { |
353 | {0, 0x1a, 0, F_D_IN, NULL, NULL, | 411 | {0, 0x1a, 0, F_D_IN, NULL, NULL, |
354 | {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 412 | {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, |
@@ -509,61 +567,52 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { | |||
509 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 567 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, |
510 | }; | 568 | }; |
511 | 569 | ||
512 | struct sdebug_scmd_extra_t { | 570 | static int sdebug_add_host = DEF_NUM_HOST; |
513 | bool inj_recovered; | 571 | static int sdebug_ato = DEF_ATO; |
514 | bool inj_transport; | 572 | static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */ |
515 | bool inj_dif; | 573 | static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB; |
516 | bool inj_dix; | 574 | static int sdebug_dif = DEF_DIF; |
517 | bool inj_short; | 575 | static int sdebug_dix = DEF_DIX; |
518 | }; | 576 | static int sdebug_dsense = DEF_D_SENSE; |
519 | 577 | static int sdebug_every_nth = DEF_EVERY_NTH; | |
520 | static int scsi_debug_add_host = DEF_NUM_HOST; | 578 | static int sdebug_fake_rw = DEF_FAKE_RW; |
521 | static int scsi_debug_ato = DEF_ATO; | 579 | static unsigned int sdebug_guard = DEF_GUARD; |
522 | static int scsi_debug_delay = DEF_DELAY; | 580 | static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED; |
523 | static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; | 581 | static int sdebug_max_luns = DEF_MAX_LUNS; |
524 | static int scsi_debug_dif = DEF_DIF; | 582 | static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */ |
525 | static int scsi_debug_dix = DEF_DIX; | ||
526 | static int scsi_debug_dsense = DEF_D_SENSE; | ||
527 | static int scsi_debug_every_nth = DEF_EVERY_NTH; | ||
528 | static int scsi_debug_fake_rw = DEF_FAKE_RW; | ||
529 | static unsigned int scsi_debug_guard = DEF_GUARD; | ||
530 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; | ||
531 | static int scsi_debug_max_luns = DEF_MAX_LUNS; | ||
532 | static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; | ||
533 | static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */ | 583 | static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */ |
534 | static int scsi_debug_ndelay = DEF_NDELAY; | 584 | static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */ |
535 | static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; | 585 | static int sdebug_no_lun_0 = DEF_NO_LUN_0; |
536 | static int scsi_debug_no_uld = 0; | 586 | static int sdebug_no_uld; |
537 | static int scsi_debug_num_parts = DEF_NUM_PARTS; | 587 | static int sdebug_num_parts = DEF_NUM_PARTS; |
538 | static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ | 588 | static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */ |
539 | static int scsi_debug_opt_blks = DEF_OPT_BLKS; | 589 | static int sdebug_opt_blks = DEF_OPT_BLKS; |
540 | static int scsi_debug_opts = DEF_OPTS; | 590 | static int sdebug_opts = DEF_OPTS; |
541 | static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; | 591 | static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; |
542 | static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ | 592 | static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ |
543 | static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; | 593 | static int sdebug_scsi_level = DEF_SCSI_LEVEL; |
544 | static int scsi_debug_sector_size = DEF_SECTOR_SIZE; | 594 | static int sdebug_sector_size = DEF_SECTOR_SIZE; |
545 | static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; | 595 | static int sdebug_virtual_gb = DEF_VIRTUAL_GB; |
546 | static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; | 596 | static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; |
547 | static unsigned int scsi_debug_lbpu = DEF_LBPU; | 597 | static unsigned int sdebug_lbpu = DEF_LBPU; |
548 | static unsigned int scsi_debug_lbpws = DEF_LBPWS; | 598 | static unsigned int sdebug_lbpws = DEF_LBPWS; |
549 | static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; | 599 | static unsigned int sdebug_lbpws10 = DEF_LBPWS10; |
550 | static unsigned int scsi_debug_lbprz = DEF_LBPRZ; | 600 | static unsigned int sdebug_lbprz = DEF_LBPRZ; |
551 | static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; | 601 | static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT; |
552 | static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; | 602 | static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; |
553 | static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; | 603 | static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; |
554 | static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; | 604 | static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; |
555 | static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; | 605 | static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; |
556 | static bool scsi_debug_removable = DEF_REMOVABLE; | 606 | static int sdebug_uuid_ctl = DEF_UUID_CTL; |
557 | static bool scsi_debug_clustering; | 607 | static bool sdebug_removable = DEF_REMOVABLE; |
558 | static bool scsi_debug_host_lock = DEF_HOST_LOCK; | 608 | static bool sdebug_clustering; |
559 | static bool scsi_debug_strict = DEF_STRICT; | 609 | static bool sdebug_host_lock = DEF_HOST_LOCK; |
610 | static bool sdebug_strict = DEF_STRICT; | ||
560 | static bool sdebug_any_injecting_opt; | 611 | static bool sdebug_any_injecting_opt; |
561 | 612 | static bool sdebug_verbose; | |
562 | static atomic_t sdebug_cmnd_count; | 613 | static bool have_dif_prot; |
563 | static atomic_t sdebug_completions; | 614 | static bool sdebug_statistics = DEF_STATISTICS; |
564 | static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */ | 615 | static bool sdebug_mq_active; |
565 | |||
566 | #define DEV_READONLY(TGT) (0) | ||
567 | 616 | ||
568 | static unsigned int sdebug_store_sectors; | 617 | static unsigned int sdebug_store_sectors; |
569 | static sector_t sdebug_capacity; /* in sectors */ | 618 | static sector_t sdebug_capacity; /* in sectors */ |
@@ -574,59 +623,10 @@ static int sdebug_heads; /* heads per disk */ | |||
574 | static int sdebug_cylinders_per; /* cylinders per surface */ | 623 | static int sdebug_cylinders_per; /* cylinders per surface */ |
575 | static int sdebug_sectors_per; /* sectors per cylinder */ | 624 | static int sdebug_sectors_per; /* sectors per cylinder */ |
576 | 625 | ||
577 | #define SDEBUG_MAX_PARTS 4 | ||
578 | |||
579 | #define SCSI_DEBUG_MAX_CMD_LEN 32 | ||
580 | |||
581 | static unsigned int scsi_debug_lbp(void) | ||
582 | { | ||
583 | return ((0 == scsi_debug_fake_rw) && | ||
584 | (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10)); | ||
585 | } | ||
586 | |||
587 | struct sdebug_dev_info { | ||
588 | struct list_head dev_list; | ||
589 | unsigned int channel; | ||
590 | unsigned int target; | ||
591 | u64 lun; | ||
592 | struct sdebug_host_info *sdbg_host; | ||
593 | unsigned long uas_bm[1]; | ||
594 | atomic_t num_in_q; | ||
595 | char stopped; /* TODO: should be atomic */ | ||
596 | bool used; | ||
597 | }; | ||
598 | |||
599 | struct sdebug_host_info { | ||
600 | struct list_head host_list; | ||
601 | struct Scsi_Host *shost; | ||
602 | struct device dev; | ||
603 | struct list_head dev_info_list; | ||
604 | }; | ||
605 | |||
606 | #define to_sdebug_host(d) \ | ||
607 | container_of(d, struct sdebug_host_info, dev) | ||
608 | |||
609 | static LIST_HEAD(sdebug_host_list); | 626 | static LIST_HEAD(sdebug_host_list); |
610 | static DEFINE_SPINLOCK(sdebug_host_list_lock); | 627 | static DEFINE_SPINLOCK(sdebug_host_list_lock); |
611 | 628 | ||
612 | 629 | static unsigned char *fake_storep; /* ramdisk storage */ | |
613 | struct sdebug_hrtimer { /* ... is derived from hrtimer */ | ||
614 | struct hrtimer hrt; /* must be first element */ | ||
615 | int qa_indx; | ||
616 | }; | ||
617 | |||
618 | struct sdebug_queued_cmd { | ||
619 | /* in_use flagged by a bit in queued_in_use_bm[] */ | ||
620 | struct timer_list *cmnd_timerp; | ||
621 | struct tasklet_struct *tletp; | ||
622 | struct sdebug_hrtimer *sd_hrtp; | ||
623 | struct scsi_cmnd * a_cmnd; | ||
624 | }; | ||
625 | static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; | ||
626 | static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS]; | ||
627 | |||
628 | |||
629 | static unsigned char * fake_storep; /* ramdisk storage */ | ||
630 | static struct sd_dif_tuple *dif_storep; /* protection info */ | 630 | static struct sd_dif_tuple *dif_storep; /* protection info */ |
631 | static void *map_storep; /* provisioning map */ | 631 | static void *map_storep; /* provisioning map */ |
632 | 632 | ||
@@ -640,7 +640,9 @@ static int dix_writes; | |||
640 | static int dix_reads; | 640 | static int dix_reads; |
641 | static int dif_errors; | 641 | static int dif_errors; |
642 | 642 | ||
643 | static DEFINE_SPINLOCK(queued_arr_lock); | 643 | static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */ |
644 | static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */ | ||
645 | |||
644 | static DEFINE_RWLOCK(atomic_rw); | 646 | static DEFINE_RWLOCK(atomic_rw); |
645 | 647 | ||
646 | static char sdebug_proc_name[] = MY_NAME; | 648 | static char sdebug_proc_name[] = MY_NAME; |
@@ -662,19 +664,22 @@ static const int illegal_condition_result = | |||
662 | static const int device_qfull_result = | 664 | static const int device_qfull_result = |
663 | (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL; | 665 | (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL; |
664 | 666 | ||
665 | static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | 667 | |
666 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, | 668 | /* Only do the extra work involved in logical block provisioning if one or |
667 | 0, 0, 0, 0}; | 669 | * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing |
668 | static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, | 670 | * real reads and writes (i.e. not skipping them for speed). |
669 | 0, 0, 0x2, 0x4b}; | 671 | */ |
670 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, | 672 | static inline bool scsi_debug_lbp(void) |
671 | 0, 0, 0x0, 0x0}; | 673 | { |
674 | return 0 == sdebug_fake_rw && | ||
675 | (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); | ||
676 | } | ||
672 | 677 | ||
673 | static void *fake_store(unsigned long long lba) | 678 | static void *fake_store(unsigned long long lba) |
674 | { | 679 | { |
675 | lba = do_div(lba, sdebug_store_sectors); | 680 | lba = do_div(lba, sdebug_store_sectors); |
676 | 681 | ||
677 | return fake_storep + lba * scsi_debug_sector_size; | 682 | return fake_storep + lba * sdebug_sector_size; |
678 | } | 683 | } |
679 | 684 | ||
680 | static struct sd_dif_tuple *dif_store(sector_t sector) | 685 | static struct sd_dif_tuple *dif_store(sector_t sector) |
@@ -684,9 +689,6 @@ static struct sd_dif_tuple *dif_store(sector_t sector) | |||
684 | return dif_storep + sector; | 689 | return dif_storep + sector; |
685 | } | 690 | } |
686 | 691 | ||
687 | static int sdebug_add_adapter(void); | ||
688 | static void sdebug_remove_adapter(void); | ||
689 | |||
690 | static void sdebug_max_tgts_luns(void) | 692 | static void sdebug_max_tgts_luns(void) |
691 | { | 693 | { |
692 | struct sdebug_host_info *sdbg_host; | 694 | struct sdebug_host_info *sdbg_host; |
@@ -696,11 +698,11 @@ static void sdebug_max_tgts_luns(void) | |||
696 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { | 698 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { |
697 | hpnt = sdbg_host->shost; | 699 | hpnt = sdbg_host->shost; |
698 | if ((hpnt->this_id >= 0) && | 700 | if ((hpnt->this_id >= 0) && |
699 | (scsi_debug_num_tgts > hpnt->this_id)) | 701 | (sdebug_num_tgts > hpnt->this_id)) |
700 | hpnt->max_id = scsi_debug_num_tgts + 1; | 702 | hpnt->max_id = sdebug_num_tgts + 1; |
701 | else | 703 | else |
702 | hpnt->max_id = scsi_debug_num_tgts; | 704 | hpnt->max_id = sdebug_num_tgts; |
703 | /* scsi_debug_max_luns; */ | 705 | /* sdebug_max_luns; */ |
704 | hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; | 706 | hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; |
705 | } | 707 | } |
706 | spin_unlock(&sdebug_host_list_lock); | 708 | spin_unlock(&sdebug_host_list_lock); |
@@ -709,9 +711,9 @@ static void sdebug_max_tgts_luns(void) | |||
709 | enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; | 711 | enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; |
710 | 712 | ||
711 | /* Set in_bit to -1 to indicate no bit position of invalid field */ | 713 | /* Set in_bit to -1 to indicate no bit position of invalid field */ |
712 | static void | 714 | static void mk_sense_invalid_fld(struct scsi_cmnd *scp, |
713 | mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, | 715 | enum sdeb_cmd_data c_d, |
714 | int in_byte, int in_bit) | 716 | int in_byte, int in_bit) |
715 | { | 717 | { |
716 | unsigned char *sbuff; | 718 | unsigned char *sbuff; |
717 | u8 sks[4]; | 719 | u8 sks[4]; |
@@ -725,8 +727,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, | |||
725 | } | 727 | } |
726 | asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; | 728 | asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; |
727 | memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); | 729 | memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); |
728 | scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST, | 730 | scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0); |
729 | asc, 0); | ||
730 | memset(sks, 0, sizeof(sks)); | 731 | memset(sks, 0, sizeof(sks)); |
731 | sks[0] = 0x80; | 732 | sks[0] = 0x80; |
732 | if (c_d) | 733 | if (c_d) |
@@ -736,7 +737,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, | |||
736 | sks[0] |= 0x7 & in_bit; | 737 | sks[0] |= 0x7 & in_bit; |
737 | } | 738 | } |
738 | put_unaligned_be16(in_byte, sks + 1); | 739 | put_unaligned_be16(in_byte, sks + 1); |
739 | if (scsi_debug_dsense) { | 740 | if (sdebug_dsense) { |
740 | sl = sbuff[7] + 8; | 741 | sl = sbuff[7] + 8; |
741 | sbuff[7] = sl; | 742 | sbuff[7] = sl; |
742 | sbuff[sl] = 0x2; | 743 | sbuff[sl] = 0x2; |
@@ -744,7 +745,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, | |||
744 | memcpy(sbuff + sl + 4, sks, 3); | 745 | memcpy(sbuff + sl + 4, sks, 3); |
745 | } else | 746 | } else |
746 | memcpy(sbuff + 15, sks, 3); | 747 | memcpy(sbuff + 15, sks, 3); |
747 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 748 | if (sdebug_verbose) |
748 | sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" | 749 | sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" |
749 | "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", | 750 | "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", |
750 | my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit); | 751 | my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit); |
@@ -762,23 +763,22 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) | |||
762 | } | 763 | } |
763 | memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); | 764 | memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); |
764 | 765 | ||
765 | scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); | 766 | scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq); |
766 | 767 | ||
767 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 768 | if (sdebug_verbose) |
768 | sdev_printk(KERN_INFO, scp->device, | 769 | sdev_printk(KERN_INFO, scp->device, |
769 | "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", | 770 | "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", |
770 | my_name, key, asc, asq); | 771 | my_name, key, asc, asq); |
771 | } | 772 | } |
772 | 773 | ||
773 | static void | 774 | static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) |
774 | mk_sense_invalid_opcode(struct scsi_cmnd *scp) | ||
775 | { | 775 | { |
776 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); | 776 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); |
777 | } | 777 | } |
778 | 778 | ||
779 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | 779 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
780 | { | 780 | { |
781 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { | 781 | if (sdebug_verbose) { |
782 | if (0x1261 == cmd) | 782 | if (0x1261 == cmd) |
783 | sdev_printk(KERN_INFO, dev, | 783 | sdev_printk(KERN_INFO, dev, |
784 | "%s: BLKFLSBUF [0x1261]\n", __func__); | 784 | "%s: BLKFLSBUF [0x1261]\n", __func__); |
@@ -810,11 +810,9 @@ static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) | |||
810 | spin_unlock(&sdebug_host_list_lock); | 810 | spin_unlock(&sdebug_host_list_lock); |
811 | } | 811 | } |
812 | 812 | ||
813 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | 813 | static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
814 | struct sdebug_dev_info * devip) | ||
815 | { | 814 | { |
816 | int k; | 815 | int k; |
817 | bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); | ||
818 | 816 | ||
819 | k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); | 817 | k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); |
820 | if (k != SDEBUG_NUM_UAS) { | 818 | if (k != SDEBUG_NUM_UAS) { |
@@ -822,40 +820,41 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | |||
822 | 820 | ||
823 | switch (k) { | 821 | switch (k) { |
824 | case SDEBUG_UA_POR: | 822 | case SDEBUG_UA_POR: |
825 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 823 | mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, |
826 | UA_RESET_ASC, POWER_ON_RESET_ASCQ); | 824 | POWER_ON_RESET_ASCQ); |
827 | if (debug) | 825 | if (sdebug_verbose) |
828 | cp = "power on reset"; | 826 | cp = "power on reset"; |
829 | break; | 827 | break; |
830 | case SDEBUG_UA_BUS_RESET: | 828 | case SDEBUG_UA_BUS_RESET: |
831 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 829 | mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, |
832 | UA_RESET_ASC, BUS_RESET_ASCQ); | 830 | BUS_RESET_ASCQ); |
833 | if (debug) | 831 | if (sdebug_verbose) |
834 | cp = "bus reset"; | 832 | cp = "bus reset"; |
835 | break; | 833 | break; |
836 | case SDEBUG_UA_MODE_CHANGED: | 834 | case SDEBUG_UA_MODE_CHANGED: |
837 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 835 | mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, |
838 | UA_CHANGED_ASC, MODE_CHANGED_ASCQ); | 836 | MODE_CHANGED_ASCQ); |
839 | if (debug) | 837 | if (sdebug_verbose) |
840 | cp = "mode parameters changed"; | 838 | cp = "mode parameters changed"; |
841 | break; | 839 | break; |
842 | case SDEBUG_UA_CAPACITY_CHANGED: | 840 | case SDEBUG_UA_CAPACITY_CHANGED: |
843 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 841 | mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, |
844 | UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); | 842 | CAPACITY_CHANGED_ASCQ); |
845 | if (debug) | 843 | if (sdebug_verbose) |
846 | cp = "capacity data changed"; | 844 | cp = "capacity data changed"; |
847 | break; | 845 | break; |
848 | case SDEBUG_UA_MICROCODE_CHANGED: | 846 | case SDEBUG_UA_MICROCODE_CHANGED: |
849 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 847 | mk_sense_buffer(scp, UNIT_ATTENTION, |
850 | TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ); | 848 | TARGET_CHANGED_ASC, |
851 | if (debug) | 849 | MICROCODE_CHANGED_ASCQ); |
850 | if (sdebug_verbose) | ||
852 | cp = "microcode has been changed"; | 851 | cp = "microcode has been changed"; |
853 | break; | 852 | break; |
854 | case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: | 853 | case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: |
855 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 854 | mk_sense_buffer(scp, UNIT_ATTENTION, |
856 | TARGET_CHANGED_ASC, | 855 | TARGET_CHANGED_ASC, |
857 | MICROCODE_CHANGED_WO_RESET_ASCQ); | 856 | MICROCODE_CHANGED_WO_RESET_ASCQ); |
858 | if (debug) | 857 | if (sdebug_verbose) |
859 | cp = "microcode has been changed without reset"; | 858 | cp = "microcode has been changed without reset"; |
860 | break; | 859 | break; |
861 | case SDEBUG_UA_LUNS_CHANGED: | 860 | case SDEBUG_UA_LUNS_CHANGED: |
@@ -864,40 +863,30 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | |||
864 | * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN | 863 | * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN |
865 | * on the target, until a REPORT LUNS command is | 864 | * on the target, until a REPORT LUNS command is |
866 | * received. SPC-4 behavior is to report it only once. | 865 | * received. SPC-4 behavior is to report it only once. |
867 | * NOTE: scsi_debug_scsi_level does not use the same | 866 | * NOTE: sdebug_scsi_level does not use the same |
868 | * values as struct scsi_device->scsi_level. | 867 | * values as struct scsi_device->scsi_level. |
869 | */ | 868 | */ |
870 | if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */ | 869 | if (sdebug_scsi_level >= 6) /* SPC-4 and above */ |
871 | clear_luns_changed_on_target(devip); | 870 | clear_luns_changed_on_target(devip); |
872 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | 871 | mk_sense_buffer(scp, UNIT_ATTENTION, |
873 | TARGET_CHANGED_ASC, | 872 | TARGET_CHANGED_ASC, |
874 | LUNS_CHANGED_ASCQ); | 873 | LUNS_CHANGED_ASCQ); |
875 | if (debug) | 874 | if (sdebug_verbose) |
876 | cp = "reported luns data has changed"; | 875 | cp = "reported luns data has changed"; |
877 | break; | 876 | break; |
878 | default: | 877 | default: |
879 | pr_warn("%s: unexpected unit attention code=%d\n", | 878 | pr_warn("unexpected unit attention code=%d\n", k); |
880 | __func__, k); | 879 | if (sdebug_verbose) |
881 | if (debug) | ||
882 | cp = "unknown"; | 880 | cp = "unknown"; |
883 | break; | 881 | break; |
884 | } | 882 | } |
885 | clear_bit(k, devip->uas_bm); | 883 | clear_bit(k, devip->uas_bm); |
886 | if (debug) | 884 | if (sdebug_verbose) |
887 | sdev_printk(KERN_INFO, SCpnt->device, | 885 | sdev_printk(KERN_INFO, scp->device, |
888 | "%s reports: Unit attention: %s\n", | 886 | "%s reports: Unit attention: %s\n", |
889 | my_name, cp); | 887 | my_name, cp); |
890 | return check_condition_result; | 888 | return check_condition_result; |
891 | } | 889 | } |
892 | if ((UAS_TUR == uas_only) && devip->stopped) { | ||
893 | mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY, | ||
894 | 0x2); | ||
895 | if (debug) | ||
896 | sdev_printk(KERN_INFO, SCpnt->device, | ||
897 | "%s reports: Not ready: %s\n", my_name, | ||
898 | "initializing command required"); | ||
899 | return check_condition_result; | ||
900 | } | ||
901 | return 0; | 890 | return 0; |
902 | } | 891 | } |
903 | 892 | ||
@@ -911,7 +900,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, | |||
911 | if (!sdb->length) | 900 | if (!sdb->length) |
912 | return 0; | 901 | return 0; |
913 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) | 902 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) |
914 | return (DID_ERROR << 16); | 903 | return DID_ERROR << 16; |
915 | 904 | ||
916 | act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, | 905 | act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, |
917 | arr, arr_len); | 906 | arr, arr_len); |
@@ -935,13 +924,17 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, | |||
935 | 924 | ||
936 | static const char * inq_vendor_id = "Linux "; | 925 | static const char * inq_vendor_id = "Linux "; |
937 | static const char * inq_product_id = "scsi_debug "; | 926 | static const char * inq_product_id = "scsi_debug "; |
938 | static const char *inq_product_rev = "0184"; /* version less '.' */ | 927 | static const char *inq_product_rev = "0186"; /* version less '.' */ |
928 | /* Use some locally assigned NAAs for SAS addresses. */ | ||
929 | static const u64 naa3_comp_a = 0x3222222000000000ULL; | ||
930 | static const u64 naa3_comp_b = 0x3333333000000000ULL; | ||
931 | static const u64 naa3_comp_c = 0x3111111000000000ULL; | ||
939 | 932 | ||
940 | /* Device identification VPD page. Returns number of bytes placed in arr */ | 933 | /* Device identification VPD page. Returns number of bytes placed in arr */ |
941 | static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | 934 | static int inquiry_vpd_83(unsigned char *arr, int port_group_id, |
942 | int target_dev_id, int dev_id_num, | 935 | int target_dev_id, int dev_id_num, |
943 | const char * dev_id_str, | 936 | const char *dev_id_str, int dev_id_str_len, |
944 | int dev_id_str_len) | 937 | const uuid_be *lu_name) |
945 | { | 938 | { |
946 | int num, port_a; | 939 | int num, port_a; |
947 | char b[32]; | 940 | char b[32]; |
@@ -958,19 +951,25 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | |||
958 | arr[3] = num; | 951 | arr[3] = num; |
959 | num += 4; | 952 | num += 4; |
960 | if (dev_id_num >= 0) { | 953 | if (dev_id_num >= 0) { |
961 | /* NAA-5, Logical unit identifier (binary) */ | 954 | if (sdebug_uuid_ctl) { |
962 | arr[num++] = 0x1; /* binary (not necessarily sas) */ | 955 | /* Locally assigned UUID */ |
963 | arr[num++] = 0x3; /* PIV=0, lu, naa */ | 956 | arr[num++] = 0x1; /* binary (not necessarily sas) */ |
964 | arr[num++] = 0x0; | 957 | arr[num++] = 0xa; /* PIV=0, lu, naa */ |
965 | arr[num++] = 0x8; | 958 | arr[num++] = 0x0; |
966 | arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */ | 959 | arr[num++] = 0x12; |
967 | arr[num++] = 0x33; | 960 | arr[num++] = 0x10; /* uuid type=1, locally assigned */ |
968 | arr[num++] = 0x33; | 961 | arr[num++] = 0x0; |
969 | arr[num++] = 0x30; | 962 | memcpy(arr + num, lu_name, 16); |
970 | arr[num++] = (dev_id_num >> 24); | 963 | num += 16; |
971 | arr[num++] = (dev_id_num >> 16) & 0xff; | 964 | } else { |
972 | arr[num++] = (dev_id_num >> 8) & 0xff; | 965 | /* NAA-3, Logical unit identifier (binary) */ |
973 | arr[num++] = dev_id_num & 0xff; | 966 | arr[num++] = 0x1; /* binary (not necessarily sas) */ |
967 | arr[num++] = 0x3; /* PIV=0, lu, naa */ | ||
968 | arr[num++] = 0x0; | ||
969 | arr[num++] = 0x8; | ||
970 | put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num); | ||
971 | num += 8; | ||
972 | } | ||
974 | /* Target relative port number */ | 973 | /* Target relative port number */ |
975 | arr[num++] = 0x61; /* proto=sas, binary */ | 974 | arr[num++] = 0x61; /* proto=sas, binary */ |
976 | arr[num++] = 0x94; /* PIV=1, target port, rel port */ | 975 | arr[num++] = 0x94; /* PIV=1, target port, rel port */ |
@@ -981,47 +980,35 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | |||
981 | arr[num++] = 0x0; | 980 | arr[num++] = 0x0; |
982 | arr[num++] = 0x1; /* relative port A */ | 981 | arr[num++] = 0x1; /* relative port A */ |
983 | } | 982 | } |
984 | /* NAA-5, Target port identifier */ | 983 | /* NAA-3, Target port identifier */ |
985 | arr[num++] = 0x61; /* proto=sas, binary */ | 984 | arr[num++] = 0x61; /* proto=sas, binary */ |
986 | arr[num++] = 0x93; /* piv=1, target port, naa */ | 985 | arr[num++] = 0x93; /* piv=1, target port, naa */ |
987 | arr[num++] = 0x0; | 986 | arr[num++] = 0x0; |
988 | arr[num++] = 0x8; | 987 | arr[num++] = 0x8; |
989 | arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ | 988 | put_unaligned_be64(naa3_comp_a + port_a, arr + num); |
990 | arr[num++] = 0x22; | 989 | num += 8; |
991 | arr[num++] = 0x22; | 990 | /* NAA-3, Target port group identifier */ |
992 | arr[num++] = 0x20; | ||
993 | arr[num++] = (port_a >> 24); | ||
994 | arr[num++] = (port_a >> 16) & 0xff; | ||
995 | arr[num++] = (port_a >> 8) & 0xff; | ||
996 | arr[num++] = port_a & 0xff; | ||
997 | /* NAA-5, Target port group identifier */ | ||
998 | arr[num++] = 0x61; /* proto=sas, binary */ | 991 | arr[num++] = 0x61; /* proto=sas, binary */ |
999 | arr[num++] = 0x95; /* piv=1, target port group id */ | 992 | arr[num++] = 0x95; /* piv=1, target port group id */ |
1000 | arr[num++] = 0x0; | 993 | arr[num++] = 0x0; |
1001 | arr[num++] = 0x4; | 994 | arr[num++] = 0x4; |
1002 | arr[num++] = 0; | 995 | arr[num++] = 0; |
1003 | arr[num++] = 0; | 996 | arr[num++] = 0; |
1004 | arr[num++] = (port_group_id >> 8) & 0xff; | 997 | put_unaligned_be16(port_group_id, arr + num); |
1005 | arr[num++] = port_group_id & 0xff; | 998 | num += 2; |
1006 | /* NAA-5, Target device identifier */ | 999 | /* NAA-3, Target device identifier */ |
1007 | arr[num++] = 0x61; /* proto=sas, binary */ | 1000 | arr[num++] = 0x61; /* proto=sas, binary */ |
1008 | arr[num++] = 0xa3; /* piv=1, target device, naa */ | 1001 | arr[num++] = 0xa3; /* piv=1, target device, naa */ |
1009 | arr[num++] = 0x0; | 1002 | arr[num++] = 0x0; |
1010 | arr[num++] = 0x8; | 1003 | arr[num++] = 0x8; |
1011 | arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ | 1004 | put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num); |
1012 | arr[num++] = 0x22; | 1005 | num += 8; |
1013 | arr[num++] = 0x22; | ||
1014 | arr[num++] = 0x20; | ||
1015 | arr[num++] = (target_dev_id >> 24); | ||
1016 | arr[num++] = (target_dev_id >> 16) & 0xff; | ||
1017 | arr[num++] = (target_dev_id >> 8) & 0xff; | ||
1018 | arr[num++] = target_dev_id & 0xff; | ||
1019 | /* SCSI name string: Target device identifier */ | 1006 | /* SCSI name string: Target device identifier */ |
1020 | arr[num++] = 0x63; /* proto=sas, UTF-8 */ | 1007 | arr[num++] = 0x63; /* proto=sas, UTF-8 */ |
1021 | arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */ | 1008 | arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */ |
1022 | arr[num++] = 0x0; | 1009 | arr[num++] = 0x0; |
1023 | arr[num++] = 24; | 1010 | arr[num++] = 24; |
1024 | memcpy(arr + num, "naa.52222220", 12); | 1011 | memcpy(arr + num, "naa.32222220", 12); |
1025 | num += 12; | 1012 | num += 12; |
1026 | snprintf(b, sizeof(b), "%08X", target_dev_id); | 1013 | snprintf(b, sizeof(b), "%08X", target_dev_id); |
1027 | memcpy(arr + num, b, 8); | 1014 | memcpy(arr + num, b, 8); |
@@ -1031,7 +1018,6 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | |||
1031 | return num; | 1018 | return num; |
1032 | } | 1019 | } |
1033 | 1020 | ||
1034 | |||
1035 | static unsigned char vpd84_data[] = { | 1021 | static unsigned char vpd84_data[] = { |
1036 | /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0, | 1022 | /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0, |
1037 | 0x22,0x22,0x22,0x0,0xbb,0x1, | 1023 | 0x22,0x22,0x22,0x0,0xbb,0x1, |
@@ -1039,14 +1025,14 @@ static unsigned char vpd84_data[] = { | |||
1039 | }; | 1025 | }; |
1040 | 1026 | ||
1041 | /* Software interface identification VPD page */ | 1027 | /* Software interface identification VPD page */ |
1042 | static int inquiry_evpd_84(unsigned char * arr) | 1028 | static int inquiry_vpd_84(unsigned char *arr) |
1043 | { | 1029 | { |
1044 | memcpy(arr, vpd84_data, sizeof(vpd84_data)); | 1030 | memcpy(arr, vpd84_data, sizeof(vpd84_data)); |
1045 | return sizeof(vpd84_data); | 1031 | return sizeof(vpd84_data); |
1046 | } | 1032 | } |
1047 | 1033 | ||
1048 | /* Management network addresses VPD page */ | 1034 | /* Management network addresses VPD page */ |
1049 | static int inquiry_evpd_85(unsigned char * arr) | 1035 | static int inquiry_vpd_85(unsigned char *arr) |
1050 | { | 1036 | { |
1051 | int num = 0; | 1037 | int num = 0; |
1052 | const char * na1 = "https://www.kernel.org/config"; | 1038 | const char * na1 = "https://www.kernel.org/config"; |
@@ -1081,7 +1067,7 @@ static int inquiry_evpd_85(unsigned char * arr) | |||
1081 | } | 1067 | } |
1082 | 1068 | ||
1083 | /* SCSI ports VPD page */ | 1069 | /* SCSI ports VPD page */ |
1084 | static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) | 1070 | static int inquiry_vpd_88(unsigned char *arr, int target_dev_id) |
1085 | { | 1071 | { |
1086 | int num = 0; | 1072 | int num = 0; |
1087 | int port_a, port_b; | 1073 | int port_a, port_b; |
@@ -1101,15 +1087,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) | |||
1101 | arr[num++] = 0x93; /* PIV=1, target port, NAA */ | 1087 | arr[num++] = 0x93; /* PIV=1, target port, NAA */ |
1102 | arr[num++] = 0x0; /* reserved */ | 1088 | arr[num++] = 0x0; /* reserved */ |
1103 | arr[num++] = 0x8; /* length */ | 1089 | arr[num++] = 0x8; /* length */ |
1104 | arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ | 1090 | put_unaligned_be64(naa3_comp_a + port_a, arr + num); |
1105 | arr[num++] = 0x22; | 1091 | num += 8; |
1106 | arr[num++] = 0x22; | ||
1107 | arr[num++] = 0x20; | ||
1108 | arr[num++] = (port_a >> 24); | ||
1109 | arr[num++] = (port_a >> 16) & 0xff; | ||
1110 | arr[num++] = (port_a >> 8) & 0xff; | ||
1111 | arr[num++] = port_a & 0xff; | ||
1112 | |||
1113 | arr[num++] = 0x0; /* reserved */ | 1092 | arr[num++] = 0x0; /* reserved */ |
1114 | arr[num++] = 0x0; /* reserved */ | 1093 | arr[num++] = 0x0; /* reserved */ |
1115 | arr[num++] = 0x0; | 1094 | arr[num++] = 0x0; |
@@ -1123,14 +1102,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) | |||
1123 | arr[num++] = 0x93; /* PIV=1, target port, NAA */ | 1102 | arr[num++] = 0x93; /* PIV=1, target port, NAA */ |
1124 | arr[num++] = 0x0; /* reserved */ | 1103 | arr[num++] = 0x0; /* reserved */ |
1125 | arr[num++] = 0x8; /* length */ | 1104 | arr[num++] = 0x8; /* length */ |
1126 | arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ | 1105 | put_unaligned_be64(naa3_comp_a + port_b, arr + num); |
1127 | arr[num++] = 0x22; | 1106 | num += 8; |
1128 | arr[num++] = 0x22; | ||
1129 | arr[num++] = 0x20; | ||
1130 | arr[num++] = (port_b >> 24); | ||
1131 | arr[num++] = (port_b >> 16) & 0xff; | ||
1132 | arr[num++] = (port_b >> 8) & 0xff; | ||
1133 | arr[num++] = port_b & 0xff; | ||
1134 | 1107 | ||
1135 | return num; | 1108 | return num; |
1136 | } | 1109 | } |
@@ -1181,7 +1154,7 @@ static unsigned char vpd89_data[] = { | |||
1181 | }; | 1154 | }; |
1182 | 1155 | ||
1183 | /* ATA Information VPD page */ | 1156 | /* ATA Information VPD page */ |
1184 | static int inquiry_evpd_89(unsigned char * arr) | 1157 | static int inquiry_vpd_89(unsigned char *arr) |
1185 | { | 1158 | { |
1186 | memcpy(arr, vpd89_data, sizeof(vpd89_data)); | 1159 | memcpy(arr, vpd89_data, sizeof(vpd89_data)); |
1187 | return sizeof(vpd89_data); | 1160 | return sizeof(vpd89_data); |
@@ -1196,47 +1169,42 @@ static unsigned char vpdb0_data[] = { | |||
1196 | }; | 1169 | }; |
1197 | 1170 | ||
1198 | /* Block limits VPD page (SBC-3) */ | 1171 | /* Block limits VPD page (SBC-3) */ |
1199 | static int inquiry_evpd_b0(unsigned char * arr) | 1172 | static int inquiry_vpd_b0(unsigned char *arr) |
1200 | { | 1173 | { |
1201 | unsigned int gran; | 1174 | unsigned int gran; |
1202 | 1175 | ||
1203 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); | 1176 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); |
1204 | 1177 | ||
1205 | /* Optimal transfer length granularity */ | 1178 | /* Optimal transfer length granularity */ |
1206 | gran = 1 << scsi_debug_physblk_exp; | 1179 | gran = 1 << sdebug_physblk_exp; |
1207 | arr[2] = (gran >> 8) & 0xff; | 1180 | put_unaligned_be16(gran, arr + 2); |
1208 | arr[3] = gran & 0xff; | ||
1209 | 1181 | ||
1210 | /* Maximum Transfer Length */ | 1182 | /* Maximum Transfer Length */ |
1211 | if (sdebug_store_sectors > 0x400) { | 1183 | if (sdebug_store_sectors > 0x400) |
1212 | arr[4] = (sdebug_store_sectors >> 24) & 0xff; | 1184 | put_unaligned_be32(sdebug_store_sectors, arr + 4); |
1213 | arr[5] = (sdebug_store_sectors >> 16) & 0xff; | ||
1214 | arr[6] = (sdebug_store_sectors >> 8) & 0xff; | ||
1215 | arr[7] = sdebug_store_sectors & 0xff; | ||
1216 | } | ||
1217 | 1185 | ||
1218 | /* Optimal Transfer Length */ | 1186 | /* Optimal Transfer Length */ |
1219 | put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); | 1187 | put_unaligned_be32(sdebug_opt_blks, &arr[8]); |
1220 | 1188 | ||
1221 | if (scsi_debug_lbpu) { | 1189 | if (sdebug_lbpu) { |
1222 | /* Maximum Unmap LBA Count */ | 1190 | /* Maximum Unmap LBA Count */ |
1223 | put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); | 1191 | put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]); |
1224 | 1192 | ||
1225 | /* Maximum Unmap Block Descriptor Count */ | 1193 | /* Maximum Unmap Block Descriptor Count */ |
1226 | put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); | 1194 | put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]); |
1227 | } | 1195 | } |
1228 | 1196 | ||
1229 | /* Unmap Granularity Alignment */ | 1197 | /* Unmap Granularity Alignment */ |
1230 | if (scsi_debug_unmap_alignment) { | 1198 | if (sdebug_unmap_alignment) { |
1231 | put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); | 1199 | put_unaligned_be32(sdebug_unmap_alignment, &arr[28]); |
1232 | arr[28] |= 0x80; /* UGAVALID */ | 1200 | arr[28] |= 0x80; /* UGAVALID */ |
1233 | } | 1201 | } |
1234 | 1202 | ||
1235 | /* Optimal Unmap Granularity */ | 1203 | /* Optimal Unmap Granularity */ |
1236 | put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); | 1204 | put_unaligned_be32(sdebug_unmap_granularity, &arr[24]); |
1237 | 1205 | ||
1238 | /* Maximum WRITE SAME Length */ | 1206 | /* Maximum WRITE SAME Length */ |
1239 | put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); | 1207 | put_unaligned_be64(sdebug_write_same_length, &arr[32]); |
1240 | 1208 | ||
1241 | return 0x3c; /* Mandatory page length for Logical Block Provisioning */ | 1209 | return 0x3c; /* Mandatory page length for Logical Block Provisioning */ |
1242 | 1210 | ||
@@ -1244,7 +1212,7 @@ static int inquiry_evpd_b0(unsigned char * arr) | |||
1244 | } | 1212 | } |
1245 | 1213 | ||
1246 | /* Block device characteristics VPD page (SBC-3) */ | 1214 | /* Block device characteristics VPD page (SBC-3) */ |
1247 | static int inquiry_evpd_b1(unsigned char *arr) | 1215 | static int inquiry_vpd_b1(unsigned char *arr) |
1248 | { | 1216 | { |
1249 | memset(arr, 0, 0x3c); | 1217 | memset(arr, 0, 0x3c); |
1250 | arr[0] = 0; | 1218 | arr[0] = 0; |
@@ -1255,24 +1223,22 @@ static int inquiry_evpd_b1(unsigned char *arr) | |||
1255 | return 0x3c; | 1223 | return 0x3c; |
1256 | } | 1224 | } |
1257 | 1225 | ||
1258 | /* Logical block provisioning VPD page (SBC-3) */ | 1226 | /* Logical block provisioning VPD page (SBC-4) */ |
1259 | static int inquiry_evpd_b2(unsigned char *arr) | 1227 | static int inquiry_vpd_b2(unsigned char *arr) |
1260 | { | 1228 | { |
1261 | memset(arr, 0, 0x4); | 1229 | memset(arr, 0, 0x4); |
1262 | arr[0] = 0; /* threshold exponent */ | 1230 | arr[0] = 0; /* threshold exponent */ |
1263 | 1231 | if (sdebug_lbpu) | |
1264 | if (scsi_debug_lbpu) | ||
1265 | arr[1] = 1 << 7; | 1232 | arr[1] = 1 << 7; |
1266 | 1233 | if (sdebug_lbpws) | |
1267 | if (scsi_debug_lbpws) | ||
1268 | arr[1] |= 1 << 6; | 1234 | arr[1] |= 1 << 6; |
1269 | 1235 | if (sdebug_lbpws10) | |
1270 | if (scsi_debug_lbpws10) | ||
1271 | arr[1] |= 1 << 5; | 1236 | arr[1] |= 1 << 5; |
1272 | 1237 | if (sdebug_lbprz && scsi_debug_lbp()) | |
1273 | if (scsi_debug_lbprz) | 1238 | arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */ |
1274 | arr[1] |= 1 << 2; | 1239 | /* anc_sup=0; dp=0 (no provisioning group descriptor) */ |
1275 | 1240 | /* minimum_percentage=0; provisioning_type=0 (unknown) */ | |
1241 | /* threshold_percentage=0 */ | ||
1276 | return 0x4; | 1242 | return 0x4; |
1277 | } | 1243 | } |
1278 | 1244 | ||
@@ -1285,19 +1251,20 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1285 | unsigned char * arr; | 1251 | unsigned char * arr; |
1286 | unsigned char *cmd = scp->cmnd; | 1252 | unsigned char *cmd = scp->cmnd; |
1287 | int alloc_len, n, ret; | 1253 | int alloc_len, n, ret; |
1288 | bool have_wlun; | 1254 | bool have_wlun, is_disk; |
1289 | 1255 | ||
1290 | alloc_len = (cmd[3] << 8) + cmd[4]; | 1256 | alloc_len = get_unaligned_be16(cmd + 3); |
1291 | arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); | 1257 | arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); |
1292 | if (! arr) | 1258 | if (! arr) |
1293 | return DID_REQUEUE << 16; | 1259 | return DID_REQUEUE << 16; |
1294 | have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS); | 1260 | is_disk = (sdebug_ptype == TYPE_DISK); |
1261 | have_wlun = scsi_is_wlun(scp->device->lun); | ||
1295 | if (have_wlun) | 1262 | if (have_wlun) |
1296 | pq_pdt = 0x1e; /* present, wlun */ | 1263 | pq_pdt = TYPE_WLUN; /* present, wlun */ |
1297 | else if (scsi_debug_no_lun_0 && (0 == devip->lun)) | 1264 | else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) |
1298 | pq_pdt = 0x7f; /* not present, no device type */ | 1265 | pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ |
1299 | else | 1266 | else |
1300 | pq_pdt = (scsi_debug_ptype & 0x1f); | 1267 | pq_pdt = (sdebug_ptype & 0x1f); |
1301 | arr[0] = pq_pdt; | 1268 | arr[0] = pq_pdt; |
1302 | if (0x2 & cmd[1]) { /* CMDDT bit set */ | 1269 | if (0x2 & cmd[1]) { /* CMDDT bit set */ |
1303 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); | 1270 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); |
@@ -1310,7 +1277,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1310 | 1277 | ||
1311 | port_group_id = (((host_no + 1) & 0x7f) << 8) + | 1278 | port_group_id = (((host_no + 1) & 0x7f) << 8) + |
1312 | (devip->channel & 0x7f); | 1279 | (devip->channel & 0x7f); |
1313 | if (0 == scsi_debug_vpd_use_hostno) | 1280 | if (sdebug_vpd_use_hostno == 0) |
1314 | host_no = 0; | 1281 | host_no = 0; |
1315 | lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + | 1282 | lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + |
1316 | (devip->target * 1000) + devip->lun); | 1283 | (devip->target * 1000) + devip->lun); |
@@ -1328,11 +1295,12 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1328 | arr[n++] = 0x86; /* extended inquiry */ | 1295 | arr[n++] = 0x86; /* extended inquiry */ |
1329 | arr[n++] = 0x87; /* mode page policy */ | 1296 | arr[n++] = 0x87; /* mode page policy */ |
1330 | arr[n++] = 0x88; /* SCSI ports */ | 1297 | arr[n++] = 0x88; /* SCSI ports */ |
1331 | arr[n++] = 0x89; /* ATA information */ | 1298 | if (is_disk) { /* SBC only */ |
1332 | arr[n++] = 0xb0; /* Block limits (SBC) */ | 1299 | arr[n++] = 0x89; /* ATA information */ |
1333 | arr[n++] = 0xb1; /* Block characteristics (SBC) */ | 1300 | arr[n++] = 0xb0; /* Block limits */ |
1334 | if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */ | 1301 | arr[n++] = 0xb1; /* Block characteristics */ |
1335 | arr[n++] = 0xb2; | 1302 | arr[n++] = 0xb2; /* Logical Block Prov */ |
1303 | } | ||
1336 | arr[3] = n - 4; /* number of supported VPD pages */ | 1304 | arr[3] = n - 4; /* number of supported VPD pages */ |
1337 | } else if (0x80 == cmd[2]) { /* unit serial number */ | 1305 | } else if (0x80 == cmd[2]) { /* unit serial number */ |
1338 | arr[1] = cmd[2]; /*sanity */ | 1306 | arr[1] = cmd[2]; /*sanity */ |
@@ -1340,21 +1308,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1340 | memcpy(&arr[4], lu_id_str, len); | 1308 | memcpy(&arr[4], lu_id_str, len); |
1341 | } else if (0x83 == cmd[2]) { /* device identification */ | 1309 | } else if (0x83 == cmd[2]) { /* device identification */ |
1342 | arr[1] = cmd[2]; /*sanity */ | 1310 | arr[1] = cmd[2]; /*sanity */ |
1343 | arr[3] = inquiry_evpd_83(&arr[4], port_group_id, | 1311 | arr[3] = inquiry_vpd_83(&arr[4], port_group_id, |
1344 | target_dev_id, lu_id_num, | 1312 | target_dev_id, lu_id_num, |
1345 | lu_id_str, len); | 1313 | lu_id_str, len, |
1314 | &devip->lu_name); | ||
1346 | } else if (0x84 == cmd[2]) { /* Software interface ident. */ | 1315 | } else if (0x84 == cmd[2]) { /* Software interface ident. */ |
1347 | arr[1] = cmd[2]; /*sanity */ | 1316 | arr[1] = cmd[2]; /*sanity */ |
1348 | arr[3] = inquiry_evpd_84(&arr[4]); | 1317 | arr[3] = inquiry_vpd_84(&arr[4]); |
1349 | } else if (0x85 == cmd[2]) { /* Management network addresses */ | 1318 | } else if (0x85 == cmd[2]) { /* Management network addresses */ |
1350 | arr[1] = cmd[2]; /*sanity */ | 1319 | arr[1] = cmd[2]; /*sanity */ |
1351 | arr[3] = inquiry_evpd_85(&arr[4]); | 1320 | arr[3] = inquiry_vpd_85(&arr[4]); |
1352 | } else if (0x86 == cmd[2]) { /* extended inquiry */ | 1321 | } else if (0x86 == cmd[2]) { /* extended inquiry */ |
1353 | arr[1] = cmd[2]; /*sanity */ | 1322 | arr[1] = cmd[2]; /*sanity */ |
1354 | arr[3] = 0x3c; /* number of following entries */ | 1323 | arr[3] = 0x3c; /* number of following entries */ |
1355 | if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) | 1324 | if (sdebug_dif == SD_DIF_TYPE3_PROTECTION) |
1356 | arr[4] = 0x4; /* SPT: GRD_CHK:1 */ | 1325 | arr[4] = 0x4; /* SPT: GRD_CHK:1 */ |
1357 | else if (scsi_debug_dif) | 1326 | else if (have_dif_prot) |
1358 | arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ | 1327 | arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ |
1359 | else | 1328 | else |
1360 | arr[4] = 0x0; /* no protection stuff */ | 1329 | arr[4] = 0x0; /* no protection stuff */ |
@@ -1368,39 +1337,38 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1368 | arr[10] = 0x82; /* mlus, per initiator port */ | 1337 | arr[10] = 0x82; /* mlus, per initiator port */ |
1369 | } else if (0x88 == cmd[2]) { /* SCSI Ports */ | 1338 | } else if (0x88 == cmd[2]) { /* SCSI Ports */ |
1370 | arr[1] = cmd[2]; /*sanity */ | 1339 | arr[1] = cmd[2]; /*sanity */ |
1371 | arr[3] = inquiry_evpd_88(&arr[4], target_dev_id); | 1340 | arr[3] = inquiry_vpd_88(&arr[4], target_dev_id); |
1372 | } else if (0x89 == cmd[2]) { /* ATA information */ | 1341 | } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */ |
1373 | arr[1] = cmd[2]; /*sanity */ | 1342 | arr[1] = cmd[2]; /*sanity */ |
1374 | n = inquiry_evpd_89(&arr[4]); | 1343 | n = inquiry_vpd_89(&arr[4]); |
1375 | arr[2] = (n >> 8); | 1344 | put_unaligned_be16(n, arr + 2); |
1376 | arr[3] = (n & 0xff); | 1345 | } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */ |
1377 | } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ | ||
1378 | arr[1] = cmd[2]; /*sanity */ | 1346 | arr[1] = cmd[2]; /*sanity */ |
1379 | arr[3] = inquiry_evpd_b0(&arr[4]); | 1347 | arr[3] = inquiry_vpd_b0(&arr[4]); |
1380 | } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ | 1348 | } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */ |
1381 | arr[1] = cmd[2]; /*sanity */ | 1349 | arr[1] = cmd[2]; /*sanity */ |
1382 | arr[3] = inquiry_evpd_b1(&arr[4]); | 1350 | arr[3] = inquiry_vpd_b1(&arr[4]); |
1383 | } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */ | 1351 | } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */ |
1384 | arr[1] = cmd[2]; /*sanity */ | 1352 | arr[1] = cmd[2]; /*sanity */ |
1385 | arr[3] = inquiry_evpd_b2(&arr[4]); | 1353 | arr[3] = inquiry_vpd_b2(&arr[4]); |
1386 | } else { | 1354 | } else { |
1387 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); | 1355 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); |
1388 | kfree(arr); | 1356 | kfree(arr); |
1389 | return check_condition_result; | 1357 | return check_condition_result; |
1390 | } | 1358 | } |
1391 | len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); | 1359 | len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); |
1392 | ret = fill_from_dev_buffer(scp, arr, | 1360 | ret = fill_from_dev_buffer(scp, arr, |
1393 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); | 1361 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); |
1394 | kfree(arr); | 1362 | kfree(arr); |
1395 | return ret; | 1363 | return ret; |
1396 | } | 1364 | } |
1397 | /* drops through here for a standard inquiry */ | 1365 | /* drops through here for a standard inquiry */ |
1398 | arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */ | 1366 | arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */ |
1399 | arr[2] = scsi_debug_scsi_level; | 1367 | arr[2] = sdebug_scsi_level; |
1400 | arr[3] = 2; /* response_data_format==2 */ | 1368 | arr[3] = 2; /* response_data_format==2 */ |
1401 | arr[4] = SDEBUG_LONG_INQ_SZ - 5; | 1369 | arr[4] = SDEBUG_LONG_INQ_SZ - 5; |
1402 | arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */ | 1370 | arr[5] = (int)have_dif_prot; /* PROTECT bit */ |
1403 | if (0 == scsi_debug_vpd_use_hostno) | 1371 | if (sdebug_vpd_use_hostno == 0) |
1404 | arr[5] = 0x10; /* claim: implicit TGPS */ | 1372 | arr[5] = 0x10; /* claim: implicit TGPS */ |
1405 | arr[6] = 0x10; /* claim: MultiP */ | 1373 | arr[6] = 0x10; /* claim: MultiP */ |
1406 | /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ | 1374 | /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ |
@@ -1409,21 +1377,26 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1409 | memcpy(&arr[16], inq_product_id, 16); | 1377 | memcpy(&arr[16], inq_product_id, 16); |
1410 | memcpy(&arr[32], inq_product_rev, 4); | 1378 | memcpy(&arr[32], inq_product_rev, 4); |
1411 | /* version descriptors (2 bytes each) follow */ | 1379 | /* version descriptors (2 bytes each) follow */ |
1412 | arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */ | 1380 | put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ |
1413 | arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */ | 1381 | put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ |
1414 | n = 62; | 1382 | n = 62; |
1415 | if (scsi_debug_ptype == 0) { | 1383 | if (is_disk) { /* SBC-4 no version claimed */ |
1416 | arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */ | 1384 | put_unaligned_be16(0x600, arr + n); |
1417 | } else if (scsi_debug_ptype == 1) { | 1385 | n += 2; |
1418 | arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */ | 1386 | } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ |
1419 | } | 1387 | put_unaligned_be16(0x525, arr + n); |
1420 | arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */ | 1388 | n += 2; |
1389 | } | ||
1390 | put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ | ||
1421 | ret = fill_from_dev_buffer(scp, arr, | 1391 | ret = fill_from_dev_buffer(scp, arr, |
1422 | min(alloc_len, SDEBUG_LONG_INQ_SZ)); | 1392 | min(alloc_len, SDEBUG_LONG_INQ_SZ)); |
1423 | kfree(arr); | 1393 | kfree(arr); |
1424 | return ret; | 1394 | return ret; |
1425 | } | 1395 | } |
1426 | 1396 | ||
1397 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, | ||
1398 | 0, 0, 0x0, 0x0}; | ||
1399 | |||
1427 | static int resp_requests(struct scsi_cmnd * scp, | 1400 | static int resp_requests(struct scsi_cmnd * scp, |
1428 | struct sdebug_dev_info * devip) | 1401 | struct sdebug_dev_info * devip) |
1429 | { | 1402 | { |
@@ -1452,7 +1425,7 @@ static int resp_requests(struct scsi_cmnd * scp, | |||
1452 | } | 1425 | } |
1453 | } else { | 1426 | } else { |
1454 | memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); | 1427 | memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); |
1455 | if (arr[0] >= 0x70 && dsense == scsi_debug_dsense) | 1428 | if (arr[0] >= 0x70 && dsense == sdebug_dsense) |
1456 | ; /* have sense and formats match */ | 1429 | ; /* have sense and formats match */ |
1457 | else if (arr[0] <= 0x70) { | 1430 | else if (arr[0] <= 0x70) { |
1458 | if (dsense) { | 1431 | if (dsense) { |
@@ -1489,24 +1462,25 @@ static int resp_start_stop(struct scsi_cmnd * scp, | |||
1489 | struct sdebug_dev_info * devip) | 1462 | struct sdebug_dev_info * devip) |
1490 | { | 1463 | { |
1491 | unsigned char *cmd = scp->cmnd; | 1464 | unsigned char *cmd = scp->cmnd; |
1492 | int power_cond, start; | 1465 | int power_cond, stop; |
1493 | 1466 | ||
1494 | power_cond = (cmd[4] & 0xf0) >> 4; | 1467 | power_cond = (cmd[4] & 0xf0) >> 4; |
1495 | if (power_cond) { | 1468 | if (power_cond) { |
1496 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); | 1469 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); |
1497 | return check_condition_result; | 1470 | return check_condition_result; |
1498 | } | 1471 | } |
1499 | start = cmd[4] & 1; | 1472 | stop = !(cmd[4] & 1); |
1500 | if (start == devip->stopped) | 1473 | atomic_xchg(&devip->stopped, stop); |
1501 | devip->stopped = !start; | ||
1502 | return 0; | 1474 | return 0; |
1503 | } | 1475 | } |
1504 | 1476 | ||
1505 | static sector_t get_sdebug_capacity(void) | 1477 | static sector_t get_sdebug_capacity(void) |
1506 | { | 1478 | { |
1507 | if (scsi_debug_virtual_gb > 0) | 1479 | static const unsigned int gibibyte = 1073741824; |
1508 | return (sector_t)scsi_debug_virtual_gb * | 1480 | |
1509 | (1073741824 / scsi_debug_sector_size); | 1481 | if (sdebug_virtual_gb > 0) |
1482 | return (sector_t)sdebug_virtual_gb * | ||
1483 | (gibibyte / sdebug_sector_size); | ||
1510 | else | 1484 | else |
1511 | return sdebug_store_sectors; | 1485 | return sdebug_store_sectors; |
1512 | } | 1486 | } |
@@ -1523,18 +1497,10 @@ static int resp_readcap(struct scsi_cmnd * scp, | |||
1523 | memset(arr, 0, SDEBUG_READCAP_ARR_SZ); | 1497 | memset(arr, 0, SDEBUG_READCAP_ARR_SZ); |
1524 | if (sdebug_capacity < 0xffffffff) { | 1498 | if (sdebug_capacity < 0xffffffff) { |
1525 | capac = (unsigned int)sdebug_capacity - 1; | 1499 | capac = (unsigned int)sdebug_capacity - 1; |
1526 | arr[0] = (capac >> 24); | 1500 | put_unaligned_be32(capac, arr + 0); |
1527 | arr[1] = (capac >> 16) & 0xff; | 1501 | } else |
1528 | arr[2] = (capac >> 8) & 0xff; | 1502 | put_unaligned_be32(0xffffffff, arr + 0); |
1529 | arr[3] = capac & 0xff; | 1503 | put_unaligned_be16(sdebug_sector_size, arr + 6); |
1530 | } else { | ||
1531 | arr[0] = 0xff; | ||
1532 | arr[1] = 0xff; | ||
1533 | arr[2] = 0xff; | ||
1534 | arr[3] = 0xff; | ||
1535 | } | ||
1536 | arr[6] = (scsi_debug_sector_size >> 8) & 0xff; | ||
1537 | arr[7] = scsi_debug_sector_size & 0xff; | ||
1538 | return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); | 1504 | return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); |
1539 | } | 1505 | } |
1540 | 1506 | ||
@@ -1544,34 +1510,31 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
1544 | { | 1510 | { |
1545 | unsigned char *cmd = scp->cmnd; | 1511 | unsigned char *cmd = scp->cmnd; |
1546 | unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; | 1512 | unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; |
1547 | unsigned long long capac; | 1513 | int alloc_len; |
1548 | int k, alloc_len; | ||
1549 | 1514 | ||
1550 | alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) | 1515 | alloc_len = get_unaligned_be32(cmd + 10); |
1551 | + cmd[13]); | ||
1552 | /* following just in case virtual_gb changed */ | 1516 | /* following just in case virtual_gb changed */ |
1553 | sdebug_capacity = get_sdebug_capacity(); | 1517 | sdebug_capacity = get_sdebug_capacity(); |
1554 | memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); | 1518 | memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); |
1555 | capac = sdebug_capacity - 1; | 1519 | put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); |
1556 | for (k = 0; k < 8; ++k, capac >>= 8) | 1520 | put_unaligned_be32(sdebug_sector_size, arr + 8); |
1557 | arr[7 - k] = capac & 0xff; | 1521 | arr[13] = sdebug_physblk_exp & 0xf; |
1558 | arr[8] = (scsi_debug_sector_size >> 24) & 0xff; | 1522 | arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f; |
1559 | arr[9] = (scsi_debug_sector_size >> 16) & 0xff; | ||
1560 | arr[10] = (scsi_debug_sector_size >> 8) & 0xff; | ||
1561 | arr[11] = scsi_debug_sector_size & 0xff; | ||
1562 | arr[13] = scsi_debug_physblk_exp & 0xf; | ||
1563 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; | ||
1564 | 1523 | ||
1565 | if (scsi_debug_lbp()) { | 1524 | if (scsi_debug_lbp()) { |
1566 | arr[14] |= 0x80; /* LBPME */ | 1525 | arr[14] |= 0x80; /* LBPME */ |
1567 | if (scsi_debug_lbprz) | 1526 | /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in |
1568 | arr[14] |= 0x40; /* LBPRZ */ | 1527 | * the LB Provisioning VPD page is 3 bits. Note that lbprz=2 |
1528 | * in the wider field maps to 0 in this field. | ||
1529 | */ | ||
1530 | if (sdebug_lbprz & 1) /* precisely what the draft requires */ | ||
1531 | arr[14] |= 0x40; | ||
1569 | } | 1532 | } |
1570 | 1533 | ||
1571 | arr[15] = scsi_debug_lowest_aligned & 0xff; | 1534 | arr[15] = sdebug_lowest_aligned & 0xff; |
1572 | 1535 | ||
1573 | if (scsi_debug_dif) { | 1536 | if (have_dif_prot) { |
1574 | arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ | 1537 | arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ |
1575 | arr[12] |= 1; /* PROT_EN */ | 1538 | arr[12] |= 1; /* PROT_EN */ |
1576 | } | 1539 | } |
1577 | 1540 | ||
@@ -1590,9 +1553,7 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, | |||
1590 | int n, ret, alen, rlen; | 1553 | int n, ret, alen, rlen; |
1591 | int port_group_a, port_group_b, port_a, port_b; | 1554 | int port_group_a, port_group_b, port_a, port_b; |
1592 | 1555 | ||
1593 | alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) | 1556 | alen = get_unaligned_be32(cmd + 6); |
1594 | + cmd[9]); | ||
1595 | |||
1596 | arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); | 1557 | arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); |
1597 | if (! arr) | 1558 | if (! arr) |
1598 | return DID_REQUEUE << 16; | 1559 | return DID_REQUEUE << 16; |
@@ -1605,49 +1566,46 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, | |||
1605 | port_a = 0x1; /* relative port A */ | 1566 | port_a = 0x1; /* relative port A */ |
1606 | port_b = 0x2; /* relative port B */ | 1567 | port_b = 0x2; /* relative port B */ |
1607 | port_group_a = (((host_no + 1) & 0x7f) << 8) + | 1568 | port_group_a = (((host_no + 1) & 0x7f) << 8) + |
1608 | (devip->channel & 0x7f); | 1569 | (devip->channel & 0x7f); |
1609 | port_group_b = (((host_no + 1) & 0x7f) << 8) + | 1570 | port_group_b = (((host_no + 1) & 0x7f) << 8) + |
1610 | (devip->channel & 0x7f) + 0x80; | 1571 | (devip->channel & 0x7f) + 0x80; |
1611 | 1572 | ||
1612 | /* | 1573 | /* |
1613 | * The asymmetric access state is cycled according to the host_id. | 1574 | * The asymmetric access state is cycled according to the host_id. |
1614 | */ | 1575 | */ |
1615 | n = 4; | 1576 | n = 4; |
1616 | if (0 == scsi_debug_vpd_use_hostno) { | 1577 | if (sdebug_vpd_use_hostno == 0) { |
1617 | arr[n++] = host_no % 3; /* Asymm access state */ | 1578 | arr[n++] = host_no % 3; /* Asymm access state */ |
1618 | arr[n++] = 0x0F; /* claim: all states are supported */ | 1579 | arr[n++] = 0x0F; /* claim: all states are supported */ |
1619 | } else { | 1580 | } else { |
1620 | arr[n++] = 0x0; /* Active/Optimized path */ | 1581 | arr[n++] = 0x0; /* Active/Optimized path */ |
1621 | arr[n++] = 0x01; /* claim: only support active/optimized paths */ | 1582 | arr[n++] = 0x01; /* only support active/optimized paths */ |
1622 | } | 1583 | } |
1623 | arr[n++] = (port_group_a >> 8) & 0xff; | 1584 | put_unaligned_be16(port_group_a, arr + n); |
1624 | arr[n++] = port_group_a & 0xff; | 1585 | n += 2; |
1625 | arr[n++] = 0; /* Reserved */ | 1586 | arr[n++] = 0; /* Reserved */ |
1626 | arr[n++] = 0; /* Status code */ | 1587 | arr[n++] = 0; /* Status code */ |
1627 | arr[n++] = 0; /* Vendor unique */ | 1588 | arr[n++] = 0; /* Vendor unique */ |
1628 | arr[n++] = 0x1; /* One port per group */ | 1589 | arr[n++] = 0x1; /* One port per group */ |
1629 | arr[n++] = 0; /* Reserved */ | 1590 | arr[n++] = 0; /* Reserved */ |
1630 | arr[n++] = 0; /* Reserved */ | 1591 | arr[n++] = 0; /* Reserved */ |
1631 | arr[n++] = (port_a >> 8) & 0xff; | 1592 | put_unaligned_be16(port_a, arr + n); |
1632 | arr[n++] = port_a & 0xff; | 1593 | n += 2; |
1633 | arr[n++] = 3; /* Port unavailable */ | 1594 | arr[n++] = 3; /* Port unavailable */ |
1634 | arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ | 1595 | arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ |
1635 | arr[n++] = (port_group_b >> 8) & 0xff; | 1596 | put_unaligned_be16(port_group_b, arr + n); |
1636 | arr[n++] = port_group_b & 0xff; | 1597 | n += 2; |
1637 | arr[n++] = 0; /* Reserved */ | 1598 | arr[n++] = 0; /* Reserved */ |
1638 | arr[n++] = 0; /* Status code */ | 1599 | arr[n++] = 0; /* Status code */ |
1639 | arr[n++] = 0; /* Vendor unique */ | 1600 | arr[n++] = 0; /* Vendor unique */ |
1640 | arr[n++] = 0x1; /* One port per group */ | 1601 | arr[n++] = 0x1; /* One port per group */ |
1641 | arr[n++] = 0; /* Reserved */ | 1602 | arr[n++] = 0; /* Reserved */ |
1642 | arr[n++] = 0; /* Reserved */ | 1603 | arr[n++] = 0; /* Reserved */ |
1643 | arr[n++] = (port_b >> 8) & 0xff; | 1604 | put_unaligned_be16(port_b, arr + n); |
1644 | arr[n++] = port_b & 0xff; | 1605 | n += 2; |
1645 | 1606 | ||
1646 | rlen = n - 4; | 1607 | rlen = n - 4; |
1647 | arr[0] = (rlen >> 24) & 0xff; | 1608 | put_unaligned_be32(rlen, arr + 0); |
1648 | arr[1] = (rlen >> 16) & 0xff; | ||
1649 | arr[2] = (rlen >> 8) & 0xff; | ||
1650 | arr[3] = rlen & 0xff; | ||
1651 | 1609 | ||
1652 | /* | 1610 | /* |
1653 | * Return the smallest value of either | 1611 | * Return the smallest value of either |
@@ -1662,8 +1620,8 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, | |||
1662 | return ret; | 1620 | return ret; |
1663 | } | 1621 | } |
1664 | 1622 | ||
1665 | static int | 1623 | static int resp_rsup_opcodes(struct scsi_cmnd *scp, |
1666 | resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 1624 | struct sdebug_dev_info *devip) |
1667 | { | 1625 | { |
1668 | bool rctd; | 1626 | bool rctd; |
1669 | u8 reporting_opts, req_opcode, sdeb_i, supp; | 1627 | u8 reporting_opts, req_opcode, sdeb_i, supp; |
@@ -1813,8 +1771,8 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
1813 | return errsts; | 1771 | return errsts; |
1814 | } | 1772 | } |
1815 | 1773 | ||
1816 | static int | 1774 | static int resp_rsup_tmfs(struct scsi_cmnd *scp, |
1817 | resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 1775 | struct sdebug_dev_info *devip) |
1818 | { | 1776 | { |
1819 | bool repd; | 1777 | bool repd; |
1820 | u32 alloc_len, len; | 1778 | u32 alloc_len, len; |
@@ -1871,17 +1829,19 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target) | |||
1871 | 0, 0, 0, 0, 0x40, 0, 0, 0}; | 1829 | 0, 0, 0, 0, 0x40, 0, 0, 0}; |
1872 | 1830 | ||
1873 | memcpy(p, format_pg, sizeof(format_pg)); | 1831 | memcpy(p, format_pg, sizeof(format_pg)); |
1874 | p[10] = (sdebug_sectors_per >> 8) & 0xff; | 1832 | put_unaligned_be16(sdebug_sectors_per, p + 10); |
1875 | p[11] = sdebug_sectors_per & 0xff; | 1833 | put_unaligned_be16(sdebug_sector_size, p + 12); |
1876 | p[12] = (scsi_debug_sector_size >> 8) & 0xff; | 1834 | if (sdebug_removable) |
1877 | p[13] = scsi_debug_sector_size & 0xff; | ||
1878 | if (scsi_debug_removable) | ||
1879 | p[20] |= 0x20; /* should agree with INQUIRY */ | 1835 | p[20] |= 0x20; /* should agree with INQUIRY */ |
1880 | if (1 == pcontrol) | 1836 | if (1 == pcontrol) |
1881 | memset(p + 2, 0, sizeof(format_pg) - 2); | 1837 | memset(p + 2, 0, sizeof(format_pg) - 2); |
1882 | return sizeof(format_pg); | 1838 | return sizeof(format_pg); |
1883 | } | 1839 | } |
1884 | 1840 | ||
1841 | static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | ||
1842 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, | ||
1843 | 0, 0, 0, 0}; | ||
1844 | |||
1885 | static int resp_caching_pg(unsigned char * p, int pcontrol, int target) | 1845 | static int resp_caching_pg(unsigned char * p, int pcontrol, int target) |
1886 | { /* Caching page for mode_sense */ | 1846 | { /* Caching page for mode_sense */ |
1887 | unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, | 1847 | unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, |
@@ -1889,7 +1849,7 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target) | |||
1889 | unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | 1849 | unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, |
1890 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; | 1850 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; |
1891 | 1851 | ||
1892 | if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts) | 1852 | if (SDEBUG_OPT_N_WCE & sdebug_opts) |
1893 | caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ | 1853 | caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ |
1894 | memcpy(p, caching_pg, sizeof(caching_pg)); | 1854 | memcpy(p, caching_pg, sizeof(caching_pg)); |
1895 | if (1 == pcontrol) | 1855 | if (1 == pcontrol) |
@@ -1899,6 +1859,9 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target) | |||
1899 | return sizeof(caching_pg); | 1859 | return sizeof(caching_pg); |
1900 | } | 1860 | } |
1901 | 1861 | ||
1862 | static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, | ||
1863 | 0, 0, 0x2, 0x4b}; | ||
1864 | |||
1902 | static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) | 1865 | static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) |
1903 | { /* Control mode page for mode_sense */ | 1866 | { /* Control mode page for mode_sense */ |
1904 | unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, | 1867 | unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, |
@@ -1906,12 +1869,12 @@ static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) | |||
1906 | unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, | 1869 | unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, |
1907 | 0, 0, 0x2, 0x4b}; | 1870 | 0, 0, 0x2, 0x4b}; |
1908 | 1871 | ||
1909 | if (scsi_debug_dsense) | 1872 | if (sdebug_dsense) |
1910 | ctrl_m_pg[2] |= 0x4; | 1873 | ctrl_m_pg[2] |= 0x4; |
1911 | else | 1874 | else |
1912 | ctrl_m_pg[2] &= ~0x4; | 1875 | ctrl_m_pg[2] &= ~0x4; |
1913 | 1876 | ||
1914 | if (scsi_debug_ato) | 1877 | if (sdebug_ato) |
1915 | ctrl_m_pg[5] |= 0x80; /* ATO=1 */ | 1878 | ctrl_m_pg[5] |= 0x80; /* ATO=1 */ |
1916 | 1879 | ||
1917 | memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); | 1880 | memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); |
@@ -1955,31 +1918,29 @@ static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target, | |||
1955 | { /* SAS phy control and discover mode page for mode_sense */ | 1918 | { /* SAS phy control and discover mode page for mode_sense */ |
1956 | unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, | 1919 | unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, |
1957 | 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, | 1920 | 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, |
1958 | 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, | 1921 | 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ |
1959 | 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, | 1922 | 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ |
1960 | 0x2, 0, 0, 0, 0, 0, 0, 0, | 1923 | 0x2, 0, 0, 0, 0, 0, 0, 0, |
1961 | 0x88, 0x99, 0, 0, 0, 0, 0, 0, | 1924 | 0x88, 0x99, 0, 0, 0, 0, 0, 0, |
1962 | 0, 0, 0, 0, 0, 0, 0, 0, | 1925 | 0, 0, 0, 0, 0, 0, 0, 0, |
1963 | 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, | 1926 | 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, |
1964 | 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, | 1927 | 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ |
1965 | 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, | 1928 | 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ |
1966 | 0x3, 0, 0, 0, 0, 0, 0, 0, | 1929 | 0x3, 0, 0, 0, 0, 0, 0, 0, |
1967 | 0x88, 0x99, 0, 0, 0, 0, 0, 0, | 1930 | 0x88, 0x99, 0, 0, 0, 0, 0, 0, |
1968 | 0, 0, 0, 0, 0, 0, 0, 0, | 1931 | 0, 0, 0, 0, 0, 0, 0, 0, |
1969 | }; | 1932 | }; |
1970 | int port_a, port_b; | 1933 | int port_a, port_b; |
1971 | 1934 | ||
1935 | put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16); | ||
1936 | put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24); | ||
1937 | put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64); | ||
1938 | put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72); | ||
1972 | port_a = target_dev_id + 1; | 1939 | port_a = target_dev_id + 1; |
1973 | port_b = port_a + 1; | 1940 | port_b = port_a + 1; |
1974 | memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); | 1941 | memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); |
1975 | p[20] = (port_a >> 24); | 1942 | put_unaligned_be32(port_a, p + 20); |
1976 | p[21] = (port_a >> 16) & 0xff; | 1943 | put_unaligned_be32(port_b, p + 48 + 20); |
1977 | p[22] = (port_a >> 8) & 0xff; | ||
1978 | p[23] = port_a & 0xff; | ||
1979 | p[48 + 20] = (port_b >> 24); | ||
1980 | p[48 + 21] = (port_b >> 16) & 0xff; | ||
1981 | p[48 + 22] = (port_b >> 8) & 0xff; | ||
1982 | p[48 + 23] = port_b & 0xff; | ||
1983 | if (1 == pcontrol) | 1944 | if (1 == pcontrol) |
1984 | memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); | 1945 | memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); |
1985 | return sizeof(sas_pcd_m_pg); | 1946 | return sizeof(sas_pcd_m_pg); |
@@ -1999,29 +1960,30 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol) | |||
1999 | 1960 | ||
2000 | #define SDEBUG_MAX_MSENSE_SZ 256 | 1961 | #define SDEBUG_MAX_MSENSE_SZ 256 |
2001 | 1962 | ||
2002 | static int | 1963 | static int resp_mode_sense(struct scsi_cmnd *scp, |
2003 | resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 1964 | struct sdebug_dev_info *devip) |
2004 | { | 1965 | { |
2005 | unsigned char dbd, llbaa; | ||
2006 | int pcontrol, pcode, subpcode, bd_len; | 1966 | int pcontrol, pcode, subpcode, bd_len; |
2007 | unsigned char dev_spec; | 1967 | unsigned char dev_spec; |
2008 | int k, alloc_len, msense_6, offset, len, target_dev_id; | 1968 | int alloc_len, offset, len, target_dev_id; |
2009 | int target = scp->device->id; | 1969 | int target = scp->device->id; |
2010 | unsigned char * ap; | 1970 | unsigned char * ap; |
2011 | unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; | 1971 | unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; |
2012 | unsigned char *cmd = scp->cmnd; | 1972 | unsigned char *cmd = scp->cmnd; |
1973 | bool dbd, llbaa, msense_6, is_disk, bad_pcode; | ||
2013 | 1974 | ||
2014 | dbd = !!(cmd[1] & 0x8); | 1975 | dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ |
2015 | pcontrol = (cmd[2] & 0xc0) >> 6; | 1976 | pcontrol = (cmd[2] & 0xc0) >> 6; |
2016 | pcode = cmd[2] & 0x3f; | 1977 | pcode = cmd[2] & 0x3f; |
2017 | subpcode = cmd[3]; | 1978 | subpcode = cmd[3]; |
2018 | msense_6 = (MODE_SENSE == cmd[0]); | 1979 | msense_6 = (MODE_SENSE == cmd[0]); |
2019 | llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10); | 1980 | llbaa = msense_6 ? false : !!(cmd[1] & 0x10); |
2020 | if ((0 == scsi_debug_ptype) && (0 == dbd)) | 1981 | is_disk = (sdebug_ptype == TYPE_DISK); |
1982 | if (is_disk && !dbd) | ||
2021 | bd_len = llbaa ? 16 : 8; | 1983 | bd_len = llbaa ? 16 : 8; |
2022 | else | 1984 | else |
2023 | bd_len = 0; | 1985 | bd_len = 0; |
2024 | alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); | 1986 | alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); |
2025 | memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); | 1987 | memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); |
2026 | if (0x3 == pcontrol) { /* Saving values not supported */ | 1988 | if (0x3 == pcontrol) { /* Saving values not supported */ |
2027 | mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); | 1989 | mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); |
@@ -2029,9 +1991,9 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2029 | } | 1991 | } |
2030 | target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + | 1992 | target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + |
2031 | (devip->target * 1000) - 3; | 1993 | (devip->target * 1000) - 3; |
2032 | /* set DPOFUA bit for disks */ | 1994 | /* for disks set DPOFUA bit and clear write protect (WP) bit */ |
2033 | if (0 == scsi_debug_ptype) | 1995 | if (is_disk) |
2034 | dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10; | 1996 | dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ |
2035 | else | 1997 | else |
2036 | dev_spec = 0x0; | 1998 | dev_spec = 0x0; |
2037 | if (msense_6) { | 1999 | if (msense_6) { |
@@ -2050,30 +2012,16 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2050 | sdebug_capacity = get_sdebug_capacity(); | 2012 | sdebug_capacity = get_sdebug_capacity(); |
2051 | 2013 | ||
2052 | if (8 == bd_len) { | 2014 | if (8 == bd_len) { |
2053 | if (sdebug_capacity > 0xfffffffe) { | 2015 | if (sdebug_capacity > 0xfffffffe) |
2054 | ap[0] = 0xff; | 2016 | put_unaligned_be32(0xffffffff, ap + 0); |
2055 | ap[1] = 0xff; | 2017 | else |
2056 | ap[2] = 0xff; | 2018 | put_unaligned_be32(sdebug_capacity, ap + 0); |
2057 | ap[3] = 0xff; | 2019 | put_unaligned_be16(sdebug_sector_size, ap + 6); |
2058 | } else { | ||
2059 | ap[0] = (sdebug_capacity >> 24) & 0xff; | ||
2060 | ap[1] = (sdebug_capacity >> 16) & 0xff; | ||
2061 | ap[2] = (sdebug_capacity >> 8) & 0xff; | ||
2062 | ap[3] = sdebug_capacity & 0xff; | ||
2063 | } | ||
2064 | ap[6] = (scsi_debug_sector_size >> 8) & 0xff; | ||
2065 | ap[7] = scsi_debug_sector_size & 0xff; | ||
2066 | offset += bd_len; | 2020 | offset += bd_len; |
2067 | ap = arr + offset; | 2021 | ap = arr + offset; |
2068 | } else if (16 == bd_len) { | 2022 | } else if (16 == bd_len) { |
2069 | unsigned long long capac = sdebug_capacity; | 2023 | put_unaligned_be64((u64)sdebug_capacity, ap + 0); |
2070 | 2024 | put_unaligned_be32(sdebug_sector_size, ap + 12); | |
2071 | for (k = 0; k < 8; ++k, capac >>= 8) | ||
2072 | ap[7 - k] = capac & 0xff; | ||
2073 | ap[12] = (scsi_debug_sector_size >> 24) & 0xff; | ||
2074 | ap[13] = (scsi_debug_sector_size >> 16) & 0xff; | ||
2075 | ap[14] = (scsi_debug_sector_size >> 8) & 0xff; | ||
2076 | ap[15] = scsi_debug_sector_size & 0xff; | ||
2077 | offset += bd_len; | 2025 | offset += bd_len; |
2078 | ap = arr + offset; | 2026 | ap = arr + offset; |
2079 | } | 2027 | } |
@@ -2083,6 +2031,8 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2083 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); | 2031 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); |
2084 | return check_condition_result; | 2032 | return check_condition_result; |
2085 | } | 2033 | } |
2034 | bad_pcode = false; | ||
2035 | |||
2086 | switch (pcode) { | 2036 | switch (pcode) { |
2087 | case 0x1: /* Read-Write error recovery page, direct access */ | 2037 | case 0x1: /* Read-Write error recovery page, direct access */ |
2088 | len = resp_err_recov_pg(ap, pcontrol, target); | 2038 | len = resp_err_recov_pg(ap, pcontrol, target); |
@@ -2093,12 +2043,18 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2093 | offset += len; | 2043 | offset += len; |
2094 | break; | 2044 | break; |
2095 | case 0x3: /* Format device page, direct access */ | 2045 | case 0x3: /* Format device page, direct access */ |
2096 | len = resp_format_pg(ap, pcontrol, target); | 2046 | if (is_disk) { |
2097 | offset += len; | 2047 | len = resp_format_pg(ap, pcontrol, target); |
2048 | offset += len; | ||
2049 | } else | ||
2050 | bad_pcode = true; | ||
2098 | break; | 2051 | break; |
2099 | case 0x8: /* Caching page, direct access */ | 2052 | case 0x8: /* Caching page, direct access */ |
2100 | len = resp_caching_pg(ap, pcontrol, target); | 2053 | if (is_disk) { |
2101 | offset += len; | 2054 | len = resp_caching_pg(ap, pcontrol, target); |
2055 | offset += len; | ||
2056 | } else | ||
2057 | bad_pcode = true; | ||
2102 | break; | 2058 | break; |
2103 | case 0xa: /* Control Mode page, all devices */ | 2059 | case 0xa: /* Control Mode page, all devices */ |
2104 | len = resp_ctrl_m_pg(ap, pcontrol, target); | 2060 | len = resp_ctrl_m_pg(ap, pcontrol, target); |
@@ -2127,8 +2083,12 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2127 | if ((0 == subpcode) || (0xff == subpcode)) { | 2083 | if ((0 == subpcode) || (0xff == subpcode)) { |
2128 | len = resp_err_recov_pg(ap, pcontrol, target); | 2084 | len = resp_err_recov_pg(ap, pcontrol, target); |
2129 | len += resp_disconnect_pg(ap + len, pcontrol, target); | 2085 | len += resp_disconnect_pg(ap + len, pcontrol, target); |
2130 | len += resp_format_pg(ap + len, pcontrol, target); | 2086 | if (is_disk) { |
2131 | len += resp_caching_pg(ap + len, pcontrol, target); | 2087 | len += resp_format_pg(ap + len, pcontrol, |
2088 | target); | ||
2089 | len += resp_caching_pg(ap + len, pcontrol, | ||
2090 | target); | ||
2091 | } | ||
2132 | len += resp_ctrl_m_pg(ap + len, pcontrol, target); | 2092 | len += resp_ctrl_m_pg(ap + len, pcontrol, target); |
2133 | len += resp_sas_sf_m_pg(ap + len, pcontrol, target); | 2093 | len += resp_sas_sf_m_pg(ap + len, pcontrol, target); |
2134 | if (0xff == subpcode) { | 2094 | if (0xff == subpcode) { |
@@ -2137,29 +2097,31 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2137 | len += resp_sas_sha_m_spg(ap + len, pcontrol); | 2097 | len += resp_sas_sha_m_spg(ap + len, pcontrol); |
2138 | } | 2098 | } |
2139 | len += resp_iec_m_pg(ap + len, pcontrol, target); | 2099 | len += resp_iec_m_pg(ap + len, pcontrol, target); |
2100 | offset += len; | ||
2140 | } else { | 2101 | } else { |
2141 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); | 2102 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); |
2142 | return check_condition_result; | 2103 | return check_condition_result; |
2143 | } | 2104 | } |
2144 | offset += len; | ||
2145 | break; | 2105 | break; |
2146 | default: | 2106 | default: |
2107 | bad_pcode = true; | ||
2108 | break; | ||
2109 | } | ||
2110 | if (bad_pcode) { | ||
2147 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); | 2111 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); |
2148 | return check_condition_result; | 2112 | return check_condition_result; |
2149 | } | 2113 | } |
2150 | if (msense_6) | 2114 | if (msense_6) |
2151 | arr[0] = offset - 1; | 2115 | arr[0] = offset - 1; |
2152 | else { | 2116 | else |
2153 | arr[0] = ((offset - 2) >> 8) & 0xff; | 2117 | put_unaligned_be16((offset - 2), arr + 0); |
2154 | arr[1] = (offset - 2) & 0xff; | ||
2155 | } | ||
2156 | return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); | 2118 | return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); |
2157 | } | 2119 | } |
2158 | 2120 | ||
2159 | #define SDEBUG_MAX_MSELECT_SZ 512 | 2121 | #define SDEBUG_MAX_MSELECT_SZ 512 |
2160 | 2122 | ||
2161 | static int | 2123 | static int resp_mode_select(struct scsi_cmnd *scp, |
2162 | resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 2124 | struct sdebug_dev_info *devip) |
2163 | { | 2125 | { |
2164 | int pf, sp, ps, md_len, bd_len, off, spf, pg_len; | 2126 | int pf, sp, ps, md_len, bd_len, off, spf, pg_len; |
2165 | int param_len, res, mpage; | 2127 | int param_len, res, mpage; |
@@ -2170,21 +2132,20 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2170 | memset(arr, 0, sizeof(arr)); | 2132 | memset(arr, 0, sizeof(arr)); |
2171 | pf = cmd[1] & 0x10; | 2133 | pf = cmd[1] & 0x10; |
2172 | sp = cmd[1] & 0x1; | 2134 | sp = cmd[1] & 0x1; |
2173 | param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); | 2135 | param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7); |
2174 | if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { | 2136 | if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { |
2175 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); | 2137 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); |
2176 | return check_condition_result; | 2138 | return check_condition_result; |
2177 | } | 2139 | } |
2178 | res = fetch_to_dev_buffer(scp, arr, param_len); | 2140 | res = fetch_to_dev_buffer(scp, arr, param_len); |
2179 | if (-1 == res) | 2141 | if (-1 == res) |
2180 | return (DID_ERROR << 16); | 2142 | return DID_ERROR << 16; |
2181 | else if ((res < param_len) && | 2143 | else if (sdebug_verbose && (res < param_len)) |
2182 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | ||
2183 | sdev_printk(KERN_INFO, scp->device, | 2144 | sdev_printk(KERN_INFO, scp->device, |
2184 | "%s: cdb indicated=%d, IO sent=%d bytes\n", | 2145 | "%s: cdb indicated=%d, IO sent=%d bytes\n", |
2185 | __func__, param_len, res); | 2146 | __func__, param_len, res); |
2186 | md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); | 2147 | md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); |
2187 | bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); | 2148 | bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); |
2188 | if (md_len > 2) { | 2149 | if (md_len > 2) { |
2189 | mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); | 2150 | mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); |
2190 | return check_condition_result; | 2151 | return check_condition_result; |
@@ -2197,7 +2158,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2197 | return check_condition_result; | 2158 | return check_condition_result; |
2198 | } | 2159 | } |
2199 | spf = !!(arr[off] & 0x40); | 2160 | spf = !!(arr[off] & 0x40); |
2200 | pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : | 2161 | pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) : |
2201 | (arr[off + 1] + 2); | 2162 | (arr[off + 1] + 2); |
2202 | if ((pg_len + off) > param_len) { | 2163 | if ((pg_len + off) > param_len) { |
2203 | mk_sense_buffer(scp, ILLEGAL_REQUEST, | 2164 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
@@ -2216,7 +2177,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2216 | if (ctrl_m_pg[1] == arr[off + 1]) { | 2177 | if (ctrl_m_pg[1] == arr[off + 1]) { |
2217 | memcpy(ctrl_m_pg + 2, arr + off + 2, | 2178 | memcpy(ctrl_m_pg + 2, arr + off + 2, |
2218 | sizeof(ctrl_m_pg) - 2); | 2179 | sizeof(ctrl_m_pg) - 2); |
2219 | scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); | 2180 | sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); |
2220 | goto set_mode_changed_ua; | 2181 | goto set_mode_changed_ua; |
2221 | } | 2182 | } |
2222 | break; | 2183 | break; |
@@ -2279,7 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
2279 | pcontrol = (cmd[2] & 0xc0) >> 6; | 2240 | pcontrol = (cmd[2] & 0xc0) >> 6; |
2280 | pcode = cmd[2] & 0x3f; | 2241 | pcode = cmd[2] & 0x3f; |
2281 | subpcode = cmd[3] & 0xff; | 2242 | subpcode = cmd[3] & 0xff; |
2282 | alloc_len = (cmd[7] << 8) + cmd[8]; | 2243 | alloc_len = get_unaligned_be16(cmd + 7); |
2283 | arr[0] = pcode; | 2244 | arr[0] = pcode; |
2284 | if (0 == subpcode) { | 2245 | if (0 == subpcode) { |
2285 | switch (pcode) { | 2246 | switch (pcode) { |
@@ -2336,7 +2297,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
2336 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); | 2297 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); |
2337 | return check_condition_result; | 2298 | return check_condition_result; |
2338 | } | 2299 | } |
2339 | len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); | 2300 | len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); |
2340 | return fill_from_dev_buffer(scp, arr, | 2301 | return fill_from_dev_buffer(scp, arr, |
2341 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); | 2302 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); |
2342 | } | 2303 | } |
@@ -2358,8 +2319,8 @@ static int check_device_access_params(struct scsi_cmnd *scp, | |||
2358 | } | 2319 | } |
2359 | 2320 | ||
2360 | /* Returns number of bytes copied or -1 if error. */ | 2321 | /* Returns number of bytes copied or -1 if error. */ |
2361 | static int | 2322 | static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, |
2362 | do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) | 2323 | bool do_write) |
2363 | { | 2324 | { |
2364 | int ret; | 2325 | int ret; |
2365 | u64 block, rest = 0; | 2326 | u64 block, rest = 0; |
@@ -2384,15 +2345,15 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) | |||
2384 | rest = block + num - sdebug_store_sectors; | 2345 | rest = block + num - sdebug_store_sectors; |
2385 | 2346 | ||
2386 | ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, | 2347 | ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, |
2387 | fake_storep + (block * scsi_debug_sector_size), | 2348 | fake_storep + (block * sdebug_sector_size), |
2388 | (num - rest) * scsi_debug_sector_size, 0, do_write); | 2349 | (num - rest) * sdebug_sector_size, 0, do_write); |
2389 | if (ret != (num - rest) * scsi_debug_sector_size) | 2350 | if (ret != (num - rest) * sdebug_sector_size) |
2390 | return ret; | 2351 | return ret; |
2391 | 2352 | ||
2392 | if (rest) { | 2353 | if (rest) { |
2393 | ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, | 2354 | ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, |
2394 | fake_storep, rest * scsi_debug_sector_size, | 2355 | fake_storep, rest * sdebug_sector_size, |
2395 | (num - rest) * scsi_debug_sector_size, do_write); | 2356 | (num - rest) * sdebug_sector_size, do_write); |
2396 | } | 2357 | } |
2397 | 2358 | ||
2398 | return ret; | 2359 | return ret; |
@@ -2401,13 +2362,12 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) | |||
2401 | /* If fake_store(lba,num) compares equal to arr(num), then copy top half of | 2362 | /* If fake_store(lba,num) compares equal to arr(num), then copy top half of |
2402 | * arr into fake_store(lba,num) and return true. If comparison fails then | 2363 | * arr into fake_store(lba,num) and return true. If comparison fails then |
2403 | * return false. */ | 2364 | * return false. */ |
2404 | static bool | 2365 | static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) |
2405 | comp_write_worker(u64 lba, u32 num, const u8 *arr) | ||
2406 | { | 2366 | { |
2407 | bool res; | 2367 | bool res; |
2408 | u64 block, rest = 0; | 2368 | u64 block, rest = 0; |
2409 | u32 store_blks = sdebug_store_sectors; | 2369 | u32 store_blks = sdebug_store_sectors; |
2410 | u32 lb_size = scsi_debug_sector_size; | 2370 | u32 lb_size = sdebug_sector_size; |
2411 | 2371 | ||
2412 | block = do_div(lba, store_blks); | 2372 | block = do_div(lba, store_blks); |
2413 | if (block + num > store_blks) | 2373 | if (block + num > store_blks) |
@@ -2434,7 +2394,7 @@ static __be16 dif_compute_csum(const void *buf, int len) | |||
2434 | { | 2394 | { |
2435 | __be16 csum; | 2395 | __be16 csum; |
2436 | 2396 | ||
2437 | if (scsi_debug_guard) | 2397 | if (sdebug_guard) |
2438 | csum = (__force __be16)ip_compute_csum(buf, len); | 2398 | csum = (__force __be16)ip_compute_csum(buf, len); |
2439 | else | 2399 | else |
2440 | csum = cpu_to_be16(crc_t10dif(buf, len)); | 2400 | csum = cpu_to_be16(crc_t10dif(buf, len)); |
@@ -2445,7 +2405,7 @@ static __be16 dif_compute_csum(const void *buf, int len) | |||
2445 | static int dif_verify(struct sd_dif_tuple *sdt, const void *data, | 2405 | static int dif_verify(struct sd_dif_tuple *sdt, const void *data, |
2446 | sector_t sector, u32 ei_lba) | 2406 | sector_t sector, u32 ei_lba) |
2447 | { | 2407 | { |
2448 | __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); | 2408 | __be16 csum = dif_compute_csum(data, sdebug_sector_size); |
2449 | 2409 | ||
2450 | if (sdt->guard_tag != csum) { | 2410 | if (sdt->guard_tag != csum) { |
2451 | pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", | 2411 | pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", |
@@ -2454,13 +2414,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data, | |||
2454 | be16_to_cpu(csum)); | 2414 | be16_to_cpu(csum)); |
2455 | return 0x01; | 2415 | return 0x01; |
2456 | } | 2416 | } |
2457 | if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && | 2417 | if (sdebug_dif == SD_DIF_TYPE1_PROTECTION && |
2458 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { | 2418 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { |
2459 | pr_err("REF check failed on sector %lu\n", | 2419 | pr_err("REF check failed on sector %lu\n", |
2460 | (unsigned long)sector); | 2420 | (unsigned long)sector); |
2461 | return 0x03; | 2421 | return 0x03; |
2462 | } | 2422 | } |
2463 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 2423 | if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && |
2464 | be32_to_cpu(sdt->ref_tag) != ei_lba) { | 2424 | be32_to_cpu(sdt->ref_tag) != ei_lba) { |
2465 | pr_err("REF check failed on sector %lu\n", | 2425 | pr_err("REF check failed on sector %lu\n", |
2466 | (unsigned long)sector); | 2426 | (unsigned long)sector); |
@@ -2541,10 +2501,10 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, | |||
2541 | return 0; | 2501 | return 0; |
2542 | } | 2502 | } |
2543 | 2503 | ||
2544 | static int | 2504 | static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
2545 | resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | ||
2546 | { | 2505 | { |
2547 | u8 *cmd = scp->cmnd; | 2506 | u8 *cmd = scp->cmnd; |
2507 | struct sdebug_queued_cmd *sqcp; | ||
2548 | u64 lba; | 2508 | u64 lba; |
2549 | u32 num; | 2509 | u32 num; |
2550 | u32 ei_lba; | 2510 | u32 ei_lba; |
@@ -2591,40 +2551,43 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2591 | check_prot = false; | 2551 | check_prot = false; |
2592 | break; | 2552 | break; |
2593 | } | 2553 | } |
2594 | if (check_prot) { | 2554 | if (unlikely(have_dif_prot && check_prot)) { |
2595 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 2555 | if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && |
2596 | (cmd[1] & 0xe0)) { | 2556 | (cmd[1] & 0xe0)) { |
2597 | mk_sense_invalid_opcode(scp); | 2557 | mk_sense_invalid_opcode(scp); |
2598 | return check_condition_result; | 2558 | return check_condition_result; |
2599 | } | 2559 | } |
2600 | if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || | 2560 | if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || |
2601 | scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && | 2561 | sdebug_dif == SD_DIF_TYPE3_PROTECTION) && |
2602 | (cmd[1] & 0xe0) == 0) | 2562 | (cmd[1] & 0xe0) == 0) |
2603 | sdev_printk(KERN_ERR, scp->device, "Unprotected RD " | 2563 | sdev_printk(KERN_ERR, scp->device, "Unprotected RD " |
2604 | "to DIF device\n"); | 2564 | "to DIF device\n"); |
2605 | } | 2565 | } |
2606 | if (sdebug_any_injecting_opt) { | 2566 | if (unlikely(sdebug_any_injecting_opt)) { |
2607 | struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); | 2567 | sqcp = (struct sdebug_queued_cmd *)scp->host_scribble; |
2608 | 2568 | ||
2609 | if (ep->inj_short) | 2569 | if (sqcp) { |
2610 | num /= 2; | 2570 | if (sqcp->inj_short) |
2611 | } | 2571 | num /= 2; |
2572 | } | ||
2573 | } else | ||
2574 | sqcp = NULL; | ||
2612 | 2575 | ||
2613 | /* inline check_device_access_params() */ | 2576 | /* inline check_device_access_params() */ |
2614 | if (lba + num > sdebug_capacity) { | 2577 | if (unlikely(lba + num > sdebug_capacity)) { |
2615 | mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); | 2578 | mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); |
2616 | return check_condition_result; | 2579 | return check_condition_result; |
2617 | } | 2580 | } |
2618 | /* transfer length excessive (tie in to block limits VPD page) */ | 2581 | /* transfer length excessive (tie in to block limits VPD page) */ |
2619 | if (num > sdebug_store_sectors) { | 2582 | if (unlikely(num > sdebug_store_sectors)) { |
2620 | /* needs work to find which cdb byte 'num' comes from */ | 2583 | /* needs work to find which cdb byte 'num' comes from */ |
2621 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); | 2584 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); |
2622 | return check_condition_result; | 2585 | return check_condition_result; |
2623 | } | 2586 | } |
2624 | 2587 | ||
2625 | if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && | 2588 | if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && |
2626 | (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && | 2589 | (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && |
2627 | ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { | 2590 | ((lba + num) > OPT_MEDIUM_ERR_ADDR))) { |
2628 | /* claim unrecoverable read error */ | 2591 | /* claim unrecoverable read error */ |
2629 | mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); | 2592 | mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); |
2630 | /* set info field and valid bit for fixed descriptor */ | 2593 | /* set info field and valid bit for fixed descriptor */ |
@@ -2641,7 +2604,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2641 | read_lock_irqsave(&atomic_rw, iflags); | 2604 | read_lock_irqsave(&atomic_rw, iflags); |
2642 | 2605 | ||
2643 | /* DIX + T10 DIF */ | 2606 | /* DIX + T10 DIF */ |
2644 | if (scsi_debug_dix && scsi_prot_sg_count(scp)) { | 2607 | if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { |
2645 | int prot_ret = prot_verify_read(scp, lba, num, ei_lba); | 2608 | int prot_ret = prot_verify_read(scp, lba, num, ei_lba); |
2646 | 2609 | ||
2647 | if (prot_ret) { | 2610 | if (prot_ret) { |
@@ -2653,27 +2616,25 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2653 | 2616 | ||
2654 | ret = do_device_access(scp, lba, num, false); | 2617 | ret = do_device_access(scp, lba, num, false); |
2655 | read_unlock_irqrestore(&atomic_rw, iflags); | 2618 | read_unlock_irqrestore(&atomic_rw, iflags); |
2656 | if (ret == -1) | 2619 | if (unlikely(ret == -1)) |
2657 | return DID_ERROR << 16; | 2620 | return DID_ERROR << 16; |
2658 | 2621 | ||
2659 | scsi_in(scp)->resid = scsi_bufflen(scp) - ret; | 2622 | scsi_in(scp)->resid = scsi_bufflen(scp) - ret; |
2660 | 2623 | ||
2661 | if (sdebug_any_injecting_opt) { | 2624 | if (unlikely(sqcp)) { |
2662 | struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); | 2625 | if (sqcp->inj_recovered) { |
2663 | |||
2664 | if (ep->inj_recovered) { | ||
2665 | mk_sense_buffer(scp, RECOVERED_ERROR, | 2626 | mk_sense_buffer(scp, RECOVERED_ERROR, |
2666 | THRESHOLD_EXCEEDED, 0); | 2627 | THRESHOLD_EXCEEDED, 0); |
2667 | return check_condition_result; | 2628 | return check_condition_result; |
2668 | } else if (ep->inj_transport) { | 2629 | } else if (sqcp->inj_transport) { |
2669 | mk_sense_buffer(scp, ABORTED_COMMAND, | 2630 | mk_sense_buffer(scp, ABORTED_COMMAND, |
2670 | TRANSPORT_PROBLEM, ACK_NAK_TO); | 2631 | TRANSPORT_PROBLEM, ACK_NAK_TO); |
2671 | return check_condition_result; | 2632 | return check_condition_result; |
2672 | } else if (ep->inj_dif) { | 2633 | } else if (sqcp->inj_dif) { |
2673 | /* Logical block guard check failed */ | 2634 | /* Logical block guard check failed */ |
2674 | mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); | 2635 | mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); |
2675 | return illegal_condition_result; | 2636 | return illegal_condition_result; |
2676 | } else if (ep->inj_dix) { | 2637 | } else if (sqcp->inj_dix) { |
2677 | mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); | 2638 | mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); |
2678 | return illegal_condition_result; | 2639 | return illegal_condition_result; |
2679 | } | 2640 | } |
@@ -2750,13 +2711,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, | |||
2750 | 2711 | ||
2751 | ret = dif_verify(sdt, daddr, sector, ei_lba); | 2712 | ret = dif_verify(sdt, daddr, sector, ei_lba); |
2752 | if (ret) { | 2713 | if (ret) { |
2753 | dump_sector(daddr, scsi_debug_sector_size); | 2714 | dump_sector(daddr, sdebug_sector_size); |
2754 | goto out; | 2715 | goto out; |
2755 | } | 2716 | } |
2756 | 2717 | ||
2757 | sector++; | 2718 | sector++; |
2758 | ei_lba++; | 2719 | ei_lba++; |
2759 | dpage_offset += scsi_debug_sector_size; | 2720 | dpage_offset += sdebug_sector_size; |
2760 | } | 2721 | } |
2761 | diter.consumed = dpage_offset; | 2722 | diter.consumed = dpage_offset; |
2762 | sg_miter_stop(&diter); | 2723 | sg_miter_stop(&diter); |
@@ -2777,24 +2738,18 @@ out: | |||
2777 | 2738 | ||
2778 | static unsigned long lba_to_map_index(sector_t lba) | 2739 | static unsigned long lba_to_map_index(sector_t lba) |
2779 | { | 2740 | { |
2780 | if (scsi_debug_unmap_alignment) { | 2741 | if (sdebug_unmap_alignment) |
2781 | lba += scsi_debug_unmap_granularity - | 2742 | lba += sdebug_unmap_granularity - sdebug_unmap_alignment; |
2782 | scsi_debug_unmap_alignment; | 2743 | sector_div(lba, sdebug_unmap_granularity); |
2783 | } | ||
2784 | sector_div(lba, scsi_debug_unmap_granularity); | ||
2785 | |||
2786 | return lba; | 2744 | return lba; |
2787 | } | 2745 | } |
2788 | 2746 | ||
2789 | static sector_t map_index_to_lba(unsigned long index) | 2747 | static sector_t map_index_to_lba(unsigned long index) |
2790 | { | 2748 | { |
2791 | sector_t lba = index * scsi_debug_unmap_granularity; | 2749 | sector_t lba = index * sdebug_unmap_granularity; |
2792 | |||
2793 | if (scsi_debug_unmap_alignment) { | ||
2794 | lba -= scsi_debug_unmap_granularity - | ||
2795 | scsi_debug_unmap_alignment; | ||
2796 | } | ||
2797 | 2750 | ||
2751 | if (sdebug_unmap_alignment) | ||
2752 | lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; | ||
2798 | return lba; | 2753 | return lba; |
2799 | } | 2754 | } |
2800 | 2755 | ||
@@ -2815,7 +2770,6 @@ static unsigned int map_state(sector_t lba, unsigned int *num) | |||
2815 | 2770 | ||
2816 | end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); | 2771 | end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); |
2817 | *num = end - lba; | 2772 | *num = end - lba; |
2818 | |||
2819 | return mapped; | 2773 | return mapped; |
2820 | } | 2774 | } |
2821 | 2775 | ||
@@ -2841,27 +2795,27 @@ static void unmap_region(sector_t lba, unsigned int len) | |||
2841 | unsigned long index = lba_to_map_index(lba); | 2795 | unsigned long index = lba_to_map_index(lba); |
2842 | 2796 | ||
2843 | if (lba == map_index_to_lba(index) && | 2797 | if (lba == map_index_to_lba(index) && |
2844 | lba + scsi_debug_unmap_granularity <= end && | 2798 | lba + sdebug_unmap_granularity <= end && |
2845 | index < map_size) { | 2799 | index < map_size) { |
2846 | clear_bit(index, map_storep); | 2800 | clear_bit(index, map_storep); |
2847 | if (scsi_debug_lbprz) { | 2801 | if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */ |
2848 | memset(fake_storep + | 2802 | memset(fake_storep + |
2849 | lba * scsi_debug_sector_size, 0, | 2803 | lba * sdebug_sector_size, |
2850 | scsi_debug_sector_size * | 2804 | (sdebug_lbprz & 1) ? 0 : 0xff, |
2851 | scsi_debug_unmap_granularity); | 2805 | sdebug_sector_size * |
2806 | sdebug_unmap_granularity); | ||
2852 | } | 2807 | } |
2853 | if (dif_storep) { | 2808 | if (dif_storep) { |
2854 | memset(dif_storep + lba, 0xff, | 2809 | memset(dif_storep + lba, 0xff, |
2855 | sizeof(*dif_storep) * | 2810 | sizeof(*dif_storep) * |
2856 | scsi_debug_unmap_granularity); | 2811 | sdebug_unmap_granularity); |
2857 | } | 2812 | } |
2858 | } | 2813 | } |
2859 | lba = map_index_to_lba(index + 1); | 2814 | lba = map_index_to_lba(index + 1); |
2860 | } | 2815 | } |
2861 | } | 2816 | } |
2862 | 2817 | ||
2863 | static int | 2818 | static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
2864 | resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | ||
2865 | { | 2819 | { |
2866 | u8 *cmd = scp->cmnd; | 2820 | u8 *cmd = scp->cmnd; |
2867 | u64 lba; | 2821 | u64 lba; |
@@ -2910,26 +2864,26 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2910 | check_prot = false; | 2864 | check_prot = false; |
2911 | break; | 2865 | break; |
2912 | } | 2866 | } |
2913 | if (check_prot) { | 2867 | if (unlikely(have_dif_prot && check_prot)) { |
2914 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 2868 | if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && |
2915 | (cmd[1] & 0xe0)) { | 2869 | (cmd[1] & 0xe0)) { |
2916 | mk_sense_invalid_opcode(scp); | 2870 | mk_sense_invalid_opcode(scp); |
2917 | return check_condition_result; | 2871 | return check_condition_result; |
2918 | } | 2872 | } |
2919 | if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || | 2873 | if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || |
2920 | scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && | 2874 | sdebug_dif == SD_DIF_TYPE3_PROTECTION) && |
2921 | (cmd[1] & 0xe0) == 0) | 2875 | (cmd[1] & 0xe0) == 0) |
2922 | sdev_printk(KERN_ERR, scp->device, "Unprotected WR " | 2876 | sdev_printk(KERN_ERR, scp->device, "Unprotected WR " |
2923 | "to DIF device\n"); | 2877 | "to DIF device\n"); |
2924 | } | 2878 | } |
2925 | 2879 | ||
2926 | /* inline check_device_access_params() */ | 2880 | /* inline check_device_access_params() */ |
2927 | if (lba + num > sdebug_capacity) { | 2881 | if (unlikely(lba + num > sdebug_capacity)) { |
2928 | mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); | 2882 | mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); |
2929 | return check_condition_result; | 2883 | return check_condition_result; |
2930 | } | 2884 | } |
2931 | /* transfer length excessive (tie in to block limits VPD page) */ | 2885 | /* transfer length excessive (tie in to block limits VPD page) */ |
2932 | if (num > sdebug_store_sectors) { | 2886 | if (unlikely(num > sdebug_store_sectors)) { |
2933 | /* needs work to find which cdb byte 'num' comes from */ | 2887 | /* needs work to find which cdb byte 'num' comes from */ |
2934 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); | 2888 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); |
2935 | return check_condition_result; | 2889 | return check_condition_result; |
@@ -2938,7 +2892,7 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2938 | write_lock_irqsave(&atomic_rw, iflags); | 2892 | write_lock_irqsave(&atomic_rw, iflags); |
2939 | 2893 | ||
2940 | /* DIX + T10 DIF */ | 2894 | /* DIX + T10 DIF */ |
2941 | if (scsi_debug_dix && scsi_prot_sg_count(scp)) { | 2895 | if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { |
2942 | int prot_ret = prot_verify_write(scp, lba, num, ei_lba); | 2896 | int prot_ret = prot_verify_write(scp, lba, num, ei_lba); |
2943 | 2897 | ||
2944 | if (prot_ret) { | 2898 | if (prot_ret) { |
@@ -2949,43 +2903,46 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
2949 | } | 2903 | } |
2950 | 2904 | ||
2951 | ret = do_device_access(scp, lba, num, true); | 2905 | ret = do_device_access(scp, lba, num, true); |
2952 | if (scsi_debug_lbp()) | 2906 | if (unlikely(scsi_debug_lbp())) |
2953 | map_region(lba, num); | 2907 | map_region(lba, num); |
2954 | write_unlock_irqrestore(&atomic_rw, iflags); | 2908 | write_unlock_irqrestore(&atomic_rw, iflags); |
2955 | if (-1 == ret) | 2909 | if (unlikely(-1 == ret)) |
2956 | return (DID_ERROR << 16); | 2910 | return DID_ERROR << 16; |
2957 | else if ((ret < (num * scsi_debug_sector_size)) && | 2911 | else if (unlikely(sdebug_verbose && |
2958 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 2912 | (ret < (num * sdebug_sector_size)))) |
2959 | sdev_printk(KERN_INFO, scp->device, | 2913 | sdev_printk(KERN_INFO, scp->device, |
2960 | "%s: write: cdb indicated=%u, IO sent=%d bytes\n", | 2914 | "%s: write: cdb indicated=%u, IO sent=%d bytes\n", |
2961 | my_name, num * scsi_debug_sector_size, ret); | 2915 | my_name, num * sdebug_sector_size, ret); |
2962 | 2916 | ||
2963 | if (sdebug_any_injecting_opt) { | 2917 | if (unlikely(sdebug_any_injecting_opt)) { |
2964 | struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); | 2918 | struct sdebug_queued_cmd *sqcp = |
2919 | (struct sdebug_queued_cmd *)scp->host_scribble; | ||
2965 | 2920 | ||
2966 | if (ep->inj_recovered) { | 2921 | if (sqcp) { |
2967 | mk_sense_buffer(scp, RECOVERED_ERROR, | 2922 | if (sqcp->inj_recovered) { |
2968 | THRESHOLD_EXCEEDED, 0); | 2923 | mk_sense_buffer(scp, RECOVERED_ERROR, |
2969 | return check_condition_result; | 2924 | THRESHOLD_EXCEEDED, 0); |
2970 | } else if (ep->inj_dif) { | 2925 | return check_condition_result; |
2971 | /* Logical block guard check failed */ | 2926 | } else if (sqcp->inj_dif) { |
2972 | mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); | 2927 | /* Logical block guard check failed */ |
2973 | return illegal_condition_result; | 2928 | mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); |
2974 | } else if (ep->inj_dix) { | 2929 | return illegal_condition_result; |
2975 | mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); | 2930 | } else if (sqcp->inj_dix) { |
2976 | return illegal_condition_result; | 2931 | mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); |
2932 | return illegal_condition_result; | ||
2933 | } | ||
2977 | } | 2934 | } |
2978 | } | 2935 | } |
2979 | return 0; | 2936 | return 0; |
2980 | } | 2937 | } |
2981 | 2938 | ||
2982 | static int | 2939 | static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, |
2983 | resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, | 2940 | u32 ei_lba, bool unmap, bool ndob) |
2984 | bool unmap, bool ndob) | ||
2985 | { | 2941 | { |
2986 | unsigned long iflags; | 2942 | unsigned long iflags; |
2987 | unsigned long long i; | 2943 | unsigned long long i; |
2988 | int ret; | 2944 | int ret; |
2945 | u64 lba_off; | ||
2989 | 2946 | ||
2990 | ret = check_device_access_params(scp, lba, num); | 2947 | ret = check_device_access_params(scp, lba, num); |
2991 | if (ret) | 2948 | if (ret) |
@@ -2998,31 +2955,29 @@ resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, | |||
2998 | goto out; | 2955 | goto out; |
2999 | } | 2956 | } |
3000 | 2957 | ||
2958 | lba_off = lba * sdebug_sector_size; | ||
3001 | /* if ndob then zero 1 logical block, else fetch 1 logical block */ | 2959 | /* if ndob then zero 1 logical block, else fetch 1 logical block */ |
3002 | if (ndob) { | 2960 | if (ndob) { |
3003 | memset(fake_storep + (lba * scsi_debug_sector_size), 0, | 2961 | memset(fake_storep + lba_off, 0, sdebug_sector_size); |
3004 | scsi_debug_sector_size); | ||
3005 | ret = 0; | 2962 | ret = 0; |
3006 | } else | 2963 | } else |
3007 | ret = fetch_to_dev_buffer(scp, fake_storep + | 2964 | ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, |
3008 | (lba * scsi_debug_sector_size), | 2965 | sdebug_sector_size); |
3009 | scsi_debug_sector_size); | ||
3010 | 2966 | ||
3011 | if (-1 == ret) { | 2967 | if (-1 == ret) { |
3012 | write_unlock_irqrestore(&atomic_rw, iflags); | 2968 | write_unlock_irqrestore(&atomic_rw, iflags); |
3013 | return (DID_ERROR << 16); | 2969 | return DID_ERROR << 16; |
3014 | } else if ((ret < (num * scsi_debug_sector_size)) && | 2970 | } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) |
3015 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | ||
3016 | sdev_printk(KERN_INFO, scp->device, | 2971 | sdev_printk(KERN_INFO, scp->device, |
3017 | "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", | 2972 | "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", |
3018 | my_name, "write same", | 2973 | my_name, "write same", |
3019 | num * scsi_debug_sector_size, ret); | 2974 | num * sdebug_sector_size, ret); |
3020 | 2975 | ||
3021 | /* Copy first sector to remaining blocks */ | 2976 | /* Copy first sector to remaining blocks */ |
3022 | for (i = 1 ; i < num ; i++) | 2977 | for (i = 1 ; i < num ; i++) |
3023 | memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), | 2978 | memcpy(fake_storep + ((lba + i) * sdebug_sector_size), |
3024 | fake_storep + (lba * scsi_debug_sector_size), | 2979 | fake_storep + lba_off, |
3025 | scsi_debug_sector_size); | 2980 | sdebug_sector_size); |
3026 | 2981 | ||
3027 | if (scsi_debug_lbp()) | 2982 | if (scsi_debug_lbp()) |
3028 | map_region(lba, num); | 2983 | map_region(lba, num); |
@@ -3032,8 +2987,8 @@ out: | |||
3032 | return 0; | 2987 | return 0; |
3033 | } | 2988 | } |
3034 | 2989 | ||
3035 | static int | 2990 | static int resp_write_same_10(struct scsi_cmnd *scp, |
3036 | resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 2991 | struct sdebug_dev_info *devip) |
3037 | { | 2992 | { |
3038 | u8 *cmd = scp->cmnd; | 2993 | u8 *cmd = scp->cmnd; |
3039 | u32 lba; | 2994 | u32 lba; |
@@ -3042,7 +2997,7 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3042 | bool unmap = false; | 2997 | bool unmap = false; |
3043 | 2998 | ||
3044 | if (cmd[1] & 0x8) { | 2999 | if (cmd[1] & 0x8) { |
3045 | if (scsi_debug_lbpws10 == 0) { | 3000 | if (sdebug_lbpws10 == 0) { |
3046 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); | 3001 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); |
3047 | return check_condition_result; | 3002 | return check_condition_result; |
3048 | } else | 3003 | } else |
@@ -3050,15 +3005,15 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3050 | } | 3005 | } |
3051 | lba = get_unaligned_be32(cmd + 2); | 3006 | lba = get_unaligned_be32(cmd + 2); |
3052 | num = get_unaligned_be16(cmd + 7); | 3007 | num = get_unaligned_be16(cmd + 7); |
3053 | if (num > scsi_debug_write_same_length) { | 3008 | if (num > sdebug_write_same_length) { |
3054 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); | 3009 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); |
3055 | return check_condition_result; | 3010 | return check_condition_result; |
3056 | } | 3011 | } |
3057 | return resp_write_same(scp, lba, num, ei_lba, unmap, false); | 3012 | return resp_write_same(scp, lba, num, ei_lba, unmap, false); |
3058 | } | 3013 | } |
3059 | 3014 | ||
3060 | static int | 3015 | static int resp_write_same_16(struct scsi_cmnd *scp, |
3061 | resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3016 | struct sdebug_dev_info *devip) |
3062 | { | 3017 | { |
3063 | u8 *cmd = scp->cmnd; | 3018 | u8 *cmd = scp->cmnd; |
3064 | u64 lba; | 3019 | u64 lba; |
@@ -3068,7 +3023,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3068 | bool ndob = false; | 3023 | bool ndob = false; |
3069 | 3024 | ||
3070 | if (cmd[1] & 0x8) { /* UNMAP */ | 3025 | if (cmd[1] & 0x8) { /* UNMAP */ |
3071 | if (scsi_debug_lbpws == 0) { | 3026 | if (sdebug_lbpws == 0) { |
3072 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); | 3027 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); |
3073 | return check_condition_result; | 3028 | return check_condition_result; |
3074 | } else | 3029 | } else |
@@ -3078,7 +3033,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3078 | ndob = true; | 3033 | ndob = true; |
3079 | lba = get_unaligned_be64(cmd + 2); | 3034 | lba = get_unaligned_be64(cmd + 2); |
3080 | num = get_unaligned_be32(cmd + 10); | 3035 | num = get_unaligned_be32(cmd + 10); |
3081 | if (num > scsi_debug_write_same_length) { | 3036 | if (num > sdebug_write_same_length) { |
3082 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); | 3037 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); |
3083 | return check_condition_result; | 3038 | return check_condition_result; |
3084 | } | 3039 | } |
@@ -3088,8 +3043,8 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3088 | /* Note the mode field is in the same position as the (lower) service action | 3043 | /* Note the mode field is in the same position as the (lower) service action |
3089 | * field. For the Report supported operation codes command, SPC-4 suggests | 3044 | * field. For the Report supported operation codes command, SPC-4 suggests |
3090 | * each mode of this command should be reported separately; for future. */ | 3045 | * each mode of this command should be reported separately; for future. */ |
3091 | static int | 3046 | static int resp_write_buffer(struct scsi_cmnd *scp, |
3092 | resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3047 | struct sdebug_dev_info *devip) |
3093 | { | 3048 | { |
3094 | u8 *cmd = scp->cmnd; | 3049 | u8 *cmd = scp->cmnd; |
3095 | struct scsi_device *sdp = scp->device; | 3050 | struct scsi_device *sdp = scp->device; |
@@ -3134,15 +3089,15 @@ resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3134 | return 0; | 3089 | return 0; |
3135 | } | 3090 | } |
3136 | 3091 | ||
3137 | static int | 3092 | static int resp_comp_write(struct scsi_cmnd *scp, |
3138 | resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3093 | struct sdebug_dev_info *devip) |
3139 | { | 3094 | { |
3140 | u8 *cmd = scp->cmnd; | 3095 | u8 *cmd = scp->cmnd; |
3141 | u8 *arr; | 3096 | u8 *arr; |
3142 | u8 *fake_storep_hold; | 3097 | u8 *fake_storep_hold; |
3143 | u64 lba; | 3098 | u64 lba; |
3144 | u32 dnum; | 3099 | u32 dnum; |
3145 | u32 lb_size = scsi_debug_sector_size; | 3100 | u32 lb_size = sdebug_sector_size; |
3146 | u8 num; | 3101 | u8 num; |
3147 | unsigned long iflags; | 3102 | unsigned long iflags; |
3148 | int ret; | 3103 | int ret; |
@@ -3152,13 +3107,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3152 | num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ | 3107 | num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ |
3153 | if (0 == num) | 3108 | if (0 == num) |
3154 | return 0; /* degenerate case, not an error */ | 3109 | return 0; /* degenerate case, not an error */ |
3155 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 3110 | if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && |
3156 | (cmd[1] & 0xe0)) { | 3111 | (cmd[1] & 0xe0)) { |
3157 | mk_sense_invalid_opcode(scp); | 3112 | mk_sense_invalid_opcode(scp); |
3158 | return check_condition_result; | 3113 | return check_condition_result; |
3159 | } | 3114 | } |
3160 | if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || | 3115 | if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || |
3161 | scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && | 3116 | sdebug_dif == SD_DIF_TYPE3_PROTECTION) && |
3162 | (cmd[1] & 0xe0) == 0) | 3117 | (cmd[1] & 0xe0) == 0) |
3163 | sdev_printk(KERN_ERR, scp->device, "Unprotected WR " | 3118 | sdev_printk(KERN_ERR, scp->device, "Unprotected WR " |
3164 | "to DIF device\n"); | 3119 | "to DIF device\n"); |
@@ -3193,8 +3148,7 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3193 | if (ret == -1) { | 3148 | if (ret == -1) { |
3194 | retval = DID_ERROR << 16; | 3149 | retval = DID_ERROR << 16; |
3195 | goto cleanup; | 3150 | goto cleanup; |
3196 | } else if ((ret < (dnum * lb_size)) && | 3151 | } else if (sdebug_verbose && (ret < (dnum * lb_size))) |
3197 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | ||
3198 | sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " | 3152 | sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " |
3199 | "indicated=%u, IO sent=%d bytes\n", my_name, | 3153 | "indicated=%u, IO sent=%d bytes\n", my_name, |
3200 | dnum * lb_size, ret); | 3154 | dnum * lb_size, ret); |
@@ -3217,8 +3171,7 @@ struct unmap_block_desc { | |||
3217 | __be32 __reserved; | 3171 | __be32 __reserved; |
3218 | }; | 3172 | }; |
3219 | 3173 | ||
3220 | static int | 3174 | static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
3221 | resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | ||
3222 | { | 3175 | { |
3223 | unsigned char *buf; | 3176 | unsigned char *buf; |
3224 | struct unmap_block_desc *desc; | 3177 | struct unmap_block_desc *desc; |
@@ -3233,12 +3186,12 @@ resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3233 | BUG_ON(scsi_bufflen(scp) != payload_len); | 3186 | BUG_ON(scsi_bufflen(scp) != payload_len); |
3234 | 3187 | ||
3235 | descriptors = (payload_len - 8) / 16; | 3188 | descriptors = (payload_len - 8) / 16; |
3236 | if (descriptors > scsi_debug_unmap_max_desc) { | 3189 | if (descriptors > sdebug_unmap_max_desc) { |
3237 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); | 3190 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); |
3238 | return check_condition_result; | 3191 | return check_condition_result; |
3239 | } | 3192 | } |
3240 | 3193 | ||
3241 | buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); | 3194 | buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); |
3242 | if (!buf) { | 3195 | if (!buf) { |
3243 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, | 3196 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
3244 | INSUFF_RES_ASCQ); | 3197 | INSUFF_RES_ASCQ); |
@@ -3276,8 +3229,8 @@ out: | |||
3276 | 3229 | ||
3277 | #define SDEBUG_GET_LBA_STATUS_LEN 32 | 3230 | #define SDEBUG_GET_LBA_STATUS_LEN 32 |
3278 | 3231 | ||
3279 | static int | 3232 | static int resp_get_lba_status(struct scsi_cmnd *scp, |
3280 | resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3233 | struct sdebug_dev_info *devip) |
3281 | { | 3234 | { |
3282 | u8 *cmd = scp->cmnd; | 3235 | u8 *cmd = scp->cmnd; |
3283 | u64 lba; | 3236 | u64 lba; |
@@ -3316,63 +3269,94 @@ resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3316 | return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); | 3269 | return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); |
3317 | } | 3270 | } |
3318 | 3271 | ||
3319 | #define SDEBUG_RLUN_ARR_SZ 256 | 3272 | /* Even though each pseudo target has a REPORT LUNS "well known logical unit" |
3320 | 3273 | * (W-LUN), the normal Linux scanning logic does not associate it with a | |
3321 | static int resp_report_luns(struct scsi_cmnd * scp, | 3274 | * device (e.g. /dev/sg7). The following magic will make that association: |
3322 | struct sdebug_dev_info * devip) | 3275 | * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan" |
3276 | * where <n> is a host number. If there are multiple targets in a host then | ||
3277 | * the above will associate a W-LUN to each target. To only get a W-LUN | ||
3278 | * for target 2, then use "echo '- 2 49409' > scan" . | ||
3279 | */ | ||
3280 | static int resp_report_luns(struct scsi_cmnd *scp, | ||
3281 | struct sdebug_dev_info *devip) | ||
3323 | { | 3282 | { |
3283 | unsigned char *cmd = scp->cmnd; | ||
3324 | unsigned int alloc_len; | 3284 | unsigned int alloc_len; |
3325 | int lun_cnt, i, upper, num, n, want_wlun, shortish; | 3285 | unsigned char select_report; |
3326 | u64 lun; | 3286 | u64 lun; |
3327 | unsigned char *cmd = scp->cmnd; | 3287 | struct scsi_lun *lun_p; |
3328 | int select_report = (int)cmd[2]; | 3288 | u8 *arr; |
3329 | struct scsi_lun *one_lun; | 3289 | unsigned int lun_cnt; /* normal LUN count (max: 256) */ |
3330 | unsigned char arr[SDEBUG_RLUN_ARR_SZ]; | 3290 | unsigned int wlun_cnt; /* report luns W-LUN count */ |
3331 | unsigned char * max_addr; | 3291 | unsigned int tlun_cnt; /* total LUN count */ |
3292 | unsigned int rlen; /* response length (in bytes) */ | ||
3293 | int i, res; | ||
3332 | 3294 | ||
3333 | clear_luns_changed_on_target(devip); | 3295 | clear_luns_changed_on_target(devip); |
3334 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); | 3296 | |
3335 | shortish = (alloc_len < 4); | 3297 | select_report = cmd[2]; |
3336 | if (shortish || (select_report > 2)) { | 3298 | alloc_len = get_unaligned_be32(cmd + 6); |
3337 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1); | 3299 | |
3300 | if (alloc_len < 4) { | ||
3301 | pr_err("alloc len too small %d\n", alloc_len); | ||
3302 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); | ||
3338 | return check_condition_result; | 3303 | return check_condition_result; |
3339 | } | 3304 | } |
3340 | /* can produce response with up to 16k luns (lun 0 to lun 16383) */ | 3305 | |
3341 | memset(arr, 0, SDEBUG_RLUN_ARR_SZ); | 3306 | switch (select_report) { |
3342 | lun_cnt = scsi_debug_max_luns; | 3307 | case 0: /* all LUNs apart from W-LUNs */ |
3343 | if (1 == select_report) | 3308 | lun_cnt = sdebug_max_luns; |
3309 | wlun_cnt = 0; | ||
3310 | break; | ||
3311 | case 1: /* only W-LUNs */ | ||
3344 | lun_cnt = 0; | 3312 | lun_cnt = 0; |
3345 | else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) | 3313 | wlun_cnt = 1; |
3314 | break; | ||
3315 | case 2: /* all LUNs */ | ||
3316 | lun_cnt = sdebug_max_luns; | ||
3317 | wlun_cnt = 1; | ||
3318 | break; | ||
3319 | case 0x10: /* only administrative LUs */ | ||
3320 | case 0x11: /* see SPC-5 */ | ||
3321 | case 0x12: /* only subsiduary LUs owned by referenced LU */ | ||
3322 | default: | ||
3323 | pr_debug("select report invalid %d\n", select_report); | ||
3324 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); | ||
3325 | return check_condition_result; | ||
3326 | } | ||
3327 | |||
3328 | if (sdebug_no_lun_0 && (lun_cnt > 0)) | ||
3346 | --lun_cnt; | 3329 | --lun_cnt; |
3347 | want_wlun = (select_report > 0) ? 1 : 0; | 3330 | |
3348 | num = lun_cnt + want_wlun; | 3331 | tlun_cnt = lun_cnt + wlun_cnt; |
3349 | arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; | 3332 | |
3350 | arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; | 3333 | rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8; |
3351 | n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / | 3334 | arr = vmalloc(rlen); |
3352 | sizeof(struct scsi_lun)), num); | 3335 | if (!arr) { |
3353 | if (n < num) { | 3336 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
3354 | want_wlun = 0; | 3337 | INSUFF_RES_ASCQ); |
3355 | lun_cnt = n; | 3338 | return check_condition_result; |
3356 | } | 3339 | } |
3357 | one_lun = (struct scsi_lun *) &arr[8]; | 3340 | memset(arr, 0, rlen); |
3358 | max_addr = arr + SDEBUG_RLUN_ARR_SZ; | 3341 | pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", |
3359 | for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0); | 3342 | select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); |
3360 | ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr)); | 3343 | |
3361 | i++, lun++) { | 3344 | /* luns start at byte 8 in response following the header */ |
3362 | upper = (lun >> 8) & 0x3f; | 3345 | lun_p = (struct scsi_lun *)&arr[8]; |
3363 | if (upper) | 3346 | |
3364 | one_lun[i].scsi_lun[0] = | 3347 | /* LUNs use single level peripheral device addressing method */ |
3365 | (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); | 3348 | lun = sdebug_no_lun_0 ? 1 : 0; |
3366 | one_lun[i].scsi_lun[1] = lun & 0xff; | 3349 | for (i = 0; i < lun_cnt; i++) |
3367 | } | 3350 | int_to_scsilun(lun++, lun_p++); |
3368 | if (want_wlun) { | 3351 | |
3369 | one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff; | 3352 | if (wlun_cnt) |
3370 | one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff; | 3353 | int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++); |
3371 | i++; | 3354 | |
3372 | } | 3355 | put_unaligned_be32(rlen - 8, &arr[0]); |
3373 | alloc_len = (unsigned char *)(one_lun + i) - arr; | 3356 | |
3374 | return fill_from_dev_buffer(scp, arr, | 3357 | res = fill_from_dev_buffer(scp, arr, rlen); |
3375 | min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); | 3358 | vfree(arr); |
3359 | return res; | ||
3376 | } | 3360 | } |
3377 | 3361 | ||
3378 | static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | 3362 | static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, |
@@ -3385,7 +3369,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | |||
3385 | struct sg_mapping_iter miter; | 3369 | struct sg_mapping_iter miter; |
3386 | 3370 | ||
3387 | /* better not to use temporary buffer. */ | 3371 | /* better not to use temporary buffer. */ |
3388 | buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); | 3372 | buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); |
3389 | if (!buf) { | 3373 | if (!buf) { |
3390 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, | 3374 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
3391 | INSUFF_RES_ASCQ); | 3375 | INSUFF_RES_ASCQ); |
@@ -3411,8 +3395,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | |||
3411 | return 0; | 3395 | return 0; |
3412 | } | 3396 | } |
3413 | 3397 | ||
3414 | static int | 3398 | static int resp_xdwriteread_10(struct scsi_cmnd *scp, |
3415 | resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3399 | struct sdebug_dev_info *devip) |
3416 | { | 3400 | { |
3417 | u8 *cmd = scp->cmnd; | 3401 | u8 *cmd = scp->cmnd; |
3418 | u64 lba; | 3402 | u64 lba; |
@@ -3437,41 +3421,66 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3437 | return resp_xdwriteread(scp, lba, num, devip); | 3421 | return resp_xdwriteread(scp, lba, num, devip); |
3438 | } | 3422 | } |
3439 | 3423 | ||
3440 | /* When timer or tasklet goes off this function is called. */ | 3424 | static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) |
3441 | static void sdebug_q_cmd_complete(unsigned long indx) | ||
3442 | { | 3425 | { |
3443 | int qa_indx; | 3426 | struct sdebug_queue *sqp = sdebug_q_arr; |
3427 | |||
3428 | if (sdebug_mq_active) { | ||
3429 | u32 tag = blk_mq_unique_tag(cmnd->request); | ||
3430 | u16 hwq = blk_mq_unique_tag_to_hwq(tag); | ||
3431 | |||
3432 | if (unlikely(hwq >= submit_queues)) { | ||
3433 | pr_warn("Unexpected hwq=%d, apply modulo\n", hwq); | ||
3434 | hwq %= submit_queues; | ||
3435 | } | ||
3436 | pr_debug("tag=%u, hwq=%d\n", tag, hwq); | ||
3437 | return sqp + hwq; | ||
3438 | } else | ||
3439 | return sqp; | ||
3440 | } | ||
3441 | |||
3442 | /* Queued (deferred) command completions converge here. */ | ||
3443 | static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) | ||
3444 | { | ||
3445 | int qc_idx; | ||
3444 | int retiring = 0; | 3446 | int retiring = 0; |
3445 | unsigned long iflags; | 3447 | unsigned long iflags; |
3448 | struct sdebug_queue *sqp; | ||
3446 | struct sdebug_queued_cmd *sqcp; | 3449 | struct sdebug_queued_cmd *sqcp; |
3447 | struct scsi_cmnd *scp; | 3450 | struct scsi_cmnd *scp; |
3448 | struct sdebug_dev_info *devip; | 3451 | struct sdebug_dev_info *devip; |
3449 | 3452 | ||
3450 | atomic_inc(&sdebug_completions); | 3453 | qc_idx = sd_dp->qc_idx; |
3451 | qa_indx = indx; | 3454 | sqp = sdebug_q_arr + sd_dp->sqa_idx; |
3452 | if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { | 3455 | if (sdebug_statistics) { |
3453 | pr_err("wild qa_indx=%d\n", qa_indx); | 3456 | atomic_inc(&sdebug_completions); |
3457 | if (raw_smp_processor_id() != sd_dp->issuing_cpu) | ||
3458 | atomic_inc(&sdebug_miss_cpus); | ||
3459 | } | ||
3460 | if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) { | ||
3461 | pr_err("wild qc_idx=%d\n", qc_idx); | ||
3454 | return; | 3462 | return; |
3455 | } | 3463 | } |
3456 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3464 | spin_lock_irqsave(&sqp->qc_lock, iflags); |
3457 | sqcp = &queued_arr[qa_indx]; | 3465 | sqcp = &sqp->qc_arr[qc_idx]; |
3458 | scp = sqcp->a_cmnd; | 3466 | scp = sqcp->a_cmnd; |
3459 | if (NULL == scp) { | 3467 | if (unlikely(scp == NULL)) { |
3460 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3468 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3461 | pr_err("scp is NULL\n"); | 3469 | pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n", |
3470 | sd_dp->sqa_idx, qc_idx); | ||
3462 | return; | 3471 | return; |
3463 | } | 3472 | } |
3464 | devip = (struct sdebug_dev_info *)scp->device->hostdata; | 3473 | devip = (struct sdebug_dev_info *)scp->device->hostdata; |
3465 | if (devip) | 3474 | if (likely(devip)) |
3466 | atomic_dec(&devip->num_in_q); | 3475 | atomic_dec(&devip->num_in_q); |
3467 | else | 3476 | else |
3468 | pr_err("devip=NULL\n"); | 3477 | pr_err("devip=NULL\n"); |
3469 | if (atomic_read(&retired_max_queue) > 0) | 3478 | if (unlikely(atomic_read(&retired_max_queue) > 0)) |
3470 | retiring = 1; | 3479 | retiring = 1; |
3471 | 3480 | ||
3472 | sqcp->a_cmnd = NULL; | 3481 | sqcp->a_cmnd = NULL; |
3473 | if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { | 3482 | if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) { |
3474 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3483 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3475 | pr_err("Unexpected completion\n"); | 3484 | pr_err("Unexpected completion\n"); |
3476 | return; | 3485 | return; |
3477 | } | 3486 | } |
@@ -3480,105 +3489,71 @@ static void sdebug_q_cmd_complete(unsigned long indx) | |||
3480 | int k, retval; | 3489 | int k, retval; |
3481 | 3490 | ||
3482 | retval = atomic_read(&retired_max_queue); | 3491 | retval = atomic_read(&retired_max_queue); |
3483 | if (qa_indx >= retval) { | 3492 | if (qc_idx >= retval) { |
3484 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3493 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3485 | pr_err("index %d too large\n", retval); | 3494 | pr_err("index %d too large\n", retval); |
3486 | return; | 3495 | return; |
3487 | } | 3496 | } |
3488 | k = find_last_bit(queued_in_use_bm, retval); | 3497 | k = find_last_bit(sqp->in_use_bm, retval); |
3489 | if ((k < scsi_debug_max_queue) || (k == retval)) | 3498 | if ((k < sdebug_max_queue) || (k == retval)) |
3490 | atomic_set(&retired_max_queue, 0); | 3499 | atomic_set(&retired_max_queue, 0); |
3491 | else | 3500 | else |
3492 | atomic_set(&retired_max_queue, k + 1); | 3501 | atomic_set(&retired_max_queue, k + 1); |
3493 | } | 3502 | } |
3494 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3503 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3495 | scp->scsi_done(scp); /* callback to mid level */ | 3504 | scp->scsi_done(scp); /* callback to mid level */ |
3496 | } | 3505 | } |
3497 | 3506 | ||
3498 | /* When high resolution timer goes off this function is called. */ | 3507 | /* When high resolution timer goes off this function is called. */ |
3499 | static enum hrtimer_restart | 3508 | static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer) |
3500 | sdebug_q_cmd_hrt_complete(struct hrtimer *timer) | ||
3501 | { | 3509 | { |
3502 | int qa_indx; | 3510 | struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer, |
3503 | int retiring = 0; | 3511 | hrt); |
3504 | unsigned long iflags; | 3512 | sdebug_q_cmd_complete(sd_dp); |
3505 | struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer; | ||
3506 | struct sdebug_queued_cmd *sqcp; | ||
3507 | struct scsi_cmnd *scp; | ||
3508 | struct sdebug_dev_info *devip; | ||
3509 | |||
3510 | atomic_inc(&sdebug_completions); | ||
3511 | qa_indx = sd_hrtp->qa_indx; | ||
3512 | if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { | ||
3513 | pr_err("wild qa_indx=%d\n", qa_indx); | ||
3514 | goto the_end; | ||
3515 | } | ||
3516 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
3517 | sqcp = &queued_arr[qa_indx]; | ||
3518 | scp = sqcp->a_cmnd; | ||
3519 | if (NULL == scp) { | ||
3520 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3521 | pr_err("scp is NULL\n"); | ||
3522 | goto the_end; | ||
3523 | } | ||
3524 | devip = (struct sdebug_dev_info *)scp->device->hostdata; | ||
3525 | if (devip) | ||
3526 | atomic_dec(&devip->num_in_q); | ||
3527 | else | ||
3528 | pr_err("devip=NULL\n"); | ||
3529 | if (atomic_read(&retired_max_queue) > 0) | ||
3530 | retiring = 1; | ||
3531 | |||
3532 | sqcp->a_cmnd = NULL; | ||
3533 | if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { | ||
3534 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3535 | pr_err("Unexpected completion\n"); | ||
3536 | goto the_end; | ||
3537 | } | ||
3538 | |||
3539 | if (unlikely(retiring)) { /* user has reduced max_queue */ | ||
3540 | int k, retval; | ||
3541 | |||
3542 | retval = atomic_read(&retired_max_queue); | ||
3543 | if (qa_indx >= retval) { | ||
3544 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3545 | pr_err("index %d too large\n", retval); | ||
3546 | goto the_end; | ||
3547 | } | ||
3548 | k = find_last_bit(queued_in_use_bm, retval); | ||
3549 | if ((k < scsi_debug_max_queue) || (k == retval)) | ||
3550 | atomic_set(&retired_max_queue, 0); | ||
3551 | else | ||
3552 | atomic_set(&retired_max_queue, k + 1); | ||
3553 | } | ||
3554 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3555 | scp->scsi_done(scp); /* callback to mid level */ | ||
3556 | the_end: | ||
3557 | return HRTIMER_NORESTART; | 3513 | return HRTIMER_NORESTART; |
3558 | } | 3514 | } |
3559 | 3515 | ||
3560 | static struct sdebug_dev_info * | 3516 | /* When work queue schedules work, it calls this function. */ |
3561 | sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) | 3517 | static void sdebug_q_cmd_wq_complete(struct work_struct *work) |
3518 | { | ||
3519 | struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer, | ||
3520 | ew.work); | ||
3521 | sdebug_q_cmd_complete(sd_dp); | ||
3522 | } | ||
3523 | |||
3524 | static bool got_shared_uuid; | ||
3525 | static uuid_be shared_uuid; | ||
3526 | |||
3527 | static struct sdebug_dev_info *sdebug_device_create( | ||
3528 | struct sdebug_host_info *sdbg_host, gfp_t flags) | ||
3562 | { | 3529 | { |
3563 | struct sdebug_dev_info *devip; | 3530 | struct sdebug_dev_info *devip; |
3564 | 3531 | ||
3565 | devip = kzalloc(sizeof(*devip), flags); | 3532 | devip = kzalloc(sizeof(*devip), flags); |
3566 | if (devip) { | 3533 | if (devip) { |
3534 | if (sdebug_uuid_ctl == 1) | ||
3535 | uuid_be_gen(&devip->lu_name); | ||
3536 | else if (sdebug_uuid_ctl == 2) { | ||
3537 | if (got_shared_uuid) | ||
3538 | devip->lu_name = shared_uuid; | ||
3539 | else { | ||
3540 | uuid_be_gen(&shared_uuid); | ||
3541 | got_shared_uuid = true; | ||
3542 | devip->lu_name = shared_uuid; | ||
3543 | } | ||
3544 | } | ||
3567 | devip->sdbg_host = sdbg_host; | 3545 | devip->sdbg_host = sdbg_host; |
3568 | list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); | 3546 | list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); |
3569 | } | 3547 | } |
3570 | return devip; | 3548 | return devip; |
3571 | } | 3549 | } |
3572 | 3550 | ||
3573 | static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | 3551 | static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev) |
3574 | { | 3552 | { |
3575 | struct sdebug_host_info * sdbg_host; | 3553 | struct sdebug_host_info *sdbg_host; |
3576 | struct sdebug_dev_info * open_devip = NULL; | 3554 | struct sdebug_dev_info *open_devip = NULL; |
3577 | struct sdebug_dev_info * devip = | 3555 | struct sdebug_dev_info *devip; |
3578 | (struct sdebug_dev_info *)sdev->hostdata; | ||
3579 | 3556 | ||
3580 | if (devip) | ||
3581 | return devip; | ||
3582 | sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); | 3557 | sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); |
3583 | if (!sdbg_host) { | 3558 | if (!sdbg_host) { |
3584 | pr_err("Host info NULL\n"); | 3559 | pr_err("Host info NULL\n"); |
@@ -3614,7 +3589,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | |||
3614 | 3589 | ||
3615 | static int scsi_debug_slave_alloc(struct scsi_device *sdp) | 3590 | static int scsi_debug_slave_alloc(struct scsi_device *sdp) |
3616 | { | 3591 | { |
3617 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 3592 | if (sdebug_verbose) |
3618 | pr_info("slave_alloc <%u %u %u %llu>\n", | 3593 | pr_info("slave_alloc <%u %u %u %llu>\n", |
3619 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 3594 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
3620 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); | 3595 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); |
@@ -3623,19 +3598,22 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp) | |||
3623 | 3598 | ||
3624 | static int scsi_debug_slave_configure(struct scsi_device *sdp) | 3599 | static int scsi_debug_slave_configure(struct scsi_device *sdp) |
3625 | { | 3600 | { |
3626 | struct sdebug_dev_info *devip; | 3601 | struct sdebug_dev_info *devip = |
3602 | (struct sdebug_dev_info *)sdp->hostdata; | ||
3627 | 3603 | ||
3628 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 3604 | if (sdebug_verbose) |
3629 | pr_info("slave_configure <%u %u %u %llu>\n", | 3605 | pr_info("slave_configure <%u %u %u %llu>\n", |
3630 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 3606 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
3631 | if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) | 3607 | if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) |
3632 | sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; | 3608 | sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; |
3633 | devip = devInfoReg(sdp); | 3609 | if (devip == NULL) { |
3634 | if (NULL == devip) | 3610 | devip = find_build_dev_info(sdp); |
3635 | return 1; /* no resources, will be marked offline */ | 3611 | if (devip == NULL) |
3612 | return 1; /* no resources, will be marked offline */ | ||
3613 | } | ||
3636 | sdp->hostdata = devip; | 3614 | sdp->hostdata = devip; |
3637 | blk_queue_max_segment_size(sdp->request_queue, -1U); | 3615 | blk_queue_max_segment_size(sdp->request_queue, -1U); |
3638 | if (scsi_debug_no_uld) | 3616 | if (sdebug_no_uld) |
3639 | sdp->no_uld_attach = 1; | 3617 | sdp->no_uld_attach = 1; |
3640 | return 0; | 3618 | return 0; |
3641 | } | 3619 | } |
@@ -3645,7 +3623,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) | |||
3645 | struct sdebug_dev_info *devip = | 3623 | struct sdebug_dev_info *devip = |
3646 | (struct sdebug_dev_info *)sdp->hostdata; | 3624 | (struct sdebug_dev_info *)sdp->hostdata; |
3647 | 3625 | ||
3648 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 3626 | if (sdebug_verbose) |
3649 | pr_info("slave_destroy <%u %u %u %llu>\n", | 3627 | pr_info("slave_destroy <%u %u %u %llu>\n", |
3650 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 3628 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
3651 | if (devip) { | 3629 | if (devip) { |
@@ -3655,135 +3633,130 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) | |||
3655 | } | 3633 | } |
3656 | } | 3634 | } |
3657 | 3635 | ||
3658 | /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */ | 3636 | static void stop_qc_helper(struct sdebug_defer *sd_dp) |
3659 | static int stop_queued_cmnd(struct scsi_cmnd *cmnd) | 3637 | { |
3638 | if (!sd_dp) | ||
3639 | return; | ||
3640 | if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) | ||
3641 | hrtimer_cancel(&sd_dp->hrt); | ||
3642 | else if (sdebug_jdelay < 0) | ||
3643 | cancel_work_sync(&sd_dp->ew.work); | ||
3644 | } | ||
3645 | |||
3646 | /* If @cmnd found deletes its timer or work queue and returns true; else | ||
3647 | returns false */ | ||
3648 | static bool stop_queued_cmnd(struct scsi_cmnd *cmnd) | ||
3660 | { | 3649 | { |
3661 | unsigned long iflags; | 3650 | unsigned long iflags; |
3662 | int k, qmax, r_qmax; | 3651 | int j, k, qmax, r_qmax; |
3652 | struct sdebug_queue *sqp; | ||
3663 | struct sdebug_queued_cmd *sqcp; | 3653 | struct sdebug_queued_cmd *sqcp; |
3664 | struct sdebug_dev_info *devip; | 3654 | struct sdebug_dev_info *devip; |
3665 | 3655 | struct sdebug_defer *sd_dp; | |
3666 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3656 | |
3667 | qmax = scsi_debug_max_queue; | 3657 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { |
3668 | r_qmax = atomic_read(&retired_max_queue); | 3658 | spin_lock_irqsave(&sqp->qc_lock, iflags); |
3669 | if (r_qmax > qmax) | 3659 | qmax = sdebug_max_queue; |
3670 | qmax = r_qmax; | 3660 | r_qmax = atomic_read(&retired_max_queue); |
3671 | for (k = 0; k < qmax; ++k) { | 3661 | if (r_qmax > qmax) |
3672 | if (test_bit(k, queued_in_use_bm)) { | 3662 | qmax = r_qmax; |
3673 | sqcp = &queued_arr[k]; | 3663 | for (k = 0; k < qmax; ++k) { |
3674 | if (cmnd == sqcp->a_cmnd) { | 3664 | if (test_bit(k, sqp->in_use_bm)) { |
3665 | sqcp = &sqp->qc_arr[k]; | ||
3666 | if (cmnd != sqcp->a_cmnd) | ||
3667 | continue; | ||
3668 | /* found */ | ||
3675 | devip = (struct sdebug_dev_info *) | 3669 | devip = (struct sdebug_dev_info *) |
3676 | cmnd->device->hostdata; | 3670 | cmnd->device->hostdata; |
3677 | if (devip) | 3671 | if (devip) |
3678 | atomic_dec(&devip->num_in_q); | 3672 | atomic_dec(&devip->num_in_q); |
3679 | sqcp->a_cmnd = NULL; | 3673 | sqcp->a_cmnd = NULL; |
3680 | spin_unlock_irqrestore(&queued_arr_lock, | 3674 | sd_dp = sqcp->sd_dp; |
3681 | iflags); | 3675 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3682 | if (scsi_debug_ndelay > 0) { | 3676 | stop_qc_helper(sd_dp); |
3683 | if (sqcp->sd_hrtp) | 3677 | clear_bit(k, sqp->in_use_bm); |
3684 | hrtimer_cancel( | 3678 | return true; |
3685 | &sqcp->sd_hrtp->hrt); | ||
3686 | } else if (scsi_debug_delay > 0) { | ||
3687 | if (sqcp->cmnd_timerp) | ||
3688 | del_timer_sync( | ||
3689 | sqcp->cmnd_timerp); | ||
3690 | } else if (scsi_debug_delay < 0) { | ||
3691 | if (sqcp->tletp) | ||
3692 | tasklet_kill(sqcp->tletp); | ||
3693 | } | ||
3694 | clear_bit(k, queued_in_use_bm); | ||
3695 | return 1; | ||
3696 | } | 3679 | } |
3697 | } | 3680 | } |
3681 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); | ||
3698 | } | 3682 | } |
3699 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3683 | return false; |
3700 | return 0; | ||
3701 | } | 3684 | } |
3702 | 3685 | ||
3703 | /* Deletes (stops) timers or tasklets of all queued commands */ | 3686 | /* Deletes (stops) timers or work queues of all queued commands */ |
3704 | static void stop_all_queued(void) | 3687 | static void stop_all_queued(void) |
3705 | { | 3688 | { |
3706 | unsigned long iflags; | 3689 | unsigned long iflags; |
3707 | int k; | 3690 | int j, k; |
3691 | struct sdebug_queue *sqp; | ||
3708 | struct sdebug_queued_cmd *sqcp; | 3692 | struct sdebug_queued_cmd *sqcp; |
3709 | struct sdebug_dev_info *devip; | 3693 | struct sdebug_dev_info *devip; |
3710 | 3694 | struct sdebug_defer *sd_dp; | |
3711 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3695 | |
3712 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { | 3696 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { |
3713 | if (test_bit(k, queued_in_use_bm)) { | 3697 | spin_lock_irqsave(&sqp->qc_lock, iflags); |
3714 | sqcp = &queued_arr[k]; | 3698 | for (k = 0; k < SDEBUG_CANQUEUE; ++k) { |
3715 | if (sqcp->a_cmnd) { | 3699 | if (test_bit(k, sqp->in_use_bm)) { |
3700 | sqcp = &sqp->qc_arr[k]; | ||
3701 | if (sqcp->a_cmnd == NULL) | ||
3702 | continue; | ||
3716 | devip = (struct sdebug_dev_info *) | 3703 | devip = (struct sdebug_dev_info *) |
3717 | sqcp->a_cmnd->device->hostdata; | 3704 | sqcp->a_cmnd->device->hostdata; |
3718 | if (devip) | 3705 | if (devip) |
3719 | atomic_dec(&devip->num_in_q); | 3706 | atomic_dec(&devip->num_in_q); |
3720 | sqcp->a_cmnd = NULL; | 3707 | sqcp->a_cmnd = NULL; |
3721 | spin_unlock_irqrestore(&queued_arr_lock, | 3708 | sd_dp = sqcp->sd_dp; |
3722 | iflags); | 3709 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3723 | if (scsi_debug_ndelay > 0) { | 3710 | stop_qc_helper(sd_dp); |
3724 | if (sqcp->sd_hrtp) | 3711 | clear_bit(k, sqp->in_use_bm); |
3725 | hrtimer_cancel( | 3712 | spin_lock_irqsave(&sqp->qc_lock, iflags); |
3726 | &sqcp->sd_hrtp->hrt); | ||
3727 | } else if (scsi_debug_delay > 0) { | ||
3728 | if (sqcp->cmnd_timerp) | ||
3729 | del_timer_sync( | ||
3730 | sqcp->cmnd_timerp); | ||
3731 | } else if (scsi_debug_delay < 0) { | ||
3732 | if (sqcp->tletp) | ||
3733 | tasklet_kill(sqcp->tletp); | ||
3734 | } | ||
3735 | clear_bit(k, queued_in_use_bm); | ||
3736 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
3737 | } | 3713 | } |
3738 | } | 3714 | } |
3715 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); | ||
3739 | } | 3716 | } |
3740 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3741 | } | 3717 | } |
3742 | 3718 | ||
3743 | /* Free queued command memory on heap */ | 3719 | /* Free queued command memory on heap */ |
3744 | static void free_all_queued(void) | 3720 | static void free_all_queued(void) |
3745 | { | 3721 | { |
3746 | unsigned long iflags; | 3722 | int j, k; |
3747 | int k; | 3723 | struct sdebug_queue *sqp; |
3748 | struct sdebug_queued_cmd *sqcp; | 3724 | struct sdebug_queued_cmd *sqcp; |
3749 | 3725 | ||
3750 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3726 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { |
3751 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { | 3727 | for (k = 0; k < SDEBUG_CANQUEUE; ++k) { |
3752 | sqcp = &queued_arr[k]; | 3728 | sqcp = &sqp->qc_arr[k]; |
3753 | kfree(sqcp->cmnd_timerp); | 3729 | kfree(sqcp->sd_dp); |
3754 | sqcp->cmnd_timerp = NULL; | 3730 | sqcp->sd_dp = NULL; |
3755 | kfree(sqcp->tletp); | 3731 | } |
3756 | sqcp->tletp = NULL; | ||
3757 | kfree(sqcp->sd_hrtp); | ||
3758 | sqcp->sd_hrtp = NULL; | ||
3759 | } | 3732 | } |
3760 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
3761 | } | 3733 | } |
3762 | 3734 | ||
3763 | static int scsi_debug_abort(struct scsi_cmnd *SCpnt) | 3735 | static int scsi_debug_abort(struct scsi_cmnd *SCpnt) |
3764 | { | 3736 | { |
3737 | bool ok; | ||
3738 | |||
3765 | ++num_aborts; | 3739 | ++num_aborts; |
3766 | if (SCpnt) { | 3740 | if (SCpnt) { |
3767 | if (SCpnt->device && | 3741 | ok = stop_queued_cmnd(SCpnt); |
3768 | (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) | 3742 | if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) |
3769 | sdev_printk(KERN_INFO, SCpnt->device, "%s\n", | 3743 | sdev_printk(KERN_INFO, SCpnt->device, |
3770 | __func__); | 3744 | "%s: command%s found\n", __func__, |
3771 | stop_queued_cmnd(SCpnt); | 3745 | ok ? "" : " not"); |
3772 | } | 3746 | } |
3773 | return SUCCESS; | 3747 | return SUCCESS; |
3774 | } | 3748 | } |
3775 | 3749 | ||
3776 | static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) | 3750 | static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) |
3777 | { | 3751 | { |
3778 | struct sdebug_dev_info * devip; | ||
3779 | |||
3780 | ++num_dev_resets; | 3752 | ++num_dev_resets; |
3781 | if (SCpnt && SCpnt->device) { | 3753 | if (SCpnt && SCpnt->device) { |
3782 | struct scsi_device *sdp = SCpnt->device; | 3754 | struct scsi_device *sdp = SCpnt->device; |
3755 | struct sdebug_dev_info *devip = | ||
3756 | (struct sdebug_dev_info *)sdp->hostdata; | ||
3783 | 3757 | ||
3784 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | 3758 | if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) |
3785 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | 3759 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); |
3786 | devip = devInfoReg(sdp); | ||
3787 | if (devip) | 3760 | if (devip) |
3788 | set_bit(SDEBUG_UA_POR, devip->uas_bm); | 3761 | set_bit(SDEBUG_UA_POR, devip->uas_bm); |
3789 | } | 3762 | } |
@@ -3804,7 +3777,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) | |||
3804 | sdp = SCpnt->device; | 3777 | sdp = SCpnt->device; |
3805 | if (!sdp) | 3778 | if (!sdp) |
3806 | goto lie; | 3779 | goto lie; |
3807 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | 3780 | if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) |
3808 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | 3781 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); |
3809 | hp = sdp->host; | 3782 | hp = sdp->host; |
3810 | if (!hp) | 3783 | if (!hp) |
@@ -3819,7 +3792,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) | |||
3819 | ++k; | 3792 | ++k; |
3820 | } | 3793 | } |
3821 | } | 3794 | } |
3822 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | 3795 | if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) |
3823 | sdev_printk(KERN_INFO, sdp, | 3796 | sdev_printk(KERN_INFO, sdp, |
3824 | "%s: %d device(s) found in target\n", __func__, k); | 3797 | "%s: %d device(s) found in target\n", __func__, k); |
3825 | lie: | 3798 | lie: |
@@ -3838,7 +3811,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) | |||
3838 | if (!(SCpnt && SCpnt->device)) | 3811 | if (!(SCpnt && SCpnt->device)) |
3839 | goto lie; | 3812 | goto lie; |
3840 | sdp = SCpnt->device; | 3813 | sdp = SCpnt->device; |
3841 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | 3814 | if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) |
3842 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | 3815 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); |
3843 | hp = sdp->host; | 3816 | hp = sdp->host; |
3844 | if (hp) { | 3817 | if (hp) { |
@@ -3852,7 +3825,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) | |||
3852 | } | 3825 | } |
3853 | } | 3826 | } |
3854 | } | 3827 | } |
3855 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | 3828 | if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) |
3856 | sdev_printk(KERN_INFO, sdp, | 3829 | sdev_printk(KERN_INFO, sdp, |
3857 | "%s: %d device(s) found in host\n", __func__, k); | 3830 | "%s: %d device(s) found in host\n", __func__, k); |
3858 | lie: | 3831 | lie: |
@@ -3866,7 +3839,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) | |||
3866 | int k = 0; | 3839 | int k = 0; |
3867 | 3840 | ||
3868 | ++num_host_resets; | 3841 | ++num_host_resets; |
3869 | if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) | 3842 | if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) |
3870 | sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); | 3843 | sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); |
3871 | spin_lock(&sdebug_host_list_lock); | 3844 | spin_lock(&sdebug_host_list_lock); |
3872 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { | 3845 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { |
@@ -3878,7 +3851,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) | |||
3878 | } | 3851 | } |
3879 | spin_unlock(&sdebug_host_list_lock); | 3852 | spin_unlock(&sdebug_host_list_lock); |
3880 | stop_all_queued(); | 3853 | stop_all_queued(); |
3881 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | 3854 | if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) |
3882 | sdev_printk(KERN_INFO, SCpnt->device, | 3855 | sdev_printk(KERN_INFO, SCpnt->device, |
3883 | "%s: %d device(s) found\n", __func__, k); | 3856 | "%s: %d device(s) found\n", __func__, k); |
3884 | return SUCCESS; | 3857 | return SUCCESS; |
@@ -3893,22 +3866,22 @@ static void __init sdebug_build_parts(unsigned char *ramp, | |||
3893 | int heads_by_sects, start_sec, end_sec; | 3866 | int heads_by_sects, start_sec, end_sec; |
3894 | 3867 | ||
3895 | /* assume partition table already zeroed */ | 3868 | /* assume partition table already zeroed */ |
3896 | if ((scsi_debug_num_parts < 1) || (store_size < 1048576)) | 3869 | if ((sdebug_num_parts < 1) || (store_size < 1048576)) |
3897 | return; | 3870 | return; |
3898 | if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { | 3871 | if (sdebug_num_parts > SDEBUG_MAX_PARTS) { |
3899 | scsi_debug_num_parts = SDEBUG_MAX_PARTS; | 3872 | sdebug_num_parts = SDEBUG_MAX_PARTS; |
3900 | pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); | 3873 | pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); |
3901 | } | 3874 | } |
3902 | num_sectors = (int)sdebug_store_sectors; | 3875 | num_sectors = (int)sdebug_store_sectors; |
3903 | sectors_per_part = (num_sectors - sdebug_sectors_per) | 3876 | sectors_per_part = (num_sectors - sdebug_sectors_per) |
3904 | / scsi_debug_num_parts; | 3877 | / sdebug_num_parts; |
3905 | heads_by_sects = sdebug_heads * sdebug_sectors_per; | 3878 | heads_by_sects = sdebug_heads * sdebug_sectors_per; |
3906 | starts[0] = sdebug_sectors_per; | 3879 | starts[0] = sdebug_sectors_per; |
3907 | for (k = 1; k < scsi_debug_num_parts; ++k) | 3880 | for (k = 1; k < sdebug_num_parts; ++k) |
3908 | starts[k] = ((k * sectors_per_part) / heads_by_sects) | 3881 | starts[k] = ((k * sectors_per_part) / heads_by_sects) |
3909 | * heads_by_sects; | 3882 | * heads_by_sects; |
3910 | starts[scsi_debug_num_parts] = num_sectors; | 3883 | starts[sdebug_num_parts] = num_sectors; |
3911 | starts[scsi_debug_num_parts + 1] = 0; | 3884 | starts[sdebug_num_parts + 1] = 0; |
3912 | 3885 | ||
3913 | ramp[510] = 0x55; /* magic partition markings */ | 3886 | ramp[510] = 0x55; /* magic partition markings */ |
3914 | ramp[511] = 0xAA; | 3887 | ramp[511] = 0xAA; |
@@ -3934,67 +3907,118 @@ static void __init sdebug_build_parts(unsigned char *ramp, | |||
3934 | } | 3907 | } |
3935 | } | 3908 | } |
3936 | 3909 | ||
3937 | static int | 3910 | static void block_unblock_all_queues(bool block) |
3938 | schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, | 3911 | { |
3939 | int scsi_result, int delta_jiff) | 3912 | int j; |
3913 | struct sdebug_queue *sqp; | ||
3914 | |||
3915 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) | ||
3916 | atomic_set(&sqp->blocked, (int)block); | ||
3917 | } | ||
3918 | |||
3919 | /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 | ||
3920 | * commands will be processed normally before triggers occur. | ||
3921 | */ | ||
3922 | static void tweak_cmnd_count(void) | ||
3923 | { | ||
3924 | int count, modulo; | ||
3925 | |||
3926 | modulo = abs(sdebug_every_nth); | ||
3927 | if (modulo < 2) | ||
3928 | return; | ||
3929 | block_unblock_all_queues(true); | ||
3930 | count = atomic_read(&sdebug_cmnd_count); | ||
3931 | atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); | ||
3932 | block_unblock_all_queues(false); | ||
3933 | } | ||
3934 | |||
3935 | static void clear_queue_stats(void) | ||
3936 | { | ||
3937 | atomic_set(&sdebug_cmnd_count, 0); | ||
3938 | atomic_set(&sdebug_completions, 0); | ||
3939 | atomic_set(&sdebug_miss_cpus, 0); | ||
3940 | atomic_set(&sdebug_a_tsf, 0); | ||
3941 | } | ||
3942 | |||
3943 | static void setup_inject(struct sdebug_queue *sqp, | ||
3944 | struct sdebug_queued_cmd *sqcp) | ||
3945 | { | ||
3946 | if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) | ||
3947 | return; | ||
3948 | sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); | ||
3949 | sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts); | ||
3950 | sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts); | ||
3951 | sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); | ||
3952 | sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); | ||
3953 | } | ||
3954 | |||
3955 | /* Complete the processing of the thread that queued a SCSI command to this | ||
3956 | * driver. It either completes the command by calling cmnd_done() or | ||
3957 | * schedules a hr timer or work queue then returns 0. Returns | ||
3958 | * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. | ||
3959 | */ | ||
3960 | static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, | ||
3961 | int scsi_result, int delta_jiff) | ||
3940 | { | 3962 | { |
3941 | unsigned long iflags; | 3963 | unsigned long iflags; |
3942 | int k, num_in_q, qdepth, inject; | 3964 | int k, num_in_q, qdepth, inject; |
3943 | struct sdebug_queued_cmd *sqcp = NULL; | 3965 | struct sdebug_queue *sqp; |
3966 | struct sdebug_queued_cmd *sqcp; | ||
3944 | struct scsi_device *sdp; | 3967 | struct scsi_device *sdp; |
3968 | struct sdebug_defer *sd_dp; | ||
3945 | 3969 | ||
3946 | /* this should never happen */ | 3970 | if (unlikely(devip == NULL)) { |
3947 | if (WARN_ON(!cmnd)) | 3971 | if (scsi_result == 0) |
3948 | return SCSI_MLQUEUE_HOST_BUSY; | 3972 | scsi_result = DID_NO_CONNECT << 16; |
3949 | 3973 | goto respond_in_thread; | |
3950 | if (NULL == devip) { | ||
3951 | pr_warn("called devip == NULL\n"); | ||
3952 | /* no particularly good error to report back */ | ||
3953 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3954 | } | 3974 | } |
3955 | |||
3956 | sdp = cmnd->device; | 3975 | sdp = cmnd->device; |
3957 | 3976 | ||
3958 | if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 3977 | if (unlikely(sdebug_verbose && scsi_result)) |
3959 | sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", | 3978 | sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", |
3960 | __func__, scsi_result); | 3979 | __func__, scsi_result); |
3961 | if (delta_jiff == 0) | 3980 | if (delta_jiff == 0) |
3962 | goto respond_in_thread; | 3981 | goto respond_in_thread; |
3963 | 3982 | ||
3964 | /* schedule the response at a later time if resources permit */ | 3983 | /* schedule the response at a later time if resources permit */ |
3965 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3984 | sqp = get_queue(cmnd); |
3985 | spin_lock_irqsave(&sqp->qc_lock, iflags); | ||
3986 | if (unlikely(atomic_read(&sqp->blocked))) { | ||
3987 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); | ||
3988 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3989 | } | ||
3966 | num_in_q = atomic_read(&devip->num_in_q); | 3990 | num_in_q = atomic_read(&devip->num_in_q); |
3967 | qdepth = cmnd->device->queue_depth; | 3991 | qdepth = cmnd->device->queue_depth; |
3968 | inject = 0; | 3992 | inject = 0; |
3969 | if ((qdepth > 0) && (num_in_q >= qdepth)) { | 3993 | if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { |
3970 | if (scsi_result) { | 3994 | if (scsi_result) { |
3971 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3995 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3972 | goto respond_in_thread; | 3996 | goto respond_in_thread; |
3973 | } else | 3997 | } else |
3974 | scsi_result = device_qfull_result; | 3998 | scsi_result = device_qfull_result; |
3975 | } else if ((scsi_debug_every_nth != 0) && | 3999 | } else if (unlikely(sdebug_every_nth && |
3976 | (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) && | 4000 | (SDEBUG_OPT_RARE_TSF & sdebug_opts) && |
3977 | (scsi_result == 0)) { | 4001 | (scsi_result == 0))) { |
3978 | if ((num_in_q == (qdepth - 1)) && | 4002 | if ((num_in_q == (qdepth - 1)) && |
3979 | (atomic_inc_return(&sdebug_a_tsf) >= | 4003 | (atomic_inc_return(&sdebug_a_tsf) >= |
3980 | abs(scsi_debug_every_nth))) { | 4004 | abs(sdebug_every_nth))) { |
3981 | atomic_set(&sdebug_a_tsf, 0); | 4005 | atomic_set(&sdebug_a_tsf, 0); |
3982 | inject = 1; | 4006 | inject = 1; |
3983 | scsi_result = device_qfull_result; | 4007 | scsi_result = device_qfull_result; |
3984 | } | 4008 | } |
3985 | } | 4009 | } |
3986 | 4010 | ||
3987 | k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); | 4011 | k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue); |
3988 | if (k >= scsi_debug_max_queue) { | 4012 | if (unlikely(k >= sdebug_max_queue)) { |
3989 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 4013 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3990 | if (scsi_result) | 4014 | if (scsi_result) |
3991 | goto respond_in_thread; | 4015 | goto respond_in_thread; |
3992 | else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) | 4016 | else if (SDEBUG_OPT_ALL_TSF & sdebug_opts) |
3993 | scsi_result = device_qfull_result; | 4017 | scsi_result = device_qfull_result; |
3994 | if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) | 4018 | if (SDEBUG_OPT_Q_NOISE & sdebug_opts) |
3995 | sdev_printk(KERN_INFO, sdp, | 4019 | sdev_printk(KERN_INFO, sdp, |
3996 | "%s: max_queue=%d exceeded, %s\n", | 4020 | "%s: max_queue=%d exceeded, %s\n", |
3997 | __func__, scsi_debug_max_queue, | 4021 | __func__, sdebug_max_queue, |
3998 | (scsi_result ? "status: TASK SET FULL" : | 4022 | (scsi_result ? "status: TASK SET FULL" : |
3999 | "report: host busy")); | 4023 | "report: host busy")); |
4000 | if (scsi_result) | 4024 | if (scsi_result) |
@@ -4002,55 +4026,56 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, | |||
4002 | else | 4026 | else |
4003 | return SCSI_MLQUEUE_HOST_BUSY; | 4027 | return SCSI_MLQUEUE_HOST_BUSY; |
4004 | } | 4028 | } |
4005 | __set_bit(k, queued_in_use_bm); | 4029 | __set_bit(k, sqp->in_use_bm); |
4006 | atomic_inc(&devip->num_in_q); | 4030 | atomic_inc(&devip->num_in_q); |
4007 | sqcp = &queued_arr[k]; | 4031 | sqcp = &sqp->qc_arr[k]; |
4008 | sqcp->a_cmnd = cmnd; | 4032 | sqcp->a_cmnd = cmnd; |
4033 | cmnd->host_scribble = (unsigned char *)sqcp; | ||
4009 | cmnd->result = scsi_result; | 4034 | cmnd->result = scsi_result; |
4010 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 4035 | sd_dp = sqcp->sd_dp; |
4011 | if (delta_jiff > 0) { | 4036 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
4012 | if (NULL == sqcp->cmnd_timerp) { | 4037 | if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt)) |
4013 | sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list), | 4038 | setup_inject(sqp, sqcp); |
4014 | GFP_ATOMIC); | 4039 | if (delta_jiff > 0 || sdebug_ndelay > 0) { |
4015 | if (NULL == sqcp->cmnd_timerp) | 4040 | ktime_t kt; |
4016 | return SCSI_MLQUEUE_HOST_BUSY; | 4041 | |
4017 | init_timer(sqcp->cmnd_timerp); | 4042 | if (delta_jiff > 0) { |
4018 | } | 4043 | struct timespec ts; |
4019 | sqcp->cmnd_timerp->function = sdebug_q_cmd_complete; | 4044 | |
4020 | sqcp->cmnd_timerp->data = k; | 4045 | jiffies_to_timespec(delta_jiff, &ts); |
4021 | sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff; | 4046 | kt = ktime_set(ts.tv_sec, ts.tv_nsec); |
4022 | add_timer(sqcp->cmnd_timerp); | 4047 | } else |
4023 | } else if (scsi_debug_ndelay > 0) { | 4048 | kt = ktime_set(0, sdebug_ndelay); |
4024 | ktime_t kt = ktime_set(0, scsi_debug_ndelay); | 4049 | if (NULL == sd_dp) { |
4025 | struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp; | 4050 | sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); |
4026 | 4051 | if (NULL == sd_dp) | |
4027 | if (NULL == sd_hp) { | ||
4028 | sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC); | ||
4029 | if (NULL == sd_hp) | ||
4030 | return SCSI_MLQUEUE_HOST_BUSY; | 4052 | return SCSI_MLQUEUE_HOST_BUSY; |
4031 | sqcp->sd_hrtp = sd_hp; | 4053 | sqcp->sd_dp = sd_dp; |
4032 | hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC, | 4054 | hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, |
4033 | HRTIMER_MODE_REL); | 4055 | HRTIMER_MODE_REL_PINNED); |
4034 | sd_hp->hrt.function = sdebug_q_cmd_hrt_complete; | 4056 | sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; |
4035 | sd_hp->qa_indx = k; | 4057 | sd_dp->sqa_idx = sqp - sdebug_q_arr; |
4058 | sd_dp->qc_idx = k; | ||
4036 | } | 4059 | } |
4037 | hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL); | 4060 | if (sdebug_statistics) |
4038 | } else { /* delay < 0 */ | 4061 | sd_dp->issuing_cpu = raw_smp_processor_id(); |
4039 | if (NULL == sqcp->tletp) { | 4062 | hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); |
4040 | sqcp->tletp = kmalloc(sizeof(*sqcp->tletp), | 4063 | } else { /* jdelay < 0, use work queue */ |
4041 | GFP_ATOMIC); | 4064 | if (NULL == sd_dp) { |
4042 | if (NULL == sqcp->tletp) | 4065 | sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC); |
4066 | if (NULL == sd_dp) | ||
4043 | return SCSI_MLQUEUE_HOST_BUSY; | 4067 | return SCSI_MLQUEUE_HOST_BUSY; |
4044 | tasklet_init(sqcp->tletp, | 4068 | sqcp->sd_dp = sd_dp; |
4045 | sdebug_q_cmd_complete, k); | 4069 | sd_dp->sqa_idx = sqp - sdebug_q_arr; |
4070 | sd_dp->qc_idx = k; | ||
4071 | INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); | ||
4046 | } | 4072 | } |
4047 | if (-1 == delta_jiff) | 4073 | if (sdebug_statistics) |
4048 | tasklet_hi_schedule(sqcp->tletp); | 4074 | sd_dp->issuing_cpu = raw_smp_processor_id(); |
4049 | else | 4075 | schedule_work(&sd_dp->ew.work); |
4050 | tasklet_schedule(sqcp->tletp); | ||
4051 | } | 4076 | } |
4052 | if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) && | 4077 | if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && |
4053 | (scsi_result == device_qfull_result)) | 4078 | (scsi_result == device_qfull_result))) |
4054 | sdev_printk(KERN_INFO, sdp, | 4079 | sdev_printk(KERN_INFO, sdp, |
4055 | "%s: num_in_q=%d +1, %s%s\n", __func__, | 4080 | "%s: num_in_q=%d +1, %s%s\n", __func__, |
4056 | num_in_q, (inject ? "<inject> " : ""), | 4081 | num_in_q, (inject ? "<inject> " : ""), |
@@ -4069,52 +4094,55 @@ respond_in_thread: /* call back to mid-layer using invocation thread */ | |||
4069 | as it can when the corresponding attribute in the | 4094 | as it can when the corresponding attribute in the |
4070 | /sys/bus/pseudo/drivers/scsi_debug directory is changed. | 4095 | /sys/bus/pseudo/drivers/scsi_debug directory is changed. |
4071 | */ | 4096 | */ |
4072 | module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); | 4097 | module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); |
4073 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); | 4098 | module_param_named(ato, sdebug_ato, int, S_IRUGO); |
4074 | module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR); | 4099 | module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); |
4075 | module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); | 4100 | module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); |
4076 | module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); | 4101 | module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); |
4077 | module_param_named(dif, scsi_debug_dif, int, S_IRUGO); | 4102 | module_param_named(dif, sdebug_dif, int, S_IRUGO); |
4078 | module_param_named(dix, scsi_debug_dix, int, S_IRUGO); | 4103 | module_param_named(dix, sdebug_dix, int, S_IRUGO); |
4079 | module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); | 4104 | module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR); |
4080 | module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); | 4105 | module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR); |
4081 | module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); | 4106 | module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); |
4082 | module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); | 4107 | module_param_named(guard, sdebug_guard, uint, S_IRUGO); |
4083 | module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR); | 4108 | module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); |
4084 | module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); | 4109 | module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); |
4085 | module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); | 4110 | module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); |
4086 | module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); | 4111 | module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); |
4087 | module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); | 4112 | module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); |
4088 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | 4113 | module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); |
4089 | module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); | 4114 | module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); |
4090 | module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); | 4115 | module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR); |
4091 | module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR); | 4116 | module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR); |
4092 | module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); | 4117 | module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR); |
4093 | module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); | 4118 | module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO); |
4094 | module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); | 4119 | module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO); |
4095 | module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); | 4120 | module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); |
4096 | module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); | 4121 | module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); |
4097 | module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); | 4122 | module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); |
4098 | module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); | 4123 | module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); |
4099 | module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); | 4124 | module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); |
4100 | module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR); | 4125 | module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); |
4101 | module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); | 4126 | module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); |
4102 | module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); | 4127 | module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO); |
4103 | module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR); | 4128 | module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR); |
4104 | module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); | 4129 | module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR); |
4105 | module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); | 4130 | module_param_named(submit_queues, submit_queues, int, S_IRUGO); |
4106 | module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); | 4131 | module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); |
4107 | module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); | 4132 | module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); |
4108 | module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); | 4133 | module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); |
4109 | module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, | 4134 | module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); |
4135 | module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); | ||
4136 | module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); | ||
4137 | module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, | ||
4110 | S_IRUGO | S_IWUSR); | 4138 | S_IRUGO | S_IWUSR); |
4111 | module_param_named(write_same_length, scsi_debug_write_same_length, int, | 4139 | module_param_named(write_same_length, sdebug_write_same_length, int, |
4112 | S_IRUGO | S_IWUSR); | 4140 | S_IRUGO | S_IWUSR); |
4113 | 4141 | ||
4114 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); | 4142 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); |
4115 | MODULE_DESCRIPTION("SCSI debug adapter driver"); | 4143 | MODULE_DESCRIPTION("SCSI debug adapter driver"); |
4116 | MODULE_LICENSE("GPL"); | 4144 | MODULE_LICENSE("GPL"); |
4117 | MODULE_VERSION(SCSI_DEBUG_VERSION); | 4145 | MODULE_VERSION(SDEBUG_VERSION); |
4118 | 4146 | ||
4119 | MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); | 4147 | MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); |
4120 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); | 4148 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); |
@@ -4127,11 +4155,12 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); | |||
4127 | MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); | 4155 | MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); |
4128 | MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); | 4156 | MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); |
4129 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | 4157 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); |
4130 | MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)"); | 4158 | MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); |
4131 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); | 4159 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); |
4132 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); | 4160 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); |
4133 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); | 4161 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); |
4134 | MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); | 4162 | MODULE_PARM_DESC(lbprz, |
4163 | "on read unmapped LBs return 0 when 1 (def), return 0xff when 2"); | ||
4135 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); | 4164 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); |
4136 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); | 4165 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); |
4137 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); | 4166 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); |
@@ -4145,30 +4174,42 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... | |||
4145 | MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); | 4174 | MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); |
4146 | MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); | 4175 | MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); |
4147 | MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); | 4176 | MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); |
4148 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])"); | 4177 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); |
4149 | MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); | 4178 | MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); |
4179 | MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)"); | ||
4150 | MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); | 4180 | MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); |
4181 | MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)"); | ||
4151 | MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); | 4182 | MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); |
4152 | MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); | 4183 | MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); |
4153 | MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); | 4184 | MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); |
4154 | MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); | 4185 | MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); |
4186 | MODULE_PARM_DESC(uuid_ctl, | ||
4187 | "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); | ||
4155 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); | 4188 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); |
4156 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); | 4189 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); |
4157 | MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); | 4190 | MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); |
4158 | 4191 | ||
4159 | static char sdebug_info[256]; | 4192 | #define SDEBUG_INFO_LEN 256 |
4193 | static char sdebug_info[SDEBUG_INFO_LEN]; | ||
4160 | 4194 | ||
4161 | static const char * scsi_debug_info(struct Scsi_Host * shp) | 4195 | static const char * scsi_debug_info(struct Scsi_Host * shp) |
4162 | { | 4196 | { |
4163 | sprintf(sdebug_info, "scsi_debug, version %s [%s], " | 4197 | int k; |
4164 | "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION, | 4198 | |
4165 | scsi_debug_version_date, scsi_debug_dev_size_mb, | 4199 | k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n", |
4166 | scsi_debug_opts); | 4200 | my_name, SDEBUG_VERSION, sdebug_version_date); |
4201 | if (k >= (SDEBUG_INFO_LEN - 1)) | ||
4202 | return sdebug_info; | ||
4203 | scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, | ||
4204 | " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d", | ||
4205 | sdebug_dev_size_mb, sdebug_opts, submit_queues, | ||
4206 | "statistics", (int)sdebug_statistics); | ||
4167 | return sdebug_info; | 4207 | return sdebug_info; |
4168 | } | 4208 | } |
4169 | 4209 | ||
4170 | /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ | 4210 | /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ |
4171 | static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) | 4211 | static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, |
4212 | int length) | ||
4172 | { | 4213 | { |
4173 | char arr[16]; | 4214 | char arr[16]; |
4174 | int opts; | 4215 | int opts; |
@@ -4180,9 +4221,11 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt | |||
4180 | arr[minLen] = '\0'; | 4221 | arr[minLen] = '\0'; |
4181 | if (1 != sscanf(arr, "%d", &opts)) | 4222 | if (1 != sscanf(arr, "%d", &opts)) |
4182 | return -EINVAL; | 4223 | return -EINVAL; |
4183 | scsi_debug_opts = opts; | 4224 | sdebug_opts = opts; |
4184 | if (scsi_debug_every_nth != 0) | 4225 | sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); |
4185 | atomic_set(&sdebug_cmnd_count, 0); | 4226 | sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); |
4227 | if (sdebug_every_nth != 0) | ||
4228 | tweak_cmnd_count(); | ||
4186 | return length; | 4229 | return length; |
4187 | } | 4230 | } |
4188 | 4231 | ||
@@ -4191,69 +4234,83 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt | |||
4191 | * output are not atomics so might be inaccurate in a busy system. */ | 4234 | * output are not atomics so might be inaccurate in a busy system. */ |
4192 | static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) | 4235 | static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) |
4193 | { | 4236 | { |
4194 | int f, l; | 4237 | int f, j, l; |
4195 | char b[32]; | 4238 | struct sdebug_queue *sqp; |
4196 | 4239 | ||
4197 | if (scsi_debug_every_nth > 0) | 4240 | seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n", |
4198 | snprintf(b, sizeof(b), " (curr:%d)", | 4241 | SDEBUG_VERSION, sdebug_version_date); |
4199 | ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ? | 4242 | seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n", |
4200 | atomic_read(&sdebug_a_tsf) : | 4243 | sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb, |
4201 | atomic_read(&sdebug_cmnd_count))); | 4244 | sdebug_opts, sdebug_every_nth); |
4202 | else | 4245 | seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n", |
4203 | b[0] = '\0'; | 4246 | sdebug_jdelay, sdebug_ndelay, sdebug_max_luns, |
4204 | 4247 | sdebug_sector_size, "bytes"); | |
4205 | seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n" | 4248 | seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n", |
4206 | "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " | 4249 | sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, |
4207 | "every_nth=%d%s\n" | 4250 | num_aborts); |
4208 | "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n" | 4251 | seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", |
4209 | "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" | 4252 | num_dev_resets, num_target_resets, num_bus_resets, |
4210 | "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, " | 4253 | num_host_resets); |
4211 | "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d " | 4254 | seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n", |
4212 | "usec_in_jiffy=%lu\n", | 4255 | dix_reads, dix_writes, dif_errors); |
4213 | SCSI_DEBUG_VERSION, scsi_debug_version_date, | 4256 | seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n", |
4214 | scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts, | 4257 | TICK_NSEC / 1000, "statistics", sdebug_statistics, |
4215 | scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay, | 4258 | sdebug_mq_active); |
4216 | scsi_debug_max_luns, atomic_read(&sdebug_completions), | 4259 | seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n", |
4217 | scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, | 4260 | atomic_read(&sdebug_cmnd_count), |
4218 | sdebug_sectors_per, num_aborts, num_dev_resets, | 4261 | atomic_read(&sdebug_completions), |
4219 | num_target_resets, num_bus_resets, num_host_resets, | 4262 | "miss_cpus", atomic_read(&sdebug_miss_cpus), |
4220 | dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000); | 4263 | atomic_read(&sdebug_a_tsf)); |
4221 | 4264 | ||
4222 | f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue); | 4265 | seq_printf(m, "submit_queues=%d\n", submit_queues); |
4223 | if (f != scsi_debug_max_queue) { | 4266 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { |
4224 | l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue); | 4267 | seq_printf(m, " queue %d:\n", j); |
4225 | seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n", | 4268 | f = find_first_bit(sqp->in_use_bm, sdebug_max_queue); |
4226 | "queued_in_use_bm", f, l); | 4269 | if (f != sdebug_max_queue) { |
4270 | l = find_last_bit(sqp->in_use_bm, sdebug_max_queue); | ||
4271 | seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", | ||
4272 | "first,last bits", f, l); | ||
4273 | } | ||
4227 | } | 4274 | } |
4228 | return 0; | 4275 | return 0; |
4229 | } | 4276 | } |
4230 | 4277 | ||
4231 | static ssize_t delay_show(struct device_driver *ddp, char *buf) | 4278 | static ssize_t delay_show(struct device_driver *ddp, char *buf) |
4232 | { | 4279 | { |
4233 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); | 4280 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay); |
4234 | } | 4281 | } |
4235 | /* Returns -EBUSY if delay is being changed and commands are queued */ | 4282 | /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit |
4283 | * of delay is jiffies. | ||
4284 | */ | ||
4236 | static ssize_t delay_store(struct device_driver *ddp, const char *buf, | 4285 | static ssize_t delay_store(struct device_driver *ddp, const char *buf, |
4237 | size_t count) | 4286 | size_t count) |
4238 | { | 4287 | { |
4239 | int delay, res; | 4288 | int jdelay, res; |
4240 | 4289 | ||
4241 | if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) { | 4290 | if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) { |
4242 | res = count; | 4291 | res = count; |
4243 | if (scsi_debug_delay != delay) { | 4292 | if (sdebug_jdelay != jdelay) { |
4244 | unsigned long iflags; | 4293 | int j, k; |
4245 | int k; | 4294 | struct sdebug_queue *sqp; |
4246 | 4295 | ||
4247 | spin_lock_irqsave(&queued_arr_lock, iflags); | 4296 | block_unblock_all_queues(true); |
4248 | k = find_first_bit(queued_in_use_bm, | 4297 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; |
4249 | scsi_debug_max_queue); | 4298 | ++j, ++sqp) { |
4250 | if (k != scsi_debug_max_queue) | 4299 | k = find_first_bit(sqp->in_use_bm, |
4251 | res = -EBUSY; /* have queued commands */ | 4300 | sdebug_max_queue); |
4252 | else { | 4301 | if (k != sdebug_max_queue) { |
4253 | scsi_debug_delay = delay; | 4302 | res = -EBUSY; /* queued commands */ |
4254 | scsi_debug_ndelay = 0; | 4303 | break; |
4304 | } | ||
4305 | } | ||
4306 | if (res > 0) { | ||
4307 | /* make sure sdebug_defer instances get | ||
4308 | * re-allocated for new delay variant */ | ||
4309 | free_all_queued(); | ||
4310 | sdebug_jdelay = jdelay; | ||
4311 | sdebug_ndelay = 0; | ||
4255 | } | 4312 | } |
4256 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 4313 | block_unblock_all_queues(false); |
4257 | } | 4314 | } |
4258 | return res; | 4315 | return res; |
4259 | } | 4316 | } |
@@ -4263,31 +4320,41 @@ static DRIVER_ATTR_RW(delay); | |||
4263 | 4320 | ||
4264 | static ssize_t ndelay_show(struct device_driver *ddp, char *buf) | 4321 | static ssize_t ndelay_show(struct device_driver *ddp, char *buf) |
4265 | { | 4322 | { |
4266 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay); | 4323 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay); |
4267 | } | 4324 | } |
4268 | /* Returns -EBUSY if ndelay is being changed and commands are queued */ | 4325 | /* Returns -EBUSY if ndelay is being changed and commands are queued */ |
4269 | /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */ | 4326 | /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */ |
4270 | static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, | 4327 | static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, |
4271 | size_t count) | 4328 | size_t count) |
4272 | { | 4329 | { |
4273 | unsigned long iflags; | 4330 | int ndelay, res; |
4274 | int ndelay, res, k; | ||
4275 | 4331 | ||
4276 | if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && | 4332 | if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && |
4277 | (ndelay >= 0) && (ndelay < 1000000000)) { | 4333 | (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) { |
4278 | res = count; | 4334 | res = count; |
4279 | if (scsi_debug_ndelay != ndelay) { | 4335 | if (sdebug_ndelay != ndelay) { |
4280 | spin_lock_irqsave(&queued_arr_lock, iflags); | 4336 | int j, k; |
4281 | k = find_first_bit(queued_in_use_bm, | 4337 | struct sdebug_queue *sqp; |
4282 | scsi_debug_max_queue); | 4338 | |
4283 | if (k != scsi_debug_max_queue) | 4339 | block_unblock_all_queues(true); |
4284 | res = -EBUSY; /* have queued commands */ | 4340 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; |
4285 | else { | 4341 | ++j, ++sqp) { |
4286 | scsi_debug_ndelay = ndelay; | 4342 | k = find_first_bit(sqp->in_use_bm, |
4287 | scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN | 4343 | sdebug_max_queue); |
4288 | : DEF_DELAY; | 4344 | if (k != sdebug_max_queue) { |
4345 | res = -EBUSY; /* queued commands */ | ||
4346 | break; | ||
4347 | } | ||
4348 | } | ||
4349 | if (res > 0) { | ||
4350 | /* make sure sdebug_defer instances get | ||
4351 | * re-allocated for new delay variant */ | ||
4352 | free_all_queued(); | ||
4353 | sdebug_ndelay = ndelay; | ||
4354 | sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN | ||
4355 | : DEF_JDELAY; | ||
4289 | } | 4356 | } |
4290 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 4357 | block_unblock_all_queues(false); |
4291 | } | 4358 | } |
4292 | return res; | 4359 | return res; |
4293 | } | 4360 | } |
@@ -4297,7 +4364,7 @@ static DRIVER_ATTR_RW(ndelay); | |||
4297 | 4364 | ||
4298 | static ssize_t opts_show(struct device_driver *ddp, char *buf) | 4365 | static ssize_t opts_show(struct device_driver *ddp, char *buf) |
4299 | { | 4366 | { |
4300 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); | 4367 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts); |
4301 | } | 4368 | } |
4302 | 4369 | ||
4303 | static ssize_t opts_store(struct device_driver *ddp, const char *buf, | 4370 | static ssize_t opts_store(struct device_driver *ddp, const char *buf, |
@@ -4317,26 +4384,17 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf, | |||
4317 | } | 4384 | } |
4318 | return -EINVAL; | 4385 | return -EINVAL; |
4319 | opts_done: | 4386 | opts_done: |
4320 | scsi_debug_opts = opts; | 4387 | sdebug_opts = opts; |
4321 | if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) | 4388 | sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); |
4322 | sdebug_any_injecting_opt = true; | 4389 | sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); |
4323 | else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) | 4390 | tweak_cmnd_count(); |
4324 | sdebug_any_injecting_opt = true; | ||
4325 | else if (SCSI_DEBUG_OPT_DIF_ERR & opts) | ||
4326 | sdebug_any_injecting_opt = true; | ||
4327 | else if (SCSI_DEBUG_OPT_DIX_ERR & opts) | ||
4328 | sdebug_any_injecting_opt = true; | ||
4329 | else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) | ||
4330 | sdebug_any_injecting_opt = true; | ||
4331 | atomic_set(&sdebug_cmnd_count, 0); | ||
4332 | atomic_set(&sdebug_a_tsf, 0); | ||
4333 | return count; | 4391 | return count; |
4334 | } | 4392 | } |
4335 | static DRIVER_ATTR_RW(opts); | 4393 | static DRIVER_ATTR_RW(opts); |
4336 | 4394 | ||
4337 | static ssize_t ptype_show(struct device_driver *ddp, char *buf) | 4395 | static ssize_t ptype_show(struct device_driver *ddp, char *buf) |
4338 | { | 4396 | { |
4339 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype); | 4397 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype); |
4340 | } | 4398 | } |
4341 | static ssize_t ptype_store(struct device_driver *ddp, const char *buf, | 4399 | static ssize_t ptype_store(struct device_driver *ddp, const char *buf, |
4342 | size_t count) | 4400 | size_t count) |
@@ -4344,7 +4402,7 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf, | |||
4344 | int n; | 4402 | int n; |
4345 | 4403 | ||
4346 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4404 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4347 | scsi_debug_ptype = n; | 4405 | sdebug_ptype = n; |
4348 | return count; | 4406 | return count; |
4349 | } | 4407 | } |
4350 | return -EINVAL; | 4408 | return -EINVAL; |
@@ -4353,7 +4411,7 @@ static DRIVER_ATTR_RW(ptype); | |||
4353 | 4411 | ||
4354 | static ssize_t dsense_show(struct device_driver *ddp, char *buf) | 4412 | static ssize_t dsense_show(struct device_driver *ddp, char *buf) |
4355 | { | 4413 | { |
4356 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense); | 4414 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense); |
4357 | } | 4415 | } |
4358 | static ssize_t dsense_store(struct device_driver *ddp, const char *buf, | 4416 | static ssize_t dsense_store(struct device_driver *ddp, const char *buf, |
4359 | size_t count) | 4417 | size_t count) |
@@ -4361,7 +4419,7 @@ static ssize_t dsense_store(struct device_driver *ddp, const char *buf, | |||
4361 | int n; | 4419 | int n; |
4362 | 4420 | ||
4363 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4421 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4364 | scsi_debug_dsense = n; | 4422 | sdebug_dsense = n; |
4365 | return count; | 4423 | return count; |
4366 | } | 4424 | } |
4367 | return -EINVAL; | 4425 | return -EINVAL; |
@@ -4370,7 +4428,7 @@ static DRIVER_ATTR_RW(dsense); | |||
4370 | 4428 | ||
4371 | static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) | 4429 | static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) |
4372 | { | 4430 | { |
4373 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw); | 4431 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw); |
4374 | } | 4432 | } |
4375 | static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, | 4433 | static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, |
4376 | size_t count) | 4434 | size_t count) |
@@ -4379,11 +4437,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, | |||
4379 | 4437 | ||
4380 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4438 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4381 | n = (n > 0); | 4439 | n = (n > 0); |
4382 | scsi_debug_fake_rw = (scsi_debug_fake_rw > 0); | 4440 | sdebug_fake_rw = (sdebug_fake_rw > 0); |
4383 | if (scsi_debug_fake_rw != n) { | 4441 | if (sdebug_fake_rw != n) { |
4384 | if ((0 == n) && (NULL == fake_storep)) { | 4442 | if ((0 == n) && (NULL == fake_storep)) { |
4385 | unsigned long sz = | 4443 | unsigned long sz = |
4386 | (unsigned long)scsi_debug_dev_size_mb * | 4444 | (unsigned long)sdebug_dev_size_mb * |
4387 | 1048576; | 4445 | 1048576; |
4388 | 4446 | ||
4389 | fake_storep = vmalloc(sz); | 4447 | fake_storep = vmalloc(sz); |
@@ -4393,7 +4451,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, | |||
4393 | } | 4451 | } |
4394 | memset(fake_storep, 0, sz); | 4452 | memset(fake_storep, 0, sz); |
4395 | } | 4453 | } |
4396 | scsi_debug_fake_rw = n; | 4454 | sdebug_fake_rw = n; |
4397 | } | 4455 | } |
4398 | return count; | 4456 | return count; |
4399 | } | 4457 | } |
@@ -4403,7 +4461,7 @@ static DRIVER_ATTR_RW(fake_rw); | |||
4403 | 4461 | ||
4404 | static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) | 4462 | static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) |
4405 | { | 4463 | { |
4406 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); | 4464 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0); |
4407 | } | 4465 | } |
4408 | static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, | 4466 | static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, |
4409 | size_t count) | 4467 | size_t count) |
@@ -4411,7 +4469,7 @@ static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, | |||
4411 | int n; | 4469 | int n; |
4412 | 4470 | ||
4413 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4471 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4414 | scsi_debug_no_lun_0 = n; | 4472 | sdebug_no_lun_0 = n; |
4415 | return count; | 4473 | return count; |
4416 | } | 4474 | } |
4417 | return -EINVAL; | 4475 | return -EINVAL; |
@@ -4420,7 +4478,7 @@ static DRIVER_ATTR_RW(no_lun_0); | |||
4420 | 4478 | ||
4421 | static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) | 4479 | static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) |
4422 | { | 4480 | { |
4423 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); | 4481 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts); |
4424 | } | 4482 | } |
4425 | static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, | 4483 | static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, |
4426 | size_t count) | 4484 | size_t count) |
@@ -4428,7 +4486,7 @@ static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, | |||
4428 | int n; | 4486 | int n; |
4429 | 4487 | ||
4430 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4488 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4431 | scsi_debug_num_tgts = n; | 4489 | sdebug_num_tgts = n; |
4432 | sdebug_max_tgts_luns(); | 4490 | sdebug_max_tgts_luns(); |
4433 | return count; | 4491 | return count; |
4434 | } | 4492 | } |
@@ -4438,19 +4496,19 @@ static DRIVER_ATTR_RW(num_tgts); | |||
4438 | 4496 | ||
4439 | static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) | 4497 | static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) |
4440 | { | 4498 | { |
4441 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb); | 4499 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb); |
4442 | } | 4500 | } |
4443 | static DRIVER_ATTR_RO(dev_size_mb); | 4501 | static DRIVER_ATTR_RO(dev_size_mb); |
4444 | 4502 | ||
4445 | static ssize_t num_parts_show(struct device_driver *ddp, char *buf) | 4503 | static ssize_t num_parts_show(struct device_driver *ddp, char *buf) |
4446 | { | 4504 | { |
4447 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts); | 4505 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts); |
4448 | } | 4506 | } |
4449 | static DRIVER_ATTR_RO(num_parts); | 4507 | static DRIVER_ATTR_RO(num_parts); |
4450 | 4508 | ||
4451 | static ssize_t every_nth_show(struct device_driver *ddp, char *buf) | 4509 | static ssize_t every_nth_show(struct device_driver *ddp, char *buf) |
4452 | { | 4510 | { |
4453 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth); | 4511 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth); |
4454 | } | 4512 | } |
4455 | static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, | 4513 | static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, |
4456 | size_t count) | 4514 | size_t count) |
@@ -4458,8 +4516,12 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, | |||
4458 | int nth; | 4516 | int nth; |
4459 | 4517 | ||
4460 | if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { | 4518 | if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { |
4461 | scsi_debug_every_nth = nth; | 4519 | sdebug_every_nth = nth; |
4462 | atomic_set(&sdebug_cmnd_count, 0); | 4520 | if (nth && !sdebug_statistics) { |
4521 | pr_info("every_nth needs statistics=1, set it\n"); | ||
4522 | sdebug_statistics = true; | ||
4523 | } | ||
4524 | tweak_cmnd_count(); | ||
4463 | return count; | 4525 | return count; |
4464 | } | 4526 | } |
4465 | return -EINVAL; | 4527 | return -EINVAL; |
@@ -4468,7 +4530,7 @@ static DRIVER_ATTR_RW(every_nth); | |||
4468 | 4530 | ||
4469 | static ssize_t max_luns_show(struct device_driver *ddp, char *buf) | 4531 | static ssize_t max_luns_show(struct device_driver *ddp, char *buf) |
4470 | { | 4532 | { |
4471 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns); | 4533 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns); |
4472 | } | 4534 | } |
4473 | static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, | 4535 | static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, |
4474 | size_t count) | 4536 | size_t count) |
@@ -4477,10 +4539,14 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, | |||
4477 | bool changed; | 4539 | bool changed; |
4478 | 4540 | ||
4479 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4541 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4480 | changed = (scsi_debug_max_luns != n); | 4542 | if (n > 256) { |
4481 | scsi_debug_max_luns = n; | 4543 | pr_warn("max_luns can be no more than 256\n"); |
4544 | return -EINVAL; | ||
4545 | } | ||
4546 | changed = (sdebug_max_luns != n); | ||
4547 | sdebug_max_luns = n; | ||
4482 | sdebug_max_tgts_luns(); | 4548 | sdebug_max_tgts_luns(); |
4483 | if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */ | 4549 | if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ |
4484 | struct sdebug_host_info *sdhp; | 4550 | struct sdebug_host_info *sdhp; |
4485 | struct sdebug_dev_info *dp; | 4551 | struct sdebug_dev_info *dp; |
4486 | 4552 | ||
@@ -4503,28 +4569,34 @@ static DRIVER_ATTR_RW(max_luns); | |||
4503 | 4569 | ||
4504 | static ssize_t max_queue_show(struct device_driver *ddp, char *buf) | 4570 | static ssize_t max_queue_show(struct device_driver *ddp, char *buf) |
4505 | { | 4571 | { |
4506 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); | 4572 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue); |
4507 | } | 4573 | } |
4508 | /* N.B. max_queue can be changed while there are queued commands. In flight | 4574 | /* N.B. max_queue can be changed while there are queued commands. In flight |
4509 | * commands beyond the new max_queue will be completed. */ | 4575 | * commands beyond the new max_queue will be completed. */ |
4510 | static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, | 4576 | static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, |
4511 | size_t count) | 4577 | size_t count) |
4512 | { | 4578 | { |
4513 | unsigned long iflags; | 4579 | int j, n, k, a; |
4514 | int n, k; | 4580 | struct sdebug_queue *sqp; |
4515 | 4581 | ||
4516 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && | 4582 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && |
4517 | (n <= SCSI_DEBUG_CANQUEUE)) { | 4583 | (n <= SDEBUG_CANQUEUE)) { |
4518 | spin_lock_irqsave(&queued_arr_lock, iflags); | 4584 | block_unblock_all_queues(true); |
4519 | k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE); | 4585 | k = 0; |
4520 | scsi_debug_max_queue = n; | 4586 | for (j = 0, sqp = sdebug_q_arr; j < submit_queues; |
4521 | if (SCSI_DEBUG_CANQUEUE == k) | 4587 | ++j, ++sqp) { |
4588 | a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE); | ||
4589 | if (a > k) | ||
4590 | k = a; | ||
4591 | } | ||
4592 | sdebug_max_queue = n; | ||
4593 | if (k == SDEBUG_CANQUEUE) | ||
4522 | atomic_set(&retired_max_queue, 0); | 4594 | atomic_set(&retired_max_queue, 0); |
4523 | else if (k >= n) | 4595 | else if (k >= n) |
4524 | atomic_set(&retired_max_queue, k + 1); | 4596 | atomic_set(&retired_max_queue, k + 1); |
4525 | else | 4597 | else |
4526 | atomic_set(&retired_max_queue, 0); | 4598 | atomic_set(&retired_max_queue, 0); |
4527 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 4599 | block_unblock_all_queues(false); |
4528 | return count; | 4600 | return count; |
4529 | } | 4601 | } |
4530 | return -EINVAL; | 4602 | return -EINVAL; |
@@ -4533,19 +4605,19 @@ static DRIVER_ATTR_RW(max_queue); | |||
4533 | 4605 | ||
4534 | static ssize_t no_uld_show(struct device_driver *ddp, char *buf) | 4606 | static ssize_t no_uld_show(struct device_driver *ddp, char *buf) |
4535 | { | 4607 | { |
4536 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld); | 4608 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld); |
4537 | } | 4609 | } |
4538 | static DRIVER_ATTR_RO(no_uld); | 4610 | static DRIVER_ATTR_RO(no_uld); |
4539 | 4611 | ||
4540 | static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) | 4612 | static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) |
4541 | { | 4613 | { |
4542 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); | 4614 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level); |
4543 | } | 4615 | } |
4544 | static DRIVER_ATTR_RO(scsi_level); | 4616 | static DRIVER_ATTR_RO(scsi_level); |
4545 | 4617 | ||
4546 | static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) | 4618 | static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) |
4547 | { | 4619 | { |
4548 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb); | 4620 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb); |
4549 | } | 4621 | } |
4550 | static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, | 4622 | static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, |
4551 | size_t count) | 4623 | size_t count) |
@@ -4554,8 +4626,8 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, | |||
4554 | bool changed; | 4626 | bool changed; |
4555 | 4627 | ||
4556 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4628 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4557 | changed = (scsi_debug_virtual_gb != n); | 4629 | changed = (sdebug_virtual_gb != n); |
4558 | scsi_debug_virtual_gb = n; | 4630 | sdebug_virtual_gb = n; |
4559 | sdebug_capacity = get_sdebug_capacity(); | 4631 | sdebug_capacity = get_sdebug_capacity(); |
4560 | if (changed) { | 4632 | if (changed) { |
4561 | struct sdebug_host_info *sdhp; | 4633 | struct sdebug_host_info *sdhp; |
@@ -4580,9 +4652,12 @@ static DRIVER_ATTR_RW(virtual_gb); | |||
4580 | 4652 | ||
4581 | static ssize_t add_host_show(struct device_driver *ddp, char *buf) | 4653 | static ssize_t add_host_show(struct device_driver *ddp, char *buf) |
4582 | { | 4654 | { |
4583 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); | 4655 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host); |
4584 | } | 4656 | } |
4585 | 4657 | ||
4658 | static int sdebug_add_adapter(void); | ||
4659 | static void sdebug_remove_adapter(void); | ||
4660 | |||
4586 | static ssize_t add_host_store(struct device_driver *ddp, const char *buf, | 4661 | static ssize_t add_host_store(struct device_driver *ddp, const char *buf, |
4587 | size_t count) | 4662 | size_t count) |
4588 | { | 4663 | { |
@@ -4605,7 +4680,7 @@ static DRIVER_ATTR_RW(add_host); | |||
4605 | 4680 | ||
4606 | static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) | 4681 | static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) |
4607 | { | 4682 | { |
4608 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno); | 4683 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno); |
4609 | } | 4684 | } |
4610 | static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, | 4685 | static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, |
4611 | size_t count) | 4686 | size_t count) |
@@ -4613,40 +4688,68 @@ static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, | |||
4613 | int n; | 4688 | int n; |
4614 | 4689 | ||
4615 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4690 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4616 | scsi_debug_vpd_use_hostno = n; | 4691 | sdebug_vpd_use_hostno = n; |
4617 | return count; | 4692 | return count; |
4618 | } | 4693 | } |
4619 | return -EINVAL; | 4694 | return -EINVAL; |
4620 | } | 4695 | } |
4621 | static DRIVER_ATTR_RW(vpd_use_hostno); | 4696 | static DRIVER_ATTR_RW(vpd_use_hostno); |
4622 | 4697 | ||
4698 | static ssize_t statistics_show(struct device_driver *ddp, char *buf) | ||
4699 | { | ||
4700 | return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics); | ||
4701 | } | ||
4702 | static ssize_t statistics_store(struct device_driver *ddp, const char *buf, | ||
4703 | size_t count) | ||
4704 | { | ||
4705 | int n; | ||
4706 | |||
4707 | if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { | ||
4708 | if (n > 0) | ||
4709 | sdebug_statistics = true; | ||
4710 | else { | ||
4711 | clear_queue_stats(); | ||
4712 | sdebug_statistics = false; | ||
4713 | } | ||
4714 | return count; | ||
4715 | } | ||
4716 | return -EINVAL; | ||
4717 | } | ||
4718 | static DRIVER_ATTR_RW(statistics); | ||
4719 | |||
4623 | static ssize_t sector_size_show(struct device_driver *ddp, char *buf) | 4720 | static ssize_t sector_size_show(struct device_driver *ddp, char *buf) |
4624 | { | 4721 | { |
4625 | return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); | 4722 | return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size); |
4626 | } | 4723 | } |
4627 | static DRIVER_ATTR_RO(sector_size); | 4724 | static DRIVER_ATTR_RO(sector_size); |
4628 | 4725 | ||
4726 | static ssize_t submit_queues_show(struct device_driver *ddp, char *buf) | ||
4727 | { | ||
4728 | return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues); | ||
4729 | } | ||
4730 | static DRIVER_ATTR_RO(submit_queues); | ||
4731 | |||
4629 | static ssize_t dix_show(struct device_driver *ddp, char *buf) | 4732 | static ssize_t dix_show(struct device_driver *ddp, char *buf) |
4630 | { | 4733 | { |
4631 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix); | 4734 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix); |
4632 | } | 4735 | } |
4633 | static DRIVER_ATTR_RO(dix); | 4736 | static DRIVER_ATTR_RO(dix); |
4634 | 4737 | ||
4635 | static ssize_t dif_show(struct device_driver *ddp, char *buf) | 4738 | static ssize_t dif_show(struct device_driver *ddp, char *buf) |
4636 | { | 4739 | { |
4637 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif); | 4740 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif); |
4638 | } | 4741 | } |
4639 | static DRIVER_ATTR_RO(dif); | 4742 | static DRIVER_ATTR_RO(dif); |
4640 | 4743 | ||
4641 | static ssize_t guard_show(struct device_driver *ddp, char *buf) | 4744 | static ssize_t guard_show(struct device_driver *ddp, char *buf) |
4642 | { | 4745 | { |
4643 | return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); | 4746 | return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard); |
4644 | } | 4747 | } |
4645 | static DRIVER_ATTR_RO(guard); | 4748 | static DRIVER_ATTR_RO(guard); |
4646 | 4749 | ||
4647 | static ssize_t ato_show(struct device_driver *ddp, char *buf) | 4750 | static ssize_t ato_show(struct device_driver *ddp, char *buf) |
4648 | { | 4751 | { |
4649 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato); | 4752 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato); |
4650 | } | 4753 | } |
4651 | static DRIVER_ATTR_RO(ato); | 4754 | static DRIVER_ATTR_RO(ato); |
4652 | 4755 | ||
@@ -4669,7 +4772,7 @@ static DRIVER_ATTR_RO(map); | |||
4669 | 4772 | ||
4670 | static ssize_t removable_show(struct device_driver *ddp, char *buf) | 4773 | static ssize_t removable_show(struct device_driver *ddp, char *buf) |
4671 | { | 4774 | { |
4672 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0); | 4775 | return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0); |
4673 | } | 4776 | } |
4674 | static ssize_t removable_store(struct device_driver *ddp, const char *buf, | 4777 | static ssize_t removable_store(struct device_driver *ddp, const char *buf, |
4675 | size_t count) | 4778 | size_t count) |
@@ -4677,7 +4780,7 @@ static ssize_t removable_store(struct device_driver *ddp, const char *buf, | |||
4677 | int n; | 4780 | int n; |
4678 | 4781 | ||
4679 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4782 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4680 | scsi_debug_removable = (n > 0); | 4783 | sdebug_removable = (n > 0); |
4681 | return count; | 4784 | return count; |
4682 | } | 4785 | } |
4683 | return -EINVAL; | 4786 | return -EINVAL; |
@@ -4686,32 +4789,17 @@ static DRIVER_ATTR_RW(removable); | |||
4686 | 4789 | ||
4687 | static ssize_t host_lock_show(struct device_driver *ddp, char *buf) | 4790 | static ssize_t host_lock_show(struct device_driver *ddp, char *buf) |
4688 | { | 4791 | { |
4689 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock); | 4792 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock); |
4690 | } | 4793 | } |
4691 | /* Returns -EBUSY if host_lock is being changed and commands are queued */ | 4794 | /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */ |
4692 | static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, | 4795 | static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, |
4693 | size_t count) | 4796 | size_t count) |
4694 | { | 4797 | { |
4695 | int n, res; | 4798 | int n; |
4696 | 4799 | ||
4697 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4800 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4698 | bool new_host_lock = (n > 0); | 4801 | sdebug_host_lock = (n > 0); |
4699 | 4802 | return count; | |
4700 | res = count; | ||
4701 | if (new_host_lock != scsi_debug_host_lock) { | ||
4702 | unsigned long iflags; | ||
4703 | int k; | ||
4704 | |||
4705 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
4706 | k = find_first_bit(queued_in_use_bm, | ||
4707 | scsi_debug_max_queue); | ||
4708 | if (k != scsi_debug_max_queue) | ||
4709 | res = -EBUSY; /* have queued commands */ | ||
4710 | else | ||
4711 | scsi_debug_host_lock = new_host_lock; | ||
4712 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
4713 | } | ||
4714 | return res; | ||
4715 | } | 4803 | } |
4716 | return -EINVAL; | 4804 | return -EINVAL; |
4717 | } | 4805 | } |
@@ -4719,7 +4807,7 @@ static DRIVER_ATTR_RW(host_lock); | |||
4719 | 4807 | ||
4720 | static ssize_t strict_show(struct device_driver *ddp, char *buf) | 4808 | static ssize_t strict_show(struct device_driver *ddp, char *buf) |
4721 | { | 4809 | { |
4722 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict); | 4810 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict); |
4723 | } | 4811 | } |
4724 | static ssize_t strict_store(struct device_driver *ddp, const char *buf, | 4812 | static ssize_t strict_store(struct device_driver *ddp, const char *buf, |
4725 | size_t count) | 4813 | size_t count) |
@@ -4727,13 +4815,19 @@ static ssize_t strict_store(struct device_driver *ddp, const char *buf, | |||
4727 | int n; | 4815 | int n; |
4728 | 4816 | ||
4729 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4817 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4730 | scsi_debug_strict = (n > 0); | 4818 | sdebug_strict = (n > 0); |
4731 | return count; | 4819 | return count; |
4732 | } | 4820 | } |
4733 | return -EINVAL; | 4821 | return -EINVAL; |
4734 | } | 4822 | } |
4735 | static DRIVER_ATTR_RW(strict); | 4823 | static DRIVER_ATTR_RW(strict); |
4736 | 4824 | ||
4825 | static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf) | ||
4826 | { | ||
4827 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl); | ||
4828 | } | ||
4829 | static DRIVER_ATTR_RO(uuid_ctl); | ||
4830 | |||
4737 | 4831 | ||
4738 | /* Note: The following array creates attribute files in the | 4832 | /* Note: The following array creates attribute files in the |
4739 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these | 4833 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these |
@@ -4761,6 +4855,8 @@ static struct attribute *sdebug_drv_attrs[] = { | |||
4761 | &driver_attr_add_host.attr, | 4855 | &driver_attr_add_host.attr, |
4762 | &driver_attr_vpd_use_hostno.attr, | 4856 | &driver_attr_vpd_use_hostno.attr, |
4763 | &driver_attr_sector_size.attr, | 4857 | &driver_attr_sector_size.attr, |
4858 | &driver_attr_statistics.attr, | ||
4859 | &driver_attr_submit_queues.attr, | ||
4764 | &driver_attr_dix.attr, | 4860 | &driver_attr_dix.attr, |
4765 | &driver_attr_dif.attr, | 4861 | &driver_attr_dif.attr, |
4766 | &driver_attr_guard.attr, | 4862 | &driver_attr_guard.attr, |
@@ -4770,6 +4866,7 @@ static struct attribute *sdebug_drv_attrs[] = { | |||
4770 | &driver_attr_host_lock.attr, | 4866 | &driver_attr_host_lock.attr, |
4771 | &driver_attr_ndelay.attr, | 4867 | &driver_attr_ndelay.attr, |
4772 | &driver_attr_strict.attr, | 4868 | &driver_attr_strict.attr, |
4869 | &driver_attr_uuid_ctl.attr, | ||
4773 | NULL, | 4870 | NULL, |
4774 | }; | 4871 | }; |
4775 | ATTRIBUTE_GROUPS(sdebug_drv); | 4872 | ATTRIBUTE_GROUPS(sdebug_drv); |
@@ -4783,33 +4880,33 @@ static int __init scsi_debug_init(void) | |||
4783 | int k; | 4880 | int k; |
4784 | int ret; | 4881 | int ret; |
4785 | 4882 | ||
4786 | atomic_set(&sdebug_cmnd_count, 0); | ||
4787 | atomic_set(&sdebug_completions, 0); | ||
4788 | atomic_set(&retired_max_queue, 0); | 4883 | atomic_set(&retired_max_queue, 0); |
4789 | 4884 | ||
4790 | if (scsi_debug_ndelay >= 1000000000) { | 4885 | if (sdebug_ndelay >= 1000 * 1000 * 1000) { |
4791 | pr_warn("ndelay must be less than 1 second, ignored\n"); | 4886 | pr_warn("ndelay must be less than 1 second, ignored\n"); |
4792 | scsi_debug_ndelay = 0; | 4887 | sdebug_ndelay = 0; |
4793 | } else if (scsi_debug_ndelay > 0) | 4888 | } else if (sdebug_ndelay > 0) |
4794 | scsi_debug_delay = DELAY_OVERRIDDEN; | 4889 | sdebug_jdelay = JDELAY_OVERRIDDEN; |
4795 | 4890 | ||
4796 | switch (scsi_debug_sector_size) { | 4891 | switch (sdebug_sector_size) { |
4797 | case 512: | 4892 | case 512: |
4798 | case 1024: | 4893 | case 1024: |
4799 | case 2048: | 4894 | case 2048: |
4800 | case 4096: | 4895 | case 4096: |
4801 | break; | 4896 | break; |
4802 | default: | 4897 | default: |
4803 | pr_err("invalid sector_size %d\n", scsi_debug_sector_size); | 4898 | pr_err("invalid sector_size %d\n", sdebug_sector_size); |
4804 | return -EINVAL; | 4899 | return -EINVAL; |
4805 | } | 4900 | } |
4806 | 4901 | ||
4807 | switch (scsi_debug_dif) { | 4902 | switch (sdebug_dif) { |
4808 | 4903 | ||
4809 | case SD_DIF_TYPE0_PROTECTION: | 4904 | case SD_DIF_TYPE0_PROTECTION: |
4905 | break; | ||
4810 | case SD_DIF_TYPE1_PROTECTION: | 4906 | case SD_DIF_TYPE1_PROTECTION: |
4811 | case SD_DIF_TYPE2_PROTECTION: | 4907 | case SD_DIF_TYPE2_PROTECTION: |
4812 | case SD_DIF_TYPE3_PROTECTION: | 4908 | case SD_DIF_TYPE3_PROTECTION: |
4909 | have_dif_prot = true; | ||
4813 | break; | 4910 | break; |
4814 | 4911 | ||
4815 | default: | 4912 | default: |
@@ -4817,39 +4914,53 @@ static int __init scsi_debug_init(void) | |||
4817 | return -EINVAL; | 4914 | return -EINVAL; |
4818 | } | 4915 | } |
4819 | 4916 | ||
4820 | if (scsi_debug_guard > 1) { | 4917 | if (sdebug_guard > 1) { |
4821 | pr_err("guard must be 0 or 1\n"); | 4918 | pr_err("guard must be 0 or 1\n"); |
4822 | return -EINVAL; | 4919 | return -EINVAL; |
4823 | } | 4920 | } |
4824 | 4921 | ||
4825 | if (scsi_debug_ato > 1) { | 4922 | if (sdebug_ato > 1) { |
4826 | pr_err("ato must be 0 or 1\n"); | 4923 | pr_err("ato must be 0 or 1\n"); |
4827 | return -EINVAL; | 4924 | return -EINVAL; |
4828 | } | 4925 | } |
4829 | 4926 | ||
4830 | if (scsi_debug_physblk_exp > 15) { | 4927 | if (sdebug_physblk_exp > 15) { |
4831 | pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp); | 4928 | pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp); |
4832 | return -EINVAL; | 4929 | return -EINVAL; |
4833 | } | 4930 | } |
4931 | if (sdebug_max_luns > 256) { | ||
4932 | pr_warn("max_luns can be no more than 256, use default\n"); | ||
4933 | sdebug_max_luns = DEF_MAX_LUNS; | ||
4934 | } | ||
4834 | 4935 | ||
4835 | if (scsi_debug_lowest_aligned > 0x3fff) { | 4936 | if (sdebug_lowest_aligned > 0x3fff) { |
4836 | pr_err("lowest_aligned too big: %u\n", | 4937 | pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned); |
4837 | scsi_debug_lowest_aligned); | ||
4838 | return -EINVAL; | 4938 | return -EINVAL; |
4839 | } | 4939 | } |
4840 | 4940 | ||
4841 | if (scsi_debug_dev_size_mb < 1) | 4941 | if (submit_queues < 1) { |
4842 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | 4942 | pr_err("submit_queues must be 1 or more\n"); |
4843 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; | 4943 | return -EINVAL; |
4844 | sdebug_store_sectors = sz / scsi_debug_sector_size; | 4944 | } |
4945 | sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), | ||
4946 | GFP_KERNEL); | ||
4947 | if (sdebug_q_arr == NULL) | ||
4948 | return -ENOMEM; | ||
4949 | for (k = 0; k < submit_queues; ++k) | ||
4950 | spin_lock_init(&sdebug_q_arr[k].qc_lock); | ||
4951 | |||
4952 | if (sdebug_dev_size_mb < 1) | ||
4953 | sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | ||
4954 | sz = (unsigned long)sdebug_dev_size_mb * 1048576; | ||
4955 | sdebug_store_sectors = sz / sdebug_sector_size; | ||
4845 | sdebug_capacity = get_sdebug_capacity(); | 4956 | sdebug_capacity = get_sdebug_capacity(); |
4846 | 4957 | ||
4847 | /* play around with geometry, don't waste too much on track 0 */ | 4958 | /* play around with geometry, don't waste too much on track 0 */ |
4848 | sdebug_heads = 8; | 4959 | sdebug_heads = 8; |
4849 | sdebug_sectors_per = 32; | 4960 | sdebug_sectors_per = 32; |
4850 | if (scsi_debug_dev_size_mb >= 256) | 4961 | if (sdebug_dev_size_mb >= 256) |
4851 | sdebug_heads = 64; | 4962 | sdebug_heads = 64; |
4852 | else if (scsi_debug_dev_size_mb >= 16) | 4963 | else if (sdebug_dev_size_mb >= 16) |
4853 | sdebug_heads = 32; | 4964 | sdebug_heads = 32; |
4854 | sdebug_cylinders_per = (unsigned long)sdebug_capacity / | 4965 | sdebug_cylinders_per = (unsigned long)sdebug_capacity / |
4855 | (sdebug_sectors_per * sdebug_heads); | 4966 | (sdebug_sectors_per * sdebug_heads); |
@@ -4861,18 +4972,19 @@ static int __init scsi_debug_init(void) | |||
4861 | (sdebug_sectors_per * sdebug_heads); | 4972 | (sdebug_sectors_per * sdebug_heads); |
4862 | } | 4973 | } |
4863 | 4974 | ||
4864 | if (0 == scsi_debug_fake_rw) { | 4975 | if (sdebug_fake_rw == 0) { |
4865 | fake_storep = vmalloc(sz); | 4976 | fake_storep = vmalloc(sz); |
4866 | if (NULL == fake_storep) { | 4977 | if (NULL == fake_storep) { |
4867 | pr_err("out of memory, 1\n"); | 4978 | pr_err("out of memory, 1\n"); |
4868 | return -ENOMEM; | 4979 | ret = -ENOMEM; |
4980 | goto free_q_arr; | ||
4869 | } | 4981 | } |
4870 | memset(fake_storep, 0, sz); | 4982 | memset(fake_storep, 0, sz); |
4871 | if (scsi_debug_num_parts > 0) | 4983 | if (sdebug_num_parts > 0) |
4872 | sdebug_build_parts(fake_storep, sz); | 4984 | sdebug_build_parts(fake_storep, sz); |
4873 | } | 4985 | } |
4874 | 4986 | ||
4875 | if (scsi_debug_dix) { | 4987 | if (sdebug_dix) { |
4876 | int dif_size; | 4988 | int dif_size; |
4877 | 4989 | ||
4878 | dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); | 4990 | dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); |
@@ -4891,20 +5003,21 @@ static int __init scsi_debug_init(void) | |||
4891 | 5003 | ||
4892 | /* Logical Block Provisioning */ | 5004 | /* Logical Block Provisioning */ |
4893 | if (scsi_debug_lbp()) { | 5005 | if (scsi_debug_lbp()) { |
4894 | scsi_debug_unmap_max_blocks = | 5006 | sdebug_unmap_max_blocks = |
4895 | clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); | 5007 | clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU); |
4896 | 5008 | ||
4897 | scsi_debug_unmap_max_desc = | 5009 | sdebug_unmap_max_desc = |
4898 | clamp(scsi_debug_unmap_max_desc, 0U, 256U); | 5010 | clamp(sdebug_unmap_max_desc, 0U, 256U); |
4899 | 5011 | ||
4900 | scsi_debug_unmap_granularity = | 5012 | sdebug_unmap_granularity = |
4901 | clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); | 5013 | clamp(sdebug_unmap_granularity, 1U, 0xffffffffU); |
4902 | 5014 | ||
4903 | if (scsi_debug_unmap_alignment && | 5015 | if (sdebug_unmap_alignment && |
4904 | scsi_debug_unmap_granularity <= | 5016 | sdebug_unmap_granularity <= |
4905 | scsi_debug_unmap_alignment) { | 5017 | sdebug_unmap_alignment) { |
4906 | pr_err("ERR: unmap_granularity <= unmap_alignment\n"); | 5018 | pr_err("ERR: unmap_granularity <= unmap_alignment\n"); |
4907 | return -EINVAL; | 5019 | ret = -EINVAL; |
5020 | goto free_vm; | ||
4908 | } | 5021 | } |
4909 | 5022 | ||
4910 | map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; | 5023 | map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; |
@@ -4921,7 +5034,7 @@ static int __init scsi_debug_init(void) | |||
4921 | bitmap_zero(map_storep, map_size); | 5034 | bitmap_zero(map_storep, map_size); |
4922 | 5035 | ||
4923 | /* Map first 1KB for partition table */ | 5036 | /* Map first 1KB for partition table */ |
4924 | if (scsi_debug_num_parts) | 5037 | if (sdebug_num_parts) |
4925 | map_region(0, 2); | 5038 | map_region(0, 2); |
4926 | } | 5039 | } |
4927 | 5040 | ||
@@ -4942,8 +5055,8 @@ static int __init scsi_debug_init(void) | |||
4942 | goto bus_unreg; | 5055 | goto bus_unreg; |
4943 | } | 5056 | } |
4944 | 5057 | ||
4945 | host_to_add = scsi_debug_add_host; | 5058 | host_to_add = sdebug_add_host; |
4946 | scsi_debug_add_host = 0; | 5059 | sdebug_add_host = 0; |
4947 | 5060 | ||
4948 | for (k = 0; k < host_to_add; k++) { | 5061 | for (k = 0; k < host_to_add; k++) { |
4949 | if (sdebug_add_adapter()) { | 5062 | if (sdebug_add_adapter()) { |
@@ -4952,8 +5065,8 @@ static int __init scsi_debug_init(void) | |||
4952 | } | 5065 | } |
4953 | } | 5066 | } |
4954 | 5067 | ||
4955 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 5068 | if (sdebug_verbose) |
4956 | pr_info("built %d host(s)\n", scsi_debug_add_host); | 5069 | pr_info("built %d host(s)\n", sdebug_add_host); |
4957 | 5070 | ||
4958 | return 0; | 5071 | return 0; |
4959 | 5072 | ||
@@ -4965,13 +5078,14 @@ free_vm: | |||
4965 | vfree(map_storep); | 5078 | vfree(map_storep); |
4966 | vfree(dif_storep); | 5079 | vfree(dif_storep); |
4967 | vfree(fake_storep); | 5080 | vfree(fake_storep); |
4968 | 5081 | free_q_arr: | |
5082 | kfree(sdebug_q_arr); | ||
4969 | return ret; | 5083 | return ret; |
4970 | } | 5084 | } |
4971 | 5085 | ||
4972 | static void __exit scsi_debug_exit(void) | 5086 | static void __exit scsi_debug_exit(void) |
4973 | { | 5087 | { |
4974 | int k = scsi_debug_add_host; | 5088 | int k = sdebug_add_host; |
4975 | 5089 | ||
4976 | stop_all_queued(); | 5090 | stop_all_queued(); |
4977 | free_all_queued(); | 5091 | free_all_queued(); |
@@ -4983,6 +5097,7 @@ static void __exit scsi_debug_exit(void) | |||
4983 | 5097 | ||
4984 | vfree(dif_storep); | 5098 | vfree(dif_storep); |
4985 | vfree(fake_storep); | 5099 | vfree(fake_storep); |
5100 | kfree(sdebug_q_arr); | ||
4986 | } | 5101 | } |
4987 | 5102 | ||
4988 | device_initcall(scsi_debug_init); | 5103 | device_initcall(scsi_debug_init); |
@@ -5011,7 +5126,7 @@ static int sdebug_add_adapter(void) | |||
5011 | 5126 | ||
5012 | INIT_LIST_HEAD(&sdbg_host->dev_info_list); | 5127 | INIT_LIST_HEAD(&sdbg_host->dev_info_list); |
5013 | 5128 | ||
5014 | devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; | 5129 | devs_per_host = sdebug_num_tgts * sdebug_max_luns; |
5015 | for (k = 0; k < devs_per_host; k++) { | 5130 | for (k = 0; k < devs_per_host; k++) { |
5016 | sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); | 5131 | sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); |
5017 | if (!sdbg_devinfo) { | 5132 | if (!sdbg_devinfo) { |
@@ -5028,14 +5143,14 @@ static int sdebug_add_adapter(void) | |||
5028 | sdbg_host->dev.bus = &pseudo_lld_bus; | 5143 | sdbg_host->dev.bus = &pseudo_lld_bus; |
5029 | sdbg_host->dev.parent = pseudo_primary; | 5144 | sdbg_host->dev.parent = pseudo_primary; |
5030 | sdbg_host->dev.release = &sdebug_release_adapter; | 5145 | sdbg_host->dev.release = &sdebug_release_adapter; |
5031 | dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); | 5146 | dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host); |
5032 | 5147 | ||
5033 | error = device_register(&sdbg_host->dev); | 5148 | error = device_register(&sdbg_host->dev); |
5034 | 5149 | ||
5035 | if (error) | 5150 | if (error) |
5036 | goto clean; | 5151 | goto clean; |
5037 | 5152 | ||
5038 | ++scsi_debug_add_host; | 5153 | ++sdebug_add_host; |
5039 | return error; | 5154 | return error; |
5040 | 5155 | ||
5041 | clean: | 5156 | clean: |
@@ -5064,78 +5179,54 @@ static void sdebug_remove_adapter(void) | |||
5064 | if (!sdbg_host) | 5179 | if (!sdbg_host) |
5065 | return; | 5180 | return; |
5066 | 5181 | ||
5067 | device_unregister(&sdbg_host->dev); | 5182 | device_unregister(&sdbg_host->dev); |
5068 | --scsi_debug_add_host; | 5183 | --sdebug_add_host; |
5069 | } | 5184 | } |
5070 | 5185 | ||
5071 | static int | 5186 | static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) |
5072 | sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) | ||
5073 | { | 5187 | { |
5074 | int num_in_q = 0; | 5188 | int num_in_q = 0; |
5075 | unsigned long iflags; | ||
5076 | struct sdebug_dev_info *devip; | 5189 | struct sdebug_dev_info *devip; |
5077 | 5190 | ||
5078 | spin_lock_irqsave(&queued_arr_lock, iflags); | 5191 | block_unblock_all_queues(true); |
5079 | devip = (struct sdebug_dev_info *)sdev->hostdata; | 5192 | devip = (struct sdebug_dev_info *)sdev->hostdata; |
5080 | if (NULL == devip) { | 5193 | if (NULL == devip) { |
5081 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 5194 | block_unblock_all_queues(false); |
5082 | return -ENODEV; | 5195 | return -ENODEV; |
5083 | } | 5196 | } |
5084 | num_in_q = atomic_read(&devip->num_in_q); | 5197 | num_in_q = atomic_read(&devip->num_in_q); |
5085 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
5086 | 5198 | ||
5087 | if (qdepth < 1) | 5199 | if (qdepth < 1) |
5088 | qdepth = 1; | 5200 | qdepth = 1; |
5089 | /* allow to exceed max host queued_arr elements for testing */ | 5201 | /* allow to exceed max host qc_arr elements for testing */ |
5090 | if (qdepth > SCSI_DEBUG_CANQUEUE + 10) | 5202 | if (qdepth > SDEBUG_CANQUEUE + 10) |
5091 | qdepth = SCSI_DEBUG_CANQUEUE + 10; | 5203 | qdepth = SDEBUG_CANQUEUE + 10; |
5092 | scsi_change_queue_depth(sdev, qdepth); | 5204 | scsi_change_queue_depth(sdev, qdepth); |
5093 | 5205 | ||
5094 | if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { | 5206 | if (SDEBUG_OPT_Q_NOISE & sdebug_opts) { |
5095 | sdev_printk(KERN_INFO, sdev, | 5207 | sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", |
5096 | "%s: qdepth=%d, num_in_q=%d\n", | ||
5097 | __func__, qdepth, num_in_q); | 5208 | __func__, qdepth, num_in_q); |
5098 | } | 5209 | } |
5210 | block_unblock_all_queues(false); | ||
5099 | return sdev->queue_depth; | 5211 | return sdev->queue_depth; |
5100 | } | 5212 | } |
5101 | 5213 | ||
5102 | static int | 5214 | static bool fake_timeout(struct scsi_cmnd *scp) |
5103 | check_inject(struct scsi_cmnd *scp) | ||
5104 | { | 5215 | { |
5105 | struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); | 5216 | if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { |
5106 | 5217 | if (sdebug_every_nth < -1) | |
5107 | memset(ep, 0, sizeof(struct sdebug_scmd_extra_t)); | 5218 | sdebug_every_nth = -1; |
5108 | 5219 | if (SDEBUG_OPT_TIMEOUT & sdebug_opts) | |
5109 | if (atomic_inc_return(&sdebug_cmnd_count) >= | 5220 | return true; /* ignore command causing timeout */ |
5110 | abs(scsi_debug_every_nth)) { | 5221 | else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts && |
5111 | atomic_set(&sdebug_cmnd_count, 0); | ||
5112 | if (scsi_debug_every_nth < -1) | ||
5113 | scsi_debug_every_nth = -1; | ||
5114 | if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) | ||
5115 | return 1; /* ignore command causing timeout */ | ||
5116 | else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts && | ||
5117 | scsi_medium_access_command(scp)) | 5222 | scsi_medium_access_command(scp)) |
5118 | return 1; /* time out reads and writes */ | 5223 | return true; /* time out reads and writes */ |
5119 | if (sdebug_any_injecting_opt) { | ||
5120 | int opts = scsi_debug_opts; | ||
5121 | |||
5122 | if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) | ||
5123 | ep->inj_recovered = true; | ||
5124 | else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) | ||
5125 | ep->inj_transport = true; | ||
5126 | else if (SCSI_DEBUG_OPT_DIF_ERR & opts) | ||
5127 | ep->inj_dif = true; | ||
5128 | else if (SCSI_DEBUG_OPT_DIX_ERR & opts) | ||
5129 | ep->inj_dix = true; | ||
5130 | else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) | ||
5131 | ep->inj_short = true; | ||
5132 | } | ||
5133 | } | 5224 | } |
5134 | return 0; | 5225 | return false; |
5135 | } | 5226 | } |
5136 | 5227 | ||
5137 | static int | 5228 | static int scsi_debug_queuecommand(struct Scsi_Host *shost, |
5138 | scsi_debug_queuecommand(struct scsi_cmnd *scp) | 5229 | struct scsi_cmnd *scp) |
5139 | { | 5230 | { |
5140 | u8 sdeb_i; | 5231 | u8 sdeb_i; |
5141 | struct scsi_device *sdp = scp->device; | 5232 | struct scsi_device *sdp = scp->device; |
@@ -5146,15 +5237,16 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) | |||
5146 | int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); | 5237 | int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); |
5147 | int k, na; | 5238 | int k, na; |
5148 | int errsts = 0; | 5239 | int errsts = 0; |
5149 | int errsts_no_connect = DID_NO_CONNECT << 16; | ||
5150 | u32 flags; | 5240 | u32 flags; |
5151 | u16 sa; | 5241 | u16 sa; |
5152 | u8 opcode = cmd[0]; | 5242 | u8 opcode = cmd[0]; |
5153 | bool has_wlun_rl; | 5243 | bool has_wlun_rl; |
5154 | bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); | ||
5155 | 5244 | ||
5156 | scsi_set_resid(scp, 0); | 5245 | scsi_set_resid(scp, 0); |
5157 | if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { | 5246 | if (sdebug_statistics) |
5247 | atomic_inc(&sdebug_cmnd_count); | ||
5248 | if (unlikely(sdebug_verbose && | ||
5249 | !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { | ||
5158 | char b[120]; | 5250 | char b[120]; |
5159 | int n, len, sb; | 5251 | int n, len, sb; |
5160 | 5252 | ||
@@ -5167,19 +5259,25 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) | |||
5167 | n += scnprintf(b + n, sb - n, "%02x ", | 5259 | n += scnprintf(b + n, sb - n, "%02x ", |
5168 | (u32)cmd[k]); | 5260 | (u32)cmd[k]); |
5169 | } | 5261 | } |
5170 | sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); | 5262 | if (sdebug_mq_active) |
5263 | sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n", | ||
5264 | my_name, blk_mq_unique_tag(scp->request), | ||
5265 | b); | ||
5266 | else | ||
5267 | sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, | ||
5268 | b); | ||
5171 | } | 5269 | } |
5172 | has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); | 5270 | has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); |
5173 | if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) | 5271 | if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)) |
5174 | return schedule_resp(scp, NULL, errsts_no_connect, 0); | 5272 | goto err_out; |
5175 | 5273 | ||
5176 | sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ | 5274 | sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ |
5177 | oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ | 5275 | oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ |
5178 | devip = (struct sdebug_dev_info *)sdp->hostdata; | 5276 | devip = (struct sdebug_dev_info *)sdp->hostdata; |
5179 | if (!devip) { | 5277 | if (unlikely(!devip)) { |
5180 | devip = devInfoReg(sdp); | 5278 | devip = find_build_dev_info(sdp); |
5181 | if (NULL == devip) | 5279 | if (NULL == devip) |
5182 | return schedule_resp(scp, NULL, errsts_no_connect, 0); | 5280 | goto err_out; |
5183 | } | 5281 | } |
5184 | na = oip->num_attached; | 5282 | na = oip->num_attached; |
5185 | r_pfp = oip->pfp; | 5283 | r_pfp = oip->pfp; |
@@ -5211,18 +5309,18 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) | |||
5211 | } | 5309 | } |
5212 | } /* else (when na==0) we assume the oip is a match */ | 5310 | } /* else (when na==0) we assume the oip is a match */ |
5213 | flags = oip->flags; | 5311 | flags = oip->flags; |
5214 | if (F_INV_OP & flags) { | 5312 | if (unlikely(F_INV_OP & flags)) { |
5215 | mk_sense_invalid_opcode(scp); | 5313 | mk_sense_invalid_opcode(scp); |
5216 | goto check_cond; | 5314 | goto check_cond; |
5217 | } | 5315 | } |
5218 | if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) { | 5316 | if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) { |
5219 | if (debug) | 5317 | if (sdebug_verbose) |
5220 | sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: " | 5318 | sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n", |
5221 | "0x%x not supported for wlun\n", opcode); | 5319 | my_name, opcode, " supported for wlun"); |
5222 | mk_sense_invalid_opcode(scp); | 5320 | mk_sense_invalid_opcode(scp); |
5223 | goto check_cond; | 5321 | goto check_cond; |
5224 | } | 5322 | } |
5225 | if (scsi_debug_strict) { /* check cdb against mask */ | 5323 | if (unlikely(sdebug_strict)) { /* check cdb against mask */ |
5226 | u8 rem; | 5324 | u8 rem; |
5227 | int j; | 5325 | int j; |
5228 | 5326 | ||
@@ -5238,52 +5336,40 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) | |||
5238 | } | 5336 | } |
5239 | } | 5337 | } |
5240 | } | 5338 | } |
5241 | if (!(F_SKIP_UA & flags) && | 5339 | if (unlikely(!(F_SKIP_UA & flags) && |
5242 | SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) { | 5340 | find_first_bit(devip->uas_bm, |
5243 | errsts = check_readiness(scp, UAS_ONLY, devip); | 5341 | SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) { |
5342 | errsts = make_ua(scp, devip); | ||
5244 | if (errsts) | 5343 | if (errsts) |
5245 | goto check_cond; | 5344 | goto check_cond; |
5246 | } | 5345 | } |
5247 | if ((F_M_ACCESS & flags) && devip->stopped) { | 5346 | if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) { |
5248 | mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2); | 5347 | mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2); |
5249 | if (debug) | 5348 | if (sdebug_verbose) |
5250 | sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: " | 5349 | sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: " |
5251 | "%s\n", my_name, "initializing command " | 5350 | "%s\n", my_name, "initializing command " |
5252 | "required"); | 5351 | "required"); |
5253 | errsts = check_condition_result; | 5352 | errsts = check_condition_result; |
5254 | goto fini; | 5353 | goto fini; |
5255 | } | 5354 | } |
5256 | if (scsi_debug_fake_rw && (F_FAKE_RW & flags)) | 5355 | if (sdebug_fake_rw && (F_FAKE_RW & flags)) |
5257 | goto fini; | 5356 | goto fini; |
5258 | if (scsi_debug_every_nth) { | 5357 | if (unlikely(sdebug_every_nth)) { |
5259 | if (check_inject(scp)) | 5358 | if (fake_timeout(scp)) |
5260 | return 0; /* ignore command: make trouble */ | 5359 | return 0; /* ignore command: make trouble */ |
5261 | } | 5360 | } |
5262 | if (oip->pfp) /* if this command has a resp_* function, call it */ | 5361 | if (likely(oip->pfp)) |
5263 | errsts = oip->pfp(scp, devip); | 5362 | errsts = oip->pfp(scp, devip); /* calls a resp_* function */ |
5264 | else if (r_pfp) /* if leaf function ptr NULL, try the root's */ | 5363 | else if (r_pfp) /* if leaf function ptr NULL, try the root's */ |
5265 | errsts = r_pfp(scp, devip); | 5364 | errsts = r_pfp(scp, devip); |
5266 | 5365 | ||
5267 | fini: | 5366 | fini: |
5268 | return schedule_resp(scp, devip, errsts, | 5367 | return schedule_resp(scp, devip, errsts, |
5269 | ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay)); | 5368 | ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay)); |
5270 | check_cond: | 5369 | check_cond: |
5271 | return schedule_resp(scp, devip, check_condition_result, 0); | 5370 | return schedule_resp(scp, devip, check_condition_result, 0); |
5272 | } | 5371 | err_out: |
5273 | 5372 | return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0); | |
5274 | static int | ||
5275 | sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | ||
5276 | { | ||
5277 | if (scsi_debug_host_lock) { | ||
5278 | unsigned long iflags; | ||
5279 | int rc; | ||
5280 | |||
5281 | spin_lock_irqsave(shost->host_lock, iflags); | ||
5282 | rc = scsi_debug_queuecommand(cmd); | ||
5283 | spin_unlock_irqrestore(shost->host_lock, iflags); | ||
5284 | return rc; | ||
5285 | } else | ||
5286 | return scsi_debug_queuecommand(cmd); | ||
5287 | } | 5373 | } |
5288 | 5374 | ||
5289 | static struct scsi_host_template sdebug_driver_template = { | 5375 | static struct scsi_host_template sdebug_driver_template = { |
@@ -5296,36 +5382,34 @@ static struct scsi_host_template sdebug_driver_template = { | |||
5296 | .slave_configure = scsi_debug_slave_configure, | 5382 | .slave_configure = scsi_debug_slave_configure, |
5297 | .slave_destroy = scsi_debug_slave_destroy, | 5383 | .slave_destroy = scsi_debug_slave_destroy, |
5298 | .ioctl = scsi_debug_ioctl, | 5384 | .ioctl = scsi_debug_ioctl, |
5299 | .queuecommand = sdebug_queuecommand_lock_or_not, | 5385 | .queuecommand = scsi_debug_queuecommand, |
5300 | .change_queue_depth = sdebug_change_qdepth, | 5386 | .change_queue_depth = sdebug_change_qdepth, |
5301 | .eh_abort_handler = scsi_debug_abort, | 5387 | .eh_abort_handler = scsi_debug_abort, |
5302 | .eh_device_reset_handler = scsi_debug_device_reset, | 5388 | .eh_device_reset_handler = scsi_debug_device_reset, |
5303 | .eh_target_reset_handler = scsi_debug_target_reset, | 5389 | .eh_target_reset_handler = scsi_debug_target_reset, |
5304 | .eh_bus_reset_handler = scsi_debug_bus_reset, | 5390 | .eh_bus_reset_handler = scsi_debug_bus_reset, |
5305 | .eh_host_reset_handler = scsi_debug_host_reset, | 5391 | .eh_host_reset_handler = scsi_debug_host_reset, |
5306 | .can_queue = SCSI_DEBUG_CANQUEUE, | 5392 | .can_queue = SDEBUG_CANQUEUE, |
5307 | .this_id = 7, | 5393 | .this_id = 7, |
5308 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 5394 | .sg_tablesize = SG_MAX_SEGMENTS, |
5309 | .cmd_per_lun = DEF_CMD_PER_LUN, | 5395 | .cmd_per_lun = DEF_CMD_PER_LUN, |
5310 | .max_sectors = -1U, | 5396 | .max_sectors = -1U, |
5311 | .use_clustering = DISABLE_CLUSTERING, | 5397 | .use_clustering = DISABLE_CLUSTERING, |
5312 | .module = THIS_MODULE, | 5398 | .module = THIS_MODULE, |
5313 | .track_queue_depth = 1, | 5399 | .track_queue_depth = 1, |
5314 | .cmd_size = sizeof(struct sdebug_scmd_extra_t), | ||
5315 | }; | 5400 | }; |
5316 | 5401 | ||
5317 | static int sdebug_driver_probe(struct device * dev) | 5402 | static int sdebug_driver_probe(struct device * dev) |
5318 | { | 5403 | { |
5319 | int error = 0; | 5404 | int error = 0; |
5320 | int opts; | ||
5321 | struct sdebug_host_info *sdbg_host; | 5405 | struct sdebug_host_info *sdbg_host; |
5322 | struct Scsi_Host *hpnt; | 5406 | struct Scsi_Host *hpnt; |
5323 | int host_prot; | 5407 | int hprot; |
5324 | 5408 | ||
5325 | sdbg_host = to_sdebug_host(dev); | 5409 | sdbg_host = to_sdebug_host(dev); |
5326 | 5410 | ||
5327 | sdebug_driver_template.can_queue = scsi_debug_max_queue; | 5411 | sdebug_driver_template.can_queue = sdebug_max_queue; |
5328 | if (scsi_debug_clustering) | 5412 | if (sdebug_clustering) |
5329 | sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; | 5413 | sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; |
5330 | hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); | 5414 | hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); |
5331 | if (NULL == hpnt) { | 5415 | if (NULL == hpnt) { |
@@ -5333,72 +5417,75 @@ static int sdebug_driver_probe(struct device * dev) | |||
5333 | error = -ENODEV; | 5417 | error = -ENODEV; |
5334 | return error; | 5418 | return error; |
5335 | } | 5419 | } |
5420 | if (submit_queues > nr_cpu_ids) { | ||
5421 | pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n", | ||
5422 | my_name, submit_queues, nr_cpu_ids); | ||
5423 | submit_queues = nr_cpu_ids; | ||
5424 | } | ||
5425 | /* Decide whether to tell scsi subsystem that we want mq */ | ||
5426 | /* Following should give the same answer for each host */ | ||
5427 | sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1); | ||
5428 | if (sdebug_mq_active) | ||
5429 | hpnt->nr_hw_queues = submit_queues; | ||
5336 | 5430 | ||
5337 | sdbg_host->shost = hpnt; | 5431 | sdbg_host->shost = hpnt; |
5338 | *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; | 5432 | *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; |
5339 | if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id)) | 5433 | if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) |
5340 | hpnt->max_id = scsi_debug_num_tgts + 1; | 5434 | hpnt->max_id = sdebug_num_tgts + 1; |
5341 | else | 5435 | else |
5342 | hpnt->max_id = scsi_debug_num_tgts; | 5436 | hpnt->max_id = sdebug_num_tgts; |
5343 | /* = scsi_debug_max_luns; */ | 5437 | /* = sdebug_max_luns; */ |
5344 | hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; | 5438 | hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; |
5345 | 5439 | ||
5346 | host_prot = 0; | 5440 | hprot = 0; |
5347 | 5441 | ||
5348 | switch (scsi_debug_dif) { | 5442 | switch (sdebug_dif) { |
5349 | 5443 | ||
5350 | case SD_DIF_TYPE1_PROTECTION: | 5444 | case SD_DIF_TYPE1_PROTECTION: |
5351 | host_prot = SHOST_DIF_TYPE1_PROTECTION; | 5445 | hprot = SHOST_DIF_TYPE1_PROTECTION; |
5352 | if (scsi_debug_dix) | 5446 | if (sdebug_dix) |
5353 | host_prot |= SHOST_DIX_TYPE1_PROTECTION; | 5447 | hprot |= SHOST_DIX_TYPE1_PROTECTION; |
5354 | break; | 5448 | break; |
5355 | 5449 | ||
5356 | case SD_DIF_TYPE2_PROTECTION: | 5450 | case SD_DIF_TYPE2_PROTECTION: |
5357 | host_prot = SHOST_DIF_TYPE2_PROTECTION; | 5451 | hprot = SHOST_DIF_TYPE2_PROTECTION; |
5358 | if (scsi_debug_dix) | 5452 | if (sdebug_dix) |
5359 | host_prot |= SHOST_DIX_TYPE2_PROTECTION; | 5453 | hprot |= SHOST_DIX_TYPE2_PROTECTION; |
5360 | break; | 5454 | break; |
5361 | 5455 | ||
5362 | case SD_DIF_TYPE3_PROTECTION: | 5456 | case SD_DIF_TYPE3_PROTECTION: |
5363 | host_prot = SHOST_DIF_TYPE3_PROTECTION; | 5457 | hprot = SHOST_DIF_TYPE3_PROTECTION; |
5364 | if (scsi_debug_dix) | 5458 | if (sdebug_dix) |
5365 | host_prot |= SHOST_DIX_TYPE3_PROTECTION; | 5459 | hprot |= SHOST_DIX_TYPE3_PROTECTION; |
5366 | break; | 5460 | break; |
5367 | 5461 | ||
5368 | default: | 5462 | default: |
5369 | if (scsi_debug_dix) | 5463 | if (sdebug_dix) |
5370 | host_prot |= SHOST_DIX_TYPE0_PROTECTION; | 5464 | hprot |= SHOST_DIX_TYPE0_PROTECTION; |
5371 | break; | 5465 | break; |
5372 | } | 5466 | } |
5373 | 5467 | ||
5374 | scsi_host_set_prot(hpnt, host_prot); | 5468 | scsi_host_set_prot(hpnt, hprot); |
5375 | 5469 | ||
5376 | pr_info("host protection%s%s%s%s%s%s%s\n", | 5470 | if (have_dif_prot || sdebug_dix) |
5377 | (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", | 5471 | pr_info("host protection%s%s%s%s%s%s%s\n", |
5378 | (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", | 5472 | (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", |
5379 | (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", | 5473 | (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", |
5380 | (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", | 5474 | (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", |
5381 | (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", | 5475 | (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", |
5382 | (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", | 5476 | (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", |
5383 | (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); | 5477 | (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", |
5478 | (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); | ||
5384 | 5479 | ||
5385 | if (scsi_debug_guard == 1) | 5480 | if (sdebug_guard == 1) |
5386 | scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); | 5481 | scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); |
5387 | else | 5482 | else |
5388 | scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); | 5483 | scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); |
5389 | 5484 | ||
5390 | opts = scsi_debug_opts; | 5485 | sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts); |
5391 | if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) | 5486 | sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts); |
5392 | sdebug_any_injecting_opt = true; | 5487 | if (sdebug_every_nth) /* need stats counters for every_nth */ |
5393 | else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) | 5488 | sdebug_statistics = true; |
5394 | sdebug_any_injecting_opt = true; | ||
5395 | else if (SCSI_DEBUG_OPT_DIF_ERR & opts) | ||
5396 | sdebug_any_injecting_opt = true; | ||
5397 | else if (SCSI_DEBUG_OPT_DIX_ERR & opts) | ||
5398 | sdebug_any_injecting_opt = true; | ||
5399 | else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) | ||
5400 | sdebug_any_injecting_opt = true; | ||
5401 | |||
5402 | error = scsi_add_host(hpnt, &sdbg_host->dev); | 5489 | error = scsi_add_host(hpnt, &sdbg_host->dev); |
5403 | if (error) { | 5490 | if (error) { |
5404 | pr_err("scsi_add_host failed\n"); | 5491 | pr_err("scsi_add_host failed\n"); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8106515d1df8..b2e332af0f51 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -14,8 +14,6 @@ | |||
14 | #include <linux/completion.h> | 14 | #include <linux/completion.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/export.h> | 16 | #include <linux/export.h> |
17 | #include <linux/mempool.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/init.h> | 17 | #include <linux/init.h> |
20 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
21 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
@@ -40,39 +38,6 @@ | |||
40 | #include "scsi_logging.h" | 38 | #include "scsi_logging.h" |
41 | 39 | ||
42 | 40 | ||
43 | #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) | ||
44 | #define SG_MEMPOOL_SIZE 2 | ||
45 | |||
46 | struct scsi_host_sg_pool { | ||
47 | size_t size; | ||
48 | char *name; | ||
49 | struct kmem_cache *slab; | ||
50 | mempool_t *pool; | ||
51 | }; | ||
52 | |||
53 | #define SP(x) { .size = x, "sgpool-" __stringify(x) } | ||
54 | #if (SCSI_MAX_SG_SEGMENTS < 32) | ||
55 | #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) | ||
56 | #endif | ||
57 | static struct scsi_host_sg_pool scsi_sg_pools[] = { | ||
58 | SP(8), | ||
59 | SP(16), | ||
60 | #if (SCSI_MAX_SG_SEGMENTS > 32) | ||
61 | SP(32), | ||
62 | #if (SCSI_MAX_SG_SEGMENTS > 64) | ||
63 | SP(64), | ||
64 | #if (SCSI_MAX_SG_SEGMENTS > 128) | ||
65 | SP(128), | ||
66 | #if (SCSI_MAX_SG_SEGMENTS > 256) | ||
67 | #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) | ||
68 | #endif | ||
69 | #endif | ||
70 | #endif | ||
71 | #endif | ||
72 | SP(SCSI_MAX_SG_SEGMENTS) | ||
73 | }; | ||
74 | #undef SP | ||
75 | |||
76 | struct kmem_cache *scsi_sdb_cache; | 41 | struct kmem_cache *scsi_sdb_cache; |
77 | 42 | ||
78 | /* | 43 | /* |
@@ -553,66 +518,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) | |||
553 | scsi_run_queue(sdev->request_queue); | 518 | scsi_run_queue(sdev->request_queue); |
554 | } | 519 | } |
555 | 520 | ||
556 | static inline unsigned int scsi_sgtable_index(unsigned short nents) | ||
557 | { | ||
558 | unsigned int index; | ||
559 | |||
560 | BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); | ||
561 | |||
562 | if (nents <= 8) | ||
563 | index = 0; | ||
564 | else | ||
565 | index = get_count_order(nents) - 3; | ||
566 | |||
567 | return index; | ||
568 | } | ||
569 | |||
570 | static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) | ||
571 | { | ||
572 | struct scsi_host_sg_pool *sgp; | ||
573 | |||
574 | sgp = scsi_sg_pools + scsi_sgtable_index(nents); | ||
575 | mempool_free(sgl, sgp->pool); | ||
576 | } | ||
577 | |||
578 | static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) | ||
579 | { | ||
580 | struct scsi_host_sg_pool *sgp; | ||
581 | |||
582 | sgp = scsi_sg_pools + scsi_sgtable_index(nents); | ||
583 | return mempool_alloc(sgp->pool, gfp_mask); | ||
584 | } | ||
585 | |||
586 | static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) | ||
587 | { | ||
588 | if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) | ||
589 | return; | ||
590 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); | ||
591 | } | ||
592 | |||
593 | static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) | ||
594 | { | ||
595 | struct scatterlist *first_chunk = NULL; | ||
596 | int ret; | ||
597 | |||
598 | BUG_ON(!nents); | ||
599 | |||
600 | if (mq) { | ||
601 | if (nents <= SCSI_MAX_SG_SEGMENTS) { | ||
602 | sdb->table.nents = sdb->table.orig_nents = nents; | ||
603 | sg_init_table(sdb->table.sgl, nents); | ||
604 | return 0; | ||
605 | } | ||
606 | first_chunk = sdb->table.sgl; | ||
607 | } | ||
608 | |||
609 | ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, | ||
610 | first_chunk, GFP_ATOMIC, scsi_sg_alloc); | ||
611 | if (unlikely(ret)) | ||
612 | scsi_free_sgtable(sdb, mq); | ||
613 | return ret; | ||
614 | } | ||
615 | |||
616 | static void scsi_uninit_cmd(struct scsi_cmnd *cmd) | 521 | static void scsi_uninit_cmd(struct scsi_cmnd *cmd) |
617 | { | 522 | { |
618 | if (cmd->request->cmd_type == REQ_TYPE_FS) { | 523 | if (cmd->request->cmd_type == REQ_TYPE_FS) { |
@@ -625,12 +530,17 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) | |||
625 | 530 | ||
626 | static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) | 531 | static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) |
627 | { | 532 | { |
533 | struct scsi_data_buffer *sdb; | ||
534 | |||
628 | if (cmd->sdb.table.nents) | 535 | if (cmd->sdb.table.nents) |
629 | scsi_free_sgtable(&cmd->sdb, true); | 536 | sg_free_table_chained(&cmd->sdb.table, true); |
630 | if (cmd->request->next_rq && cmd->request->next_rq->special) | 537 | if (cmd->request->next_rq) { |
631 | scsi_free_sgtable(cmd->request->next_rq->special, true); | 538 | sdb = cmd->request->next_rq->special; |
539 | if (sdb) | ||
540 | sg_free_table_chained(&sdb->table, true); | ||
541 | } | ||
632 | if (scsi_prot_sg_count(cmd)) | 542 | if (scsi_prot_sg_count(cmd)) |
633 | scsi_free_sgtable(cmd->prot_sdb, true); | 543 | sg_free_table_chained(&cmd->prot_sdb->table, true); |
634 | } | 544 | } |
635 | 545 | ||
636 | static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) | 546 | static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) |
@@ -669,19 +579,19 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) | |||
669 | static void scsi_release_buffers(struct scsi_cmnd *cmd) | 579 | static void scsi_release_buffers(struct scsi_cmnd *cmd) |
670 | { | 580 | { |
671 | if (cmd->sdb.table.nents) | 581 | if (cmd->sdb.table.nents) |
672 | scsi_free_sgtable(&cmd->sdb, false); | 582 | sg_free_table_chained(&cmd->sdb.table, false); |
673 | 583 | ||
674 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | 584 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
675 | 585 | ||
676 | if (scsi_prot_sg_count(cmd)) | 586 | if (scsi_prot_sg_count(cmd)) |
677 | scsi_free_sgtable(cmd->prot_sdb, false); | 587 | sg_free_table_chained(&cmd->prot_sdb->table, false); |
678 | } | 588 | } |
679 | 589 | ||
680 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | 590 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) |
681 | { | 591 | { |
682 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; | 592 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; |
683 | 593 | ||
684 | scsi_free_sgtable(bidi_sdb, false); | 594 | sg_free_table_chained(&bidi_sdb->table, false); |
685 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | 595 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); |
686 | cmd->request->next_rq->special = NULL; | 596 | cmd->request->next_rq->special = NULL; |
687 | } | 597 | } |
@@ -1085,8 +995,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) | |||
1085 | /* | 995 | /* |
1086 | * If sg table allocation fails, requeue request later. | 996 | * If sg table allocation fails, requeue request later. |
1087 | */ | 997 | */ |
1088 | if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, | 998 | if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments, |
1089 | req->mq_ctx != NULL))) | 999 | sdb->table.sgl))) |
1090 | return BLKPREP_DEFER; | 1000 | return BLKPREP_DEFER; |
1091 | 1001 | ||
1092 | /* | 1002 | /* |
@@ -1158,7 +1068,8 @@ int scsi_init_io(struct scsi_cmnd *cmd) | |||
1158 | 1068 | ||
1159 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); | 1069 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); |
1160 | 1070 | ||
1161 | if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { | 1071 | if (sg_alloc_table_chained(&prot_sdb->table, ivecs, |
1072 | prot_sdb->table.sgl)) { | ||
1162 | error = BLKPREP_DEFER; | 1073 | error = BLKPREP_DEFER; |
1163 | goto err_exit; | 1074 | goto err_exit; |
1164 | } | 1075 | } |
@@ -1932,7 +1843,7 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1932 | if (scsi_host_get_prot(shost)) { | 1843 | if (scsi_host_get_prot(shost)) { |
1933 | cmd->prot_sdb = (void *)sg + | 1844 | cmd->prot_sdb = (void *)sg + |
1934 | min_t(unsigned int, | 1845 | min_t(unsigned int, |
1935 | shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) * | 1846 | shost->sg_tablesize, SG_CHUNK_SIZE) * |
1936 | sizeof(struct scatterlist); | 1847 | sizeof(struct scatterlist); |
1937 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); | 1848 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); |
1938 | 1849 | ||
@@ -2105,7 +2016,7 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) | |||
2105 | * this limit is imposed by hardware restrictions | 2016 | * this limit is imposed by hardware restrictions |
2106 | */ | 2017 | */ |
2107 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, | 2018 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, |
2108 | SCSI_MAX_SG_CHAIN_SEGMENTS)); | 2019 | SG_MAX_SEGMENTS)); |
2109 | 2020 | ||
2110 | if (scsi_host_prot_dma(shost)) { | 2021 | if (scsi_host_prot_dma(shost)) { |
2111 | shost->sg_prot_tablesize = | 2022 | shost->sg_prot_tablesize = |
@@ -2187,8 +2098,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) | |||
2187 | unsigned int cmd_size, sgl_size, tbl_size; | 2098 | unsigned int cmd_size, sgl_size, tbl_size; |
2188 | 2099 | ||
2189 | tbl_size = shost->sg_tablesize; | 2100 | tbl_size = shost->sg_tablesize; |
2190 | if (tbl_size > SCSI_MAX_SG_SEGMENTS) | 2101 | if (tbl_size > SG_CHUNK_SIZE) |
2191 | tbl_size = SCSI_MAX_SG_SEGMENTS; | 2102 | tbl_size = SG_CHUNK_SIZE; |
2192 | sgl_size = tbl_size * sizeof(struct scatterlist); | 2103 | sgl_size = tbl_size * sizeof(struct scatterlist); |
2193 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; | 2104 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; |
2194 | if (scsi_host_get_prot(shost)) | 2105 | if (scsi_host_get_prot(shost)) |
@@ -2264,8 +2175,6 @@ EXPORT_SYMBOL(scsi_unblock_requests); | |||
2264 | 2175 | ||
2265 | int __init scsi_init_queue(void) | 2176 | int __init scsi_init_queue(void) |
2266 | { | 2177 | { |
2267 | int i; | ||
2268 | |||
2269 | scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", | 2178 | scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", |
2270 | sizeof(struct scsi_data_buffer), | 2179 | sizeof(struct scsi_data_buffer), |
2271 | 0, 0, NULL); | 2180 | 0, 0, NULL); |
@@ -2274,53 +2183,12 @@ int __init scsi_init_queue(void) | |||
2274 | return -ENOMEM; | 2183 | return -ENOMEM; |
2275 | } | 2184 | } |
2276 | 2185 | ||
2277 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
2278 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | ||
2279 | int size = sgp->size * sizeof(struct scatterlist); | ||
2280 | |||
2281 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | ||
2282 | SLAB_HWCACHE_ALIGN, NULL); | ||
2283 | if (!sgp->slab) { | ||
2284 | printk(KERN_ERR "SCSI: can't init sg slab %s\n", | ||
2285 | sgp->name); | ||
2286 | goto cleanup_sdb; | ||
2287 | } | ||
2288 | |||
2289 | sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, | ||
2290 | sgp->slab); | ||
2291 | if (!sgp->pool) { | ||
2292 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", | ||
2293 | sgp->name); | ||
2294 | goto cleanup_sdb; | ||
2295 | } | ||
2296 | } | ||
2297 | |||
2298 | return 0; | 2186 | return 0; |
2299 | |||
2300 | cleanup_sdb: | ||
2301 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
2302 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | ||
2303 | if (sgp->pool) | ||
2304 | mempool_destroy(sgp->pool); | ||
2305 | if (sgp->slab) | ||
2306 | kmem_cache_destroy(sgp->slab); | ||
2307 | } | ||
2308 | kmem_cache_destroy(scsi_sdb_cache); | ||
2309 | |||
2310 | return -ENOMEM; | ||
2311 | } | 2187 | } |
2312 | 2188 | ||
2313 | void scsi_exit_queue(void) | 2189 | void scsi_exit_queue(void) |
2314 | { | 2190 | { |
2315 | int i; | ||
2316 | |||
2317 | kmem_cache_destroy(scsi_sdb_cache); | 2191 | kmem_cache_destroy(scsi_sdb_cache); |
2318 | |||
2319 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
2320 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | ||
2321 | mempool_destroy(sgp->pool); | ||
2322 | kmem_cache_destroy(sgp->slab); | ||
2323 | } | ||
2324 | } | 2192 | } |
2325 | 2193 | ||
2326 | /** | 2194 | /** |
@@ -3196,6 +3064,7 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) | |||
3196 | * - EUI-64 based 12-byte | 3064 | * - EUI-64 based 12-byte |
3197 | * - NAA IEEE Registered | 3065 | * - NAA IEEE Registered |
3198 | * - NAA IEEE Extended | 3066 | * - NAA IEEE Extended |
3067 | * - T10 Vendor ID | ||
3199 | * as longer descriptors reduce the likelyhood | 3068 | * as longer descriptors reduce the likelyhood |
3200 | * of identification clashes. | 3069 | * of identification clashes. |
3201 | */ | 3070 | */ |
@@ -3214,6 +3083,21 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) | |||
3214 | goto next_desig; | 3083 | goto next_desig; |
3215 | 3084 | ||
3216 | switch (d[1] & 0xf) { | 3085 | switch (d[1] & 0xf) { |
3086 | case 0x1: | ||
3087 | /* T10 Vendor ID */ | ||
3088 | if (cur_id_size > d[3]) | ||
3089 | break; | ||
3090 | /* Prefer anything */ | ||
3091 | if (cur_id_type > 0x01 && cur_id_type != 0xff) | ||
3092 | break; | ||
3093 | cur_id_size = d[3]; | ||
3094 | if (cur_id_size + 4 > id_len) | ||
3095 | cur_id_size = id_len - 4; | ||
3096 | cur_id_str = d + 4; | ||
3097 | cur_id_type = d[1] & 0xf; | ||
3098 | id_size = snprintf(id, id_len, "t10.%*pE", | ||
3099 | cur_id_size, cur_id_str); | ||
3100 | break; | ||
3217 | case 0x2: | 3101 | case 0x2: |
3218 | /* EUI-64 */ | 3102 | /* EUI-64 */ |
3219 | if (cur_id_size > d[3]) | 3103 | if (cur_id_size > d[3]) |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 27b4d0a6a01d..57a4b9973320 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -116,7 +116,7 @@ extern void scsi_exit_procfs(void); | |||
116 | extern char scsi_scan_type[]; | 116 | extern char scsi_scan_type[]; |
117 | extern int scsi_complete_async_scans(void); | 117 | extern int scsi_complete_async_scans(void); |
118 | extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, | 118 | extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, |
119 | unsigned int, u64, int); | 119 | unsigned int, u64, enum scsi_scan_mode); |
120 | extern void scsi_forget_host(struct Scsi_Host *); | 120 | extern void scsi_forget_host(struct Scsi_Host *); |
121 | extern void scsi_rescan_device(struct device *); | 121 | extern void scsi_rescan_device(struct device *); |
122 | 122 | ||
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 251598eb3547..7a74b82e8973 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -251,7 +251,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) | |||
251 | if (shost->transportt->user_scan) | 251 | if (shost->transportt->user_scan) |
252 | error = shost->transportt->user_scan(shost, channel, id, lun); | 252 | error = shost->transportt->user_scan(shost, channel, id, lun); |
253 | else | 253 | else |
254 | error = scsi_scan_host_selected(shost, channel, id, lun, 1); | 254 | error = scsi_scan_host_selected(shost, channel, id, lun, |
255 | SCSI_SCAN_MANUAL); | ||
255 | scsi_host_put(shost); | 256 | scsi_host_put(shost); |
256 | return error; | 257 | return error; |
257 | } | 258 | } |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 97074c91e328..e0a78f53d809 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -96,10 +96,13 @@ MODULE_PARM_DESC(max_luns, | |||
96 | #define SCSI_SCAN_TYPE_DEFAULT "sync" | 96 | #define SCSI_SCAN_TYPE_DEFAULT "sync" |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; | 99 | char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; |
100 | 100 | ||
101 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); | 101 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), |
102 | MODULE_PARM_DESC(scan, "sync, async or none"); | 102 | S_IRUGO|S_IWUSR); |
103 | MODULE_PARM_DESC(scan, "sync, async, manual, or none. " | ||
104 | "Setting to 'manual' disables automatic scanning, but allows " | ||
105 | "for manual device scan via the 'scan' sysfs attribute."); | ||
103 | 106 | ||
104 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; | 107 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; |
105 | 108 | ||
@@ -316,6 +319,7 @@ static void scsi_target_destroy(struct scsi_target *starget) | |||
316 | struct Scsi_Host *shost = dev_to_shost(dev->parent); | 319 | struct Scsi_Host *shost = dev_to_shost(dev->parent); |
317 | unsigned long flags; | 320 | unsigned long flags; |
318 | 321 | ||
322 | BUG_ON(starget->state == STARGET_DEL); | ||
319 | starget->state = STARGET_DEL; | 323 | starget->state = STARGET_DEL; |
320 | transport_destroy_device(dev); | 324 | transport_destroy_device(dev); |
321 | spin_lock_irqsave(shost->host_lock, flags); | 325 | spin_lock_irqsave(shost->host_lock, flags); |
@@ -1040,7 +1044,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, | |||
1040 | * @lun: LUN of target device | 1044 | * @lun: LUN of target device |
1041 | * @bflagsp: store bflags here if not NULL | 1045 | * @bflagsp: store bflags here if not NULL |
1042 | * @sdevp: probe the LUN corresponding to this scsi_device | 1046 | * @sdevp: probe the LUN corresponding to this scsi_device |
1043 | * @rescan: if nonzero skip some code only needed on first scan | 1047 | * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only |
1048 | * needed on first scan | ||
1044 | * @hostdata: passed to scsi_alloc_sdev() | 1049 | * @hostdata: passed to scsi_alloc_sdev() |
1045 | * | 1050 | * |
1046 | * Description: | 1051 | * Description: |
@@ -1055,7 +1060,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, | |||
1055 | **/ | 1060 | **/ |
1056 | static int scsi_probe_and_add_lun(struct scsi_target *starget, | 1061 | static int scsi_probe_and_add_lun(struct scsi_target *starget, |
1057 | u64 lun, int *bflagsp, | 1062 | u64 lun, int *bflagsp, |
1058 | struct scsi_device **sdevp, int rescan, | 1063 | struct scsi_device **sdevp, |
1064 | enum scsi_scan_mode rescan, | ||
1059 | void *hostdata) | 1065 | void *hostdata) |
1060 | { | 1066 | { |
1061 | struct scsi_device *sdev; | 1067 | struct scsi_device *sdev; |
@@ -1069,7 +1075,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
1069 | */ | 1075 | */ |
1070 | sdev = scsi_device_lookup_by_target(starget, lun); | 1076 | sdev = scsi_device_lookup_by_target(starget, lun); |
1071 | if (sdev) { | 1077 | if (sdev) { |
1072 | if (rescan || !scsi_device_created(sdev)) { | 1078 | if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { |
1073 | SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, | 1079 | SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, |
1074 | "scsi scan: device exists on %s\n", | 1080 | "scsi scan: device exists on %s\n", |
1075 | dev_name(&sdev->sdev_gendev))); | 1081 | dev_name(&sdev->sdev_gendev))); |
@@ -1205,7 +1211,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
1205 | * Modifies sdevscan->lun. | 1211 | * Modifies sdevscan->lun. |
1206 | **/ | 1212 | **/ |
1207 | static void scsi_sequential_lun_scan(struct scsi_target *starget, | 1213 | static void scsi_sequential_lun_scan(struct scsi_target *starget, |
1208 | int bflags, int scsi_level, int rescan) | 1214 | int bflags, int scsi_level, |
1215 | enum scsi_scan_mode rescan) | ||
1209 | { | 1216 | { |
1210 | uint max_dev_lun; | 1217 | uint max_dev_lun; |
1211 | u64 sparse_lun, lun; | 1218 | u64 sparse_lun, lun; |
@@ -1300,7 +1307,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, | |||
1300 | * 1: could not scan with REPORT LUN | 1307 | * 1: could not scan with REPORT LUN |
1301 | **/ | 1308 | **/ |
1302 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | 1309 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, |
1303 | int rescan) | 1310 | enum scsi_scan_mode rescan) |
1304 | { | 1311 | { |
1305 | char devname[64]; | 1312 | char devname[64]; |
1306 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 1313 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
@@ -1546,7 +1553,7 @@ void scsi_rescan_device(struct device *dev) | |||
1546 | EXPORT_SYMBOL(scsi_rescan_device); | 1553 | EXPORT_SYMBOL(scsi_rescan_device); |
1547 | 1554 | ||
1548 | static void __scsi_scan_target(struct device *parent, unsigned int channel, | 1555 | static void __scsi_scan_target(struct device *parent, unsigned int channel, |
1549 | unsigned int id, u64 lun, int rescan) | 1556 | unsigned int id, u64 lun, enum scsi_scan_mode rescan) |
1550 | { | 1557 | { |
1551 | struct Scsi_Host *shost = dev_to_shost(parent); | 1558 | struct Scsi_Host *shost = dev_to_shost(parent); |
1552 | int bflags = 0; | 1559 | int bflags = 0; |
@@ -1604,7 +1611,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1604 | * @channel: channel to scan | 1611 | * @channel: channel to scan |
1605 | * @id: target id to scan | 1612 | * @id: target id to scan |
1606 | * @lun: Specific LUN to scan or SCAN_WILD_CARD | 1613 | * @lun: Specific LUN to scan or SCAN_WILD_CARD |
1607 | * @rescan: passed to LUN scanning routines | 1614 | * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for |
1615 | * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, | ||
1616 | * and SCSI_SCAN_MANUAL to force scanning even if | ||
1617 | * 'scan=manual' is set. | ||
1608 | * | 1618 | * |
1609 | * Description: | 1619 | * Description: |
1610 | * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, | 1620 | * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, |
@@ -1614,13 +1624,17 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1614 | * sequential scan of LUNs on the target id. | 1624 | * sequential scan of LUNs on the target id. |
1615 | **/ | 1625 | **/ |
1616 | void scsi_scan_target(struct device *parent, unsigned int channel, | 1626 | void scsi_scan_target(struct device *parent, unsigned int channel, |
1617 | unsigned int id, u64 lun, int rescan) | 1627 | unsigned int id, u64 lun, enum scsi_scan_mode rescan) |
1618 | { | 1628 | { |
1619 | struct Scsi_Host *shost = dev_to_shost(parent); | 1629 | struct Scsi_Host *shost = dev_to_shost(parent); |
1620 | 1630 | ||
1621 | if (strncmp(scsi_scan_type, "none", 4) == 0) | 1631 | if (strncmp(scsi_scan_type, "none", 4) == 0) |
1622 | return; | 1632 | return; |
1623 | 1633 | ||
1634 | if (rescan != SCSI_SCAN_MANUAL && | ||
1635 | strncmp(scsi_scan_type, "manual", 6) == 0) | ||
1636 | return; | ||
1637 | |||
1624 | mutex_lock(&shost->scan_mutex); | 1638 | mutex_lock(&shost->scan_mutex); |
1625 | if (!shost->async_scan) | 1639 | if (!shost->async_scan) |
1626 | scsi_complete_async_scans(); | 1640 | scsi_complete_async_scans(); |
@@ -1634,7 +1648,8 @@ void scsi_scan_target(struct device *parent, unsigned int channel, | |||
1634 | EXPORT_SYMBOL(scsi_scan_target); | 1648 | EXPORT_SYMBOL(scsi_scan_target); |
1635 | 1649 | ||
1636 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, | 1650 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, |
1637 | unsigned int id, u64 lun, int rescan) | 1651 | unsigned int id, u64 lun, |
1652 | enum scsi_scan_mode rescan) | ||
1638 | { | 1653 | { |
1639 | uint order_id; | 1654 | uint order_id; |
1640 | 1655 | ||
@@ -1665,7 +1680,8 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, | |||
1665 | } | 1680 | } |
1666 | 1681 | ||
1667 | int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, | 1682 | int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, |
1668 | unsigned int id, u64 lun, int rescan) | 1683 | unsigned int id, u64 lun, |
1684 | enum scsi_scan_mode rescan) | ||
1669 | { | 1685 | { |
1670 | SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, | 1686 | SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, |
1671 | "%s: <%u:%u:%llu>\n", | 1687 | "%s: <%u:%u:%llu>\n", |
@@ -1844,7 +1860,8 @@ void scsi_scan_host(struct Scsi_Host *shost) | |||
1844 | { | 1860 | { |
1845 | struct async_scan_data *data; | 1861 | struct async_scan_data *data; |
1846 | 1862 | ||
1847 | if (strncmp(scsi_scan_type, "none", 4) == 0) | 1863 | if (strncmp(scsi_scan_type, "none", 4) == 0 || |
1864 | strncmp(scsi_scan_type, "manual", 6) == 0) | ||
1848 | return; | 1865 | return; |
1849 | if (scsi_autopm_get_host(shost) < 0) | 1866 | if (scsi_autopm_get_host(shost) < 0) |
1850 | return; | 1867 | return; |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 2b642b145be1..07349270535d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -145,7 +145,8 @@ static int scsi_scan(struct Scsi_Host *shost, const char *str) | |||
145 | if (shost->transportt->user_scan) | 145 | if (shost->transportt->user_scan) |
146 | res = shost->transportt->user_scan(shost, channel, id, lun); | 146 | res = shost->transportt->user_scan(shost, channel, id, lun); |
147 | else | 147 | else |
148 | res = scsi_scan_host_selected(shost, channel, id, lun, 1); | 148 | res = scsi_scan_host_selected(shost, channel, id, lun, |
149 | SCSI_SCAN_MANUAL); | ||
149 | return res; | 150 | return res; |
150 | } | 151 | } |
151 | 152 | ||
@@ -1366,18 +1367,18 @@ static void __scsi_remove_target(struct scsi_target *starget) | |||
1366 | void scsi_remove_target(struct device *dev) | 1367 | void scsi_remove_target(struct device *dev) |
1367 | { | 1368 | { |
1368 | struct Scsi_Host *shost = dev_to_shost(dev->parent); | 1369 | struct Scsi_Host *shost = dev_to_shost(dev->parent); |
1369 | struct scsi_target *starget, *last_target = NULL; | 1370 | struct scsi_target *starget; |
1370 | unsigned long flags; | 1371 | unsigned long flags; |
1371 | 1372 | ||
1372 | restart: | 1373 | restart: |
1373 | spin_lock_irqsave(shost->host_lock, flags); | 1374 | spin_lock_irqsave(shost->host_lock, flags); |
1374 | list_for_each_entry(starget, &shost->__targets, siblings) { | 1375 | list_for_each_entry(starget, &shost->__targets, siblings) { |
1375 | if (starget->state == STARGET_DEL || | 1376 | if (starget->state == STARGET_DEL || |
1376 | starget == last_target) | 1377 | starget->state == STARGET_REMOVE) |
1377 | continue; | 1378 | continue; |
1378 | if (starget->dev.parent == dev || &starget->dev == dev) { | 1379 | if (starget->dev.parent == dev || &starget->dev == dev) { |
1379 | kref_get(&starget->reap_ref); | 1380 | kref_get(&starget->reap_ref); |
1380 | last_target = starget; | 1381 | starget->state = STARGET_REMOVE; |
1381 | spin_unlock_irqrestore(shost->host_lock, flags); | 1382 | spin_unlock_irqrestore(shost->host_lock, flags); |
1382 | __scsi_remove_target(starget); | 1383 | __scsi_remove_target(starget); |
1383 | scsi_target_reap(starget); | 1384 | scsi_target_reap(starget); |
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 08bb47b53bc3..0ff083bbf5b1 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/trace_seq.h> | 19 | #include <linux/trace_seq.h> |
20 | #include <asm/unaligned.h> | ||
20 | #include <trace/events/scsi.h> | 21 | #include <trace/events/scsi.h> |
21 | 22 | ||
22 | #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) | 23 | #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) |
@@ -231,6 +232,158 @@ out: | |||
231 | } | 232 | } |
232 | 233 | ||
233 | static const char * | 234 | static const char * |
235 | scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len) | ||
236 | { | ||
237 | const char *ret = trace_seq_buffer_ptr(p), *cmd; | ||
238 | u32 alloc_len; | ||
239 | |||
240 | switch (SERVICE_ACTION16(cdb)) { | ||
241 | case MI_REPORT_IDENTIFYING_INFORMATION: | ||
242 | cmd = "REPORT_IDENTIFYING_INFORMATION"; | ||
243 | break; | ||
244 | case MI_REPORT_TARGET_PGS: | ||
245 | cmd = "REPORT_TARGET_PORT_GROUPS"; | ||
246 | break; | ||
247 | case MI_REPORT_ALIASES: | ||
248 | cmd = "REPORT_ALIASES"; | ||
249 | break; | ||
250 | case MI_REPORT_SUPPORTED_OPERATION_CODES: | ||
251 | cmd = "REPORT_SUPPORTED_OPERATION_CODES"; | ||
252 | break; | ||
253 | case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: | ||
254 | cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS"; | ||
255 | break; | ||
256 | case MI_REPORT_PRIORITY: | ||
257 | cmd = "REPORT_PRIORITY"; | ||
258 | break; | ||
259 | case MI_REPORT_TIMESTAMP: | ||
260 | cmd = "REPORT_TIMESTAMP"; | ||
261 | break; | ||
262 | case MI_MANAGEMENT_PROTOCOL_IN: | ||
263 | cmd = "MANAGEMENT_PROTOCOL_IN"; | ||
264 | break; | ||
265 | default: | ||
266 | trace_seq_puts(p, "UNKNOWN"); | ||
267 | goto out; | ||
268 | } | ||
269 | |||
270 | alloc_len = get_unaligned_be32(&cdb[6]); | ||
271 | |||
272 | trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); | ||
273 | |||
274 | out: | ||
275 | trace_seq_putc(p, 0); | ||
276 | |||
277 | return ret; | ||
278 | } | ||
279 | |||
280 | static const char * | ||
281 | scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len) | ||
282 | { | ||
283 | const char *ret = trace_seq_buffer_ptr(p), *cmd; | ||
284 | u32 alloc_len; | ||
285 | |||
286 | switch (SERVICE_ACTION16(cdb)) { | ||
287 | case MO_SET_IDENTIFYING_INFORMATION: | ||
288 | cmd = "SET_IDENTIFYING_INFORMATION"; | ||
289 | break; | ||
290 | case MO_SET_TARGET_PGS: | ||
291 | cmd = "SET_TARGET_PORT_GROUPS"; | ||
292 | break; | ||
293 | case MO_CHANGE_ALIASES: | ||
294 | cmd = "CHANGE_ALIASES"; | ||
295 | break; | ||
296 | case MO_SET_PRIORITY: | ||
297 | cmd = "SET_PRIORITY"; | ||
298 | break; | ||
299 | case MO_SET_TIMESTAMP: | ||
300 | cmd = "SET_TIMESTAMP"; | ||
301 | break; | ||
302 | case MO_MANAGEMENT_PROTOCOL_OUT: | ||
303 | cmd = "MANAGEMENT_PROTOCOL_OUT"; | ||
304 | break; | ||
305 | default: | ||
306 | trace_seq_puts(p, "UNKNOWN"); | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | alloc_len = get_unaligned_be32(&cdb[6]); | ||
311 | |||
312 | trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); | ||
313 | |||
314 | out: | ||
315 | trace_seq_putc(p, 0); | ||
316 | |||
317 | return ret; | ||
318 | } | ||
319 | |||
320 | static const char * | ||
321 | scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len) | ||
322 | { | ||
323 | const char *ret = trace_seq_buffer_ptr(p), *cmd; | ||
324 | u64 zone_id; | ||
325 | u32 alloc_len; | ||
326 | u8 options; | ||
327 | |||
328 | switch (SERVICE_ACTION16(cdb)) { | ||
329 | case ZI_REPORT_ZONES: | ||
330 | cmd = "REPORT_ZONES"; | ||
331 | break; | ||
332 | default: | ||
333 | trace_seq_puts(p, "UNKNOWN"); | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | zone_id = get_unaligned_be64(&cdb[2]); | ||
338 | alloc_len = get_unaligned_be32(&cdb[10]); | ||
339 | options = cdb[14] & 0x3f; | ||
340 | |||
341 | trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u", | ||
342 | cmd, (unsigned long long)zone_id, alloc_len, | ||
343 | options, (cdb[14] >> 7) & 1); | ||
344 | |||
345 | out: | ||
346 | trace_seq_putc(p, 0); | ||
347 | |||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static const char * | ||
352 | scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len) | ||
353 | { | ||
354 | const char *ret = trace_seq_buffer_ptr(p), *cmd; | ||
355 | u64 zone_id; | ||
356 | |||
357 | switch (SERVICE_ACTION16(cdb)) { | ||
358 | case ZO_CLOSE_ZONE: | ||
359 | cmd = "CLOSE_ZONE"; | ||
360 | break; | ||
361 | case ZO_FINISH_ZONE: | ||
362 | cmd = "FINISH_ZONE"; | ||
363 | break; | ||
364 | case ZO_OPEN_ZONE: | ||
365 | cmd = "OPEN_ZONE"; | ||
366 | break; | ||
367 | case ZO_RESET_WRITE_POINTER: | ||
368 | cmd = "RESET_WRITE_POINTER"; | ||
369 | break; | ||
370 | default: | ||
371 | trace_seq_puts(p, "UNKNOWN"); | ||
372 | goto out; | ||
373 | } | ||
374 | |||
375 | zone_id = get_unaligned_be64(&cdb[2]); | ||
376 | |||
377 | trace_seq_printf(p, "%s zone=%llu all=%u", cmd, | ||
378 | (unsigned long long)zone_id, cdb[14] & 1); | ||
379 | |||
380 | out: | ||
381 | trace_seq_putc(p, 0); | ||
382 | |||
383 | return ret; | ||
384 | } | ||
385 | |||
386 | static const char * | ||
234 | scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) | 387 | scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) |
235 | { | 388 | { |
236 | switch (SERVICE_ACTION32(cdb)) { | 389 | switch (SERVICE_ACTION32(cdb)) { |
@@ -282,6 +435,14 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) | |||
282 | return scsi_trace_service_action_in(p, cdb, len); | 435 | return scsi_trace_service_action_in(p, cdb, len); |
283 | case VARIABLE_LENGTH_CMD: | 436 | case VARIABLE_LENGTH_CMD: |
284 | return scsi_trace_varlen(p, cdb, len); | 437 | return scsi_trace_varlen(p, cdb, len); |
438 | case MAINTENANCE_IN: | ||
439 | return scsi_trace_maintenance_in(p, cdb, len); | ||
440 | case MAINTENANCE_OUT: | ||
441 | return scsi_trace_maintenance_out(p, cdb, len); | ||
442 | case ZBC_IN: | ||
443 | return scsi_trace_zbc_in(p, cdb, len); | ||
444 | case ZBC_OUT: | ||
445 | return scsi_trace_zbc_out(p, cdb, len); | ||
285 | default: | 446 | default: |
286 | return scsi_trace_misc(p, cdb, len); | 447 | return scsi_trace_misc(p, cdb, len); |
287 | } | 448 | } |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 8a8822641b26..0f3a3869524b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -2027,11 +2027,10 @@ static void fc_vport_dev_release(struct device *dev) | |||
2027 | kfree(vport); | 2027 | kfree(vport); |
2028 | } | 2028 | } |
2029 | 2029 | ||
2030 | int scsi_is_fc_vport(const struct device *dev) | 2030 | static int scsi_is_fc_vport(const struct device *dev) |
2031 | { | 2031 | { |
2032 | return dev->release == fc_vport_dev_release; | 2032 | return dev->release == fc_vport_dev_release; |
2033 | } | 2033 | } |
2034 | EXPORT_SYMBOL(scsi_is_fc_vport); | ||
2035 | 2034 | ||
2036 | static int fc_vport_match(struct attribute_container *cont, | 2035 | static int fc_vport_match(struct attribute_container *cont, |
2037 | struct device *dev) | 2036 | struct device *dev) |
@@ -2110,7 +2109,8 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) | |||
2110 | if ((channel == rport->channel) && | 2109 | if ((channel == rport->channel) && |
2111 | (id == rport->scsi_target_id)) { | 2110 | (id == rport->scsi_target_id)) { |
2112 | spin_unlock_irqrestore(shost->host_lock, flags); | 2111 | spin_unlock_irqrestore(shost->host_lock, flags); |
2113 | scsi_scan_target(&rport->dev, channel, id, lun, 1); | 2112 | scsi_scan_target(&rport->dev, channel, id, lun, |
2113 | SCSI_SCAN_MANUAL); | ||
2114 | return; | 2114 | return; |
2115 | } | 2115 | } |
2116 | } | 2116 | } |
@@ -3277,7 +3277,8 @@ fc_scsi_scan_rport(struct work_struct *work) | |||
3277 | (rport->roles & FC_PORT_ROLE_FCP_TARGET) && | 3277 | (rport->roles & FC_PORT_ROLE_FCP_TARGET) && |
3278 | !(i->f->disable_target_scan)) { | 3278 | !(i->f->disable_target_scan)) { |
3279 | scsi_scan_target(&rport->dev, rport->channel, | 3279 | scsi_scan_target(&rport->dev, rport->channel, |
3280 | rport->scsi_target_id, SCAN_WILD_CARD, 1); | 3280 | rport->scsi_target_id, SCAN_WILD_CARD, |
3281 | SCSI_SCAN_RESCAN); | ||
3281 | } | 3282 | } |
3282 | 3283 | ||
3283 | spin_lock_irqsave(shost->host_lock, flags); | 3284 | spin_lock_irqsave(shost->host_lock, flags); |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 441481623fb9..42bca619f854 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev) | |||
1009 | kfree(fnode_sess); | 1009 | kfree(fnode_sess); |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | struct device_type iscsi_flashnode_sess_dev_type = { | 1012 | static struct device_type iscsi_flashnode_sess_dev_type = { |
1013 | .name = "iscsi_flashnode_sess_dev_type", | 1013 | .name = "iscsi_flashnode_sess_dev_type", |
1014 | .groups = iscsi_flashnode_sess_attr_groups, | 1014 | .groups = iscsi_flashnode_sess_attr_groups, |
1015 | .release = iscsi_flashnode_sess_release, | 1015 | .release = iscsi_flashnode_sess_release, |
@@ -1195,13 +1195,13 @@ static void iscsi_flashnode_conn_release(struct device *dev) | |||
1195 | kfree(fnode_conn); | 1195 | kfree(fnode_conn); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | struct device_type iscsi_flashnode_conn_dev_type = { | 1198 | static struct device_type iscsi_flashnode_conn_dev_type = { |
1199 | .name = "iscsi_flashnode_conn_dev_type", | 1199 | .name = "iscsi_flashnode_conn_dev_type", |
1200 | .groups = iscsi_flashnode_conn_attr_groups, | 1200 | .groups = iscsi_flashnode_conn_attr_groups, |
1201 | .release = iscsi_flashnode_conn_release, | 1201 | .release = iscsi_flashnode_conn_release, |
1202 | }; | 1202 | }; |
1203 | 1203 | ||
1204 | struct bus_type iscsi_flashnode_bus; | 1204 | static struct bus_type iscsi_flashnode_bus; |
1205 | 1205 | ||
1206 | int iscsi_flashnode_bus_match(struct device *dev, | 1206 | int iscsi_flashnode_bus_match(struct device *dev, |
1207 | struct device_driver *drv) | 1207 | struct device_driver *drv) |
@@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev, | |||
1212 | } | 1212 | } |
1213 | EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); | 1213 | EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); |
1214 | 1214 | ||
1215 | struct bus_type iscsi_flashnode_bus = { | 1215 | static struct bus_type iscsi_flashnode_bus = { |
1216 | .name = "iscsi_flashnode", | 1216 | .name = "iscsi_flashnode", |
1217 | .match = &iscsi_flashnode_bus_match, | 1217 | .match = &iscsi_flashnode_bus_match, |
1218 | }; | 1218 | }; |
@@ -1324,11 +1324,10 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); | |||
1324 | * 1 on success | 1324 | * 1 on success |
1325 | * 0 on failure | 1325 | * 0 on failure |
1326 | */ | 1326 | */ |
1327 | int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) | 1327 | static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) |
1328 | { | 1328 | { |
1329 | return dev->bus == &iscsi_flashnode_bus; | 1329 | return dev->bus == &iscsi_flashnode_bus; |
1330 | } | 1330 | } |
1331 | EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev); | ||
1332 | 1331 | ||
1333 | static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) | 1332 | static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) |
1334 | { | 1333 | { |
@@ -1783,6 +1782,7 @@ struct iscsi_scan_data { | |||
1783 | unsigned int channel; | 1782 | unsigned int channel; |
1784 | unsigned int id; | 1783 | unsigned int id; |
1785 | u64 lun; | 1784 | u64 lun; |
1785 | enum scsi_scan_mode rescan; | ||
1786 | }; | 1786 | }; |
1787 | 1787 | ||
1788 | static int iscsi_user_scan_session(struct device *dev, void *data) | 1788 | static int iscsi_user_scan_session(struct device *dev, void *data) |
@@ -1819,7 +1819,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data) | |||
1819 | (scan_data->id == SCAN_WILD_CARD || | 1819 | (scan_data->id == SCAN_WILD_CARD || |
1820 | scan_data->id == id)) | 1820 | scan_data->id == id)) |
1821 | scsi_scan_target(&session->dev, 0, id, | 1821 | scsi_scan_target(&session->dev, 0, id, |
1822 | scan_data->lun, 1); | 1822 | scan_data->lun, scan_data->rescan); |
1823 | } | 1823 | } |
1824 | 1824 | ||
1825 | user_scan_exit: | 1825 | user_scan_exit: |
@@ -1836,6 +1836,7 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, | |||
1836 | scan_data.channel = channel; | 1836 | scan_data.channel = channel; |
1837 | scan_data.id = id; | 1837 | scan_data.id = id; |
1838 | scan_data.lun = lun; | 1838 | scan_data.lun = lun; |
1839 | scan_data.rescan = SCSI_SCAN_MANUAL; | ||
1839 | 1840 | ||
1840 | return device_for_each_child(&shost->shost_gendev, &scan_data, | 1841 | return device_for_each_child(&shost->shost_gendev, &scan_data, |
1841 | iscsi_user_scan_session); | 1842 | iscsi_user_scan_session); |
@@ -1852,6 +1853,7 @@ static void iscsi_scan_session(struct work_struct *work) | |||
1852 | scan_data.channel = 0; | 1853 | scan_data.channel = 0; |
1853 | scan_data.id = SCAN_WILD_CARD; | 1854 | scan_data.id = SCAN_WILD_CARD; |
1854 | scan_data.lun = SCAN_WILD_CARD; | 1855 | scan_data.lun = SCAN_WILD_CARD; |
1856 | scan_data.rescan = SCSI_SCAN_RESCAN; | ||
1855 | 1857 | ||
1856 | iscsi_user_scan_session(&session->dev, &scan_data); | 1858 | iscsi_user_scan_session(&session->dev, &scan_data); |
1857 | atomic_dec(&ihost->nr_scans); | 1859 | atomic_dec(&ihost->nr_scans); |
@@ -2067,13 +2069,10 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session); | |||
2067 | 2069 | ||
2068 | int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) | 2070 | int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) |
2069 | { | 2071 | { |
2070 | struct Scsi_Host *shost = iscsi_session_to_shost(session); | ||
2071 | struct iscsi_cls_host *ihost; | ||
2072 | unsigned long flags; | 2072 | unsigned long flags; |
2073 | int id = 0; | 2073 | int id = 0; |
2074 | int err; | 2074 | int err; |
2075 | 2075 | ||
2076 | ihost = shost->shost_data; | ||
2077 | session->sid = atomic_add_return(1, &iscsi_session_nr); | 2076 | session->sid = atomic_add_return(1, &iscsi_session_nr); |
2078 | 2077 | ||
2079 | if (target_id == ISCSI_MAX_TARGET) { | 2078 | if (target_id == ISCSI_MAX_TARGET) { |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index b6f958193dad..3f0ff072184b 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -1614,7 +1614,8 @@ int sas_rphy_add(struct sas_rphy *rphy) | |||
1614 | else | 1614 | else |
1615 | lun = 0; | 1615 | lun = 0; |
1616 | 1616 | ||
1617 | scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0); | 1617 | scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, |
1618 | SCSI_SCAN_INITIAL); | ||
1618 | } | 1619 | } |
1619 | 1620 | ||
1620 | return 0; | 1621 | return 0; |
@@ -1739,8 +1740,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, | |||
1739 | 1740 | ||
1740 | if ((channel == SCAN_WILD_CARD || channel == 0) && | 1741 | if ((channel == SCAN_WILD_CARD || channel == 0) && |
1741 | (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { | 1742 | (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { |
1742 | scsi_scan_target(&rphy->dev, 0, | 1743 | scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, |
1743 | rphy->scsi_target_id, lun, 1); | 1744 | lun, SCSI_SCAN_MANUAL); |
1744 | } | 1745 | } |
1745 | } | 1746 | } |
1746 | mutex_unlock(&sas_host->lock); | 1747 | mutex_unlock(&sas_host->lock); |
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h new file mode 100644 index 000000000000..e4e1dccd1f2f --- /dev/null +++ b/drivers/scsi/sense_codes.h | |||
@@ -0,0 +1,826 @@ | |||
1 | /* | ||
2 | * The canonical list of T10 Additional Sense Codes is available at: | ||
3 | * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] | ||
4 | */ | ||
5 | |||
6 | SENSE_CODE(0x0000, "No additional sense information") | ||
7 | SENSE_CODE(0x0001, "Filemark detected") | ||
8 | SENSE_CODE(0x0002, "End-of-partition/medium detected") | ||
9 | SENSE_CODE(0x0003, "Setmark detected") | ||
10 | SENSE_CODE(0x0004, "Beginning-of-partition/medium detected") | ||
11 | SENSE_CODE(0x0005, "End-of-data detected") | ||
12 | SENSE_CODE(0x0006, "I/O process terminated") | ||
13 | SENSE_CODE(0x0007, "Programmable early warning detected") | ||
14 | SENSE_CODE(0x0011, "Audio play operation in progress") | ||
15 | SENSE_CODE(0x0012, "Audio play operation paused") | ||
16 | SENSE_CODE(0x0013, "Audio play operation successfully completed") | ||
17 | SENSE_CODE(0x0014, "Audio play operation stopped due to error") | ||
18 | SENSE_CODE(0x0015, "No current audio status to return") | ||
19 | SENSE_CODE(0x0016, "Operation in progress") | ||
20 | SENSE_CODE(0x0017, "Cleaning requested") | ||
21 | SENSE_CODE(0x0018, "Erase operation in progress") | ||
22 | SENSE_CODE(0x0019, "Locate operation in progress") | ||
23 | SENSE_CODE(0x001A, "Rewind operation in progress") | ||
24 | SENSE_CODE(0x001B, "Set capacity operation in progress") | ||
25 | SENSE_CODE(0x001C, "Verify operation in progress") | ||
26 | SENSE_CODE(0x001D, "ATA pass through information available") | ||
27 | SENSE_CODE(0x001E, "Conflicting SA creation request") | ||
28 | SENSE_CODE(0x001F, "Logical unit transitioning to another power condition") | ||
29 | SENSE_CODE(0x0020, "Extended copy information available") | ||
30 | SENSE_CODE(0x0021, "Atomic command aborted due to ACA") | ||
31 | |||
32 | SENSE_CODE(0x0100, "No index/sector signal") | ||
33 | |||
34 | SENSE_CODE(0x0200, "No seek complete") | ||
35 | |||
36 | SENSE_CODE(0x0300, "Peripheral device write fault") | ||
37 | SENSE_CODE(0x0301, "No write current") | ||
38 | SENSE_CODE(0x0302, "Excessive write errors") | ||
39 | |||
40 | SENSE_CODE(0x0400, "Logical unit not ready, cause not reportable") | ||
41 | SENSE_CODE(0x0401, "Logical unit is in process of becoming ready") | ||
42 | SENSE_CODE(0x0402, "Logical unit not ready, initializing command required") | ||
43 | SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required") | ||
44 | SENSE_CODE(0x0404, "Logical unit not ready, format in progress") | ||
45 | SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress") | ||
46 | SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress") | ||
47 | SENSE_CODE(0x0407, "Logical unit not ready, operation in progress") | ||
48 | SENSE_CODE(0x0408, "Logical unit not ready, long write in progress") | ||
49 | SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress") | ||
50 | SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition") | ||
51 | SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state") | ||
52 | SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state") | ||
53 | SENSE_CODE(0x040D, "Logical unit not ready, structure check required") | ||
54 | SENSE_CODE(0x040E, "Logical unit not ready, security session in progress") | ||
55 | SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible") | ||
56 | SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required") | ||
57 | SENSE_CODE(0x0412, "Logical unit not ready, offline") | ||
58 | SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress") | ||
59 | SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress") | ||
60 | SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled") | ||
61 | SENSE_CODE(0x0416, "Logical unit not ready, configuration required") | ||
62 | SENSE_CODE(0x0417, "Logical unit not ready, calibration required") | ||
63 | SENSE_CODE(0x0418, "Logical unit not ready, a door is open") | ||
64 | SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode") | ||
65 | SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress") | ||
66 | SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress") | ||
67 | SENSE_CODE(0x041C, "Logical unit not ready, additional power use not yet granted") | ||
68 | SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress") | ||
69 | SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required") | ||
70 | SENSE_CODE(0x041F, "Logical unit not ready, microcode download required") | ||
71 | SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required") | ||
72 | SENSE_CODE(0x0421, "Logical unit not ready, hard reset required") | ||
73 | SENSE_CODE(0x0422, "Logical unit not ready, power cycle required") | ||
74 | |||
75 | SENSE_CODE(0x0500, "Logical unit does not respond to selection") | ||
76 | |||
77 | SENSE_CODE(0x0600, "No reference position found") | ||
78 | |||
79 | SENSE_CODE(0x0700, "Multiple peripheral devices selected") | ||
80 | |||
81 | SENSE_CODE(0x0800, "Logical unit communication failure") | ||
82 | SENSE_CODE(0x0801, "Logical unit communication time-out") | ||
83 | SENSE_CODE(0x0802, "Logical unit communication parity error") | ||
84 | SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)") | ||
85 | SENSE_CODE(0x0804, "Unreachable copy target") | ||
86 | |||
87 | SENSE_CODE(0x0900, "Track following error") | ||
88 | SENSE_CODE(0x0901, "Tracking servo failure") | ||
89 | SENSE_CODE(0x0902, "Focus servo failure") | ||
90 | SENSE_CODE(0x0903, "Spindle servo failure") | ||
91 | SENSE_CODE(0x0904, "Head select fault") | ||
92 | SENSE_CODE(0x0905, "Vibration induced tracking error") | ||
93 | |||
94 | SENSE_CODE(0x0A00, "Error log overflow") | ||
95 | |||
96 | SENSE_CODE(0x0B00, "Warning") | ||
97 | SENSE_CODE(0x0B01, "Warning - specified temperature exceeded") | ||
98 | SENSE_CODE(0x0B02, "Warning - enclosure degraded") | ||
99 | SENSE_CODE(0x0B03, "Warning - background self-test failed") | ||
100 | SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error") | ||
101 | SENSE_CODE(0x0B05, "Warning - background medium scan detected medium error") | ||
102 | SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile") | ||
103 | SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache") | ||
104 | SENSE_CODE(0x0B08, "Warning - power loss expected") | ||
105 | SENSE_CODE(0x0B09, "Warning - device statistics notification active") | ||
106 | |||
107 | SENSE_CODE(0x0C00, "Write error") | ||
108 | SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation") | ||
109 | SENSE_CODE(0x0C02, "Write error - auto reallocation failed") | ||
110 | SENSE_CODE(0x0C03, "Write error - recommend reassignment") | ||
111 | SENSE_CODE(0x0C04, "Compression check miscompare error") | ||
112 | SENSE_CODE(0x0C05, "Data expansion occurred during compression") | ||
113 | SENSE_CODE(0x0C06, "Block not compressible") | ||
114 | SENSE_CODE(0x0C07, "Write error - recovery needed") | ||
115 | SENSE_CODE(0x0C08, "Write error - recovery failed") | ||
116 | SENSE_CODE(0x0C09, "Write error - loss of streaming") | ||
117 | SENSE_CODE(0x0C0A, "Write error - padding blocks added") | ||
118 | SENSE_CODE(0x0C0B, "Auxiliary memory write error") | ||
119 | SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data") | ||
120 | SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data") | ||
121 | SENSE_CODE(0x0C0E, "Multiple write errors") | ||
122 | SENSE_CODE(0x0C0F, "Defects in error window") | ||
123 | SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations") | ||
124 | |||
125 | SENSE_CODE(0x0D00, "Error detected by third party temporary initiator") | ||
126 | SENSE_CODE(0x0D01, "Third party device failure") | ||
127 | SENSE_CODE(0x0D02, "Copy target device not reachable") | ||
128 | SENSE_CODE(0x0D03, "Incorrect copy target device type") | ||
129 | SENSE_CODE(0x0D04, "Copy target device data underrun") | ||
130 | SENSE_CODE(0x0D05, "Copy target device data overrun") | ||
131 | |||
132 | SENSE_CODE(0x0E00, "Invalid information unit") | ||
133 | SENSE_CODE(0x0E01, "Information unit too short") | ||
134 | SENSE_CODE(0x0E02, "Information unit too long") | ||
135 | SENSE_CODE(0x0E03, "Invalid field in command information unit") | ||
136 | |||
137 | SENSE_CODE(0x1000, "Id CRC or ECC error") | ||
138 | SENSE_CODE(0x1001, "Logical block guard check failed") | ||
139 | SENSE_CODE(0x1002, "Logical block application tag check failed") | ||
140 | SENSE_CODE(0x1003, "Logical block reference tag check failed") | ||
141 | SENSE_CODE(0x1004, "Logical block protection error on recover buffered data") | ||
142 | SENSE_CODE(0x1005, "Logical block protection method error") | ||
143 | |||
144 | SENSE_CODE(0x1100, "Unrecovered read error") | ||
145 | SENSE_CODE(0x1101, "Read retries exhausted") | ||
146 | SENSE_CODE(0x1102, "Error too long to correct") | ||
147 | SENSE_CODE(0x1103, "Multiple read errors") | ||
148 | SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed") | ||
149 | SENSE_CODE(0x1105, "L-EC uncorrectable error") | ||
150 | SENSE_CODE(0x1106, "CIRC unrecovered error") | ||
151 | SENSE_CODE(0x1107, "Data re-synchronization error") | ||
152 | SENSE_CODE(0x1108, "Incomplete block read") | ||
153 | SENSE_CODE(0x1109, "No gap found") | ||
154 | SENSE_CODE(0x110A, "Miscorrected error") | ||
155 | SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment") | ||
156 | SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data") | ||
157 | SENSE_CODE(0x110D, "De-compression CRC error") | ||
158 | SENSE_CODE(0x110E, "Cannot decompress using declared algorithm") | ||
159 | SENSE_CODE(0x110F, "Error reading UPC/EAN number") | ||
160 | SENSE_CODE(0x1110, "Error reading ISRC number") | ||
161 | SENSE_CODE(0x1111, "Read error - loss of streaming") | ||
162 | SENSE_CODE(0x1112, "Auxiliary memory read error") | ||
163 | SENSE_CODE(0x1113, "Read error - failed retransmission request") | ||
164 | SENSE_CODE(0x1114, "Read error - lba marked bad by application client") | ||
165 | SENSE_CODE(0x1115, "Write after sanitize required") | ||
166 | |||
167 | SENSE_CODE(0x1200, "Address mark not found for id field") | ||
168 | |||
169 | SENSE_CODE(0x1300, "Address mark not found for data field") | ||
170 | |||
171 | SENSE_CODE(0x1400, "Recorded entity not found") | ||
172 | SENSE_CODE(0x1401, "Record not found") | ||
173 | SENSE_CODE(0x1402, "Filemark or setmark not found") | ||
174 | SENSE_CODE(0x1403, "End-of-data not found") | ||
175 | SENSE_CODE(0x1404, "Block sequence error") | ||
176 | SENSE_CODE(0x1405, "Record not found - recommend reassignment") | ||
177 | SENSE_CODE(0x1406, "Record not found - data auto-reallocated") | ||
178 | SENSE_CODE(0x1407, "Locate operation failure") | ||
179 | |||
180 | SENSE_CODE(0x1500, "Random positioning error") | ||
181 | SENSE_CODE(0x1501, "Mechanical positioning error") | ||
182 | SENSE_CODE(0x1502, "Positioning error detected by read of medium") | ||
183 | |||
184 | SENSE_CODE(0x1600, "Data synchronization mark error") | ||
185 | SENSE_CODE(0x1601, "Data sync error - data rewritten") | ||
186 | SENSE_CODE(0x1602, "Data sync error - recommend rewrite") | ||
187 | SENSE_CODE(0x1603, "Data sync error - data auto-reallocated") | ||
188 | SENSE_CODE(0x1604, "Data sync error - recommend reassignment") | ||
189 | |||
190 | SENSE_CODE(0x1700, "Recovered data with no error correction applied") | ||
191 | SENSE_CODE(0x1701, "Recovered data with retries") | ||
192 | SENSE_CODE(0x1702, "Recovered data with positive head offset") | ||
193 | SENSE_CODE(0x1703, "Recovered data with negative head offset") | ||
194 | SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied") | ||
195 | SENSE_CODE(0x1705, "Recovered data using previous sector id") | ||
196 | SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated") | ||
197 | SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment") | ||
198 | SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite") | ||
199 | SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten") | ||
200 | |||
201 | SENSE_CODE(0x1800, "Recovered data with error correction applied") | ||
202 | SENSE_CODE(0x1801, "Recovered data with error corr. & retries applied") | ||
203 | SENSE_CODE(0x1802, "Recovered data - data auto-reallocated") | ||
204 | SENSE_CODE(0x1803, "Recovered data with CIRC") | ||
205 | SENSE_CODE(0x1804, "Recovered data with L-EC") | ||
206 | SENSE_CODE(0x1805, "Recovered data - recommend reassignment") | ||
207 | SENSE_CODE(0x1806, "Recovered data - recommend rewrite") | ||
208 | SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten") | ||
209 | SENSE_CODE(0x1808, "Recovered data with linking") | ||
210 | |||
211 | SENSE_CODE(0x1900, "Defect list error") | ||
212 | SENSE_CODE(0x1901, "Defect list not available") | ||
213 | SENSE_CODE(0x1902, "Defect list error in primary list") | ||
214 | SENSE_CODE(0x1903, "Defect list error in grown list") | ||
215 | |||
216 | SENSE_CODE(0x1A00, "Parameter list length error") | ||
217 | |||
218 | SENSE_CODE(0x1B00, "Synchronous data transfer error") | ||
219 | |||
220 | SENSE_CODE(0x1C00, "Defect list not found") | ||
221 | SENSE_CODE(0x1C01, "Primary defect list not found") | ||
222 | SENSE_CODE(0x1C02, "Grown defect list not found") | ||
223 | |||
224 | SENSE_CODE(0x1D00, "Miscompare during verify operation") | ||
225 | SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA") | ||
226 | |||
227 | SENSE_CODE(0x1E00, "Recovered id with ECC correction") | ||
228 | |||
229 | SENSE_CODE(0x1F00, "Partial defect list transfer") | ||
230 | |||
231 | SENSE_CODE(0x2000, "Invalid command operation code") | ||
232 | SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled") | ||
233 | SENSE_CODE(0x2002, "Access denied - no access rights") | ||
234 | SENSE_CODE(0x2003, "Access denied - invalid mgmt id key") | ||
235 | SENSE_CODE(0x2004, "Illegal command while in write capable state") | ||
236 | SENSE_CODE(0x2005, "Obsolete") | ||
237 | SENSE_CODE(0x2006, "Illegal command while in explicit address mode") | ||
238 | SENSE_CODE(0x2007, "Illegal command while in implicit address mode") | ||
239 | SENSE_CODE(0x2008, "Access denied - enrollment conflict") | ||
240 | SENSE_CODE(0x2009, "Access denied - invalid LU identifier") | ||
241 | SENSE_CODE(0x200A, "Access denied - invalid proxy token") | ||
242 | SENSE_CODE(0x200B, "Access denied - ACL LUN conflict") | ||
243 | SENSE_CODE(0x200C, "Illegal command when not in append-only mode") | ||
244 | |||
245 | SENSE_CODE(0x2100, "Logical block address out of range") | ||
246 | SENSE_CODE(0x2101, "Invalid element address") | ||
247 | SENSE_CODE(0x2102, "Invalid address for write") | ||
248 | SENSE_CODE(0x2103, "Invalid write crossing layer jump") | ||
249 | SENSE_CODE(0x2104, "Unaligned write command") | ||
250 | SENSE_CODE(0x2105, "Write boundary violation") | ||
251 | SENSE_CODE(0x2106, "Attempt to read invalid data") | ||
252 | SENSE_CODE(0x2107, "Read boundary violation") | ||
253 | |||
254 | SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)") | ||
255 | |||
256 | SENSE_CODE(0x2300, "Invalid token operation, cause not reportable") | ||
257 | SENSE_CODE(0x2301, "Invalid token operation, unsupported token type") | ||
258 | SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported") | ||
259 | SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported") | ||
260 | SENSE_CODE(0x2304, "Invalid token operation, token unknown") | ||
261 | SENSE_CODE(0x2305, "Invalid token operation, token corrupt") | ||
262 | SENSE_CODE(0x2306, "Invalid token operation, token revoked") | ||
263 | SENSE_CODE(0x2307, "Invalid token operation, token expired") | ||
264 | SENSE_CODE(0x2308, "Invalid token operation, token cancelled") | ||
265 | SENSE_CODE(0x2309, "Invalid token operation, token deleted") | ||
266 | SENSE_CODE(0x230A, "Invalid token operation, invalid token length") | ||
267 | |||
268 | SENSE_CODE(0x2400, "Invalid field in cdb") | ||
269 | SENSE_CODE(0x2401, "CDB decryption error") | ||
270 | SENSE_CODE(0x2402, "Obsolete") | ||
271 | SENSE_CODE(0x2403, "Obsolete") | ||
272 | SENSE_CODE(0x2404, "Security audit value frozen") | ||
273 | SENSE_CODE(0x2405, "Security working key frozen") | ||
274 | SENSE_CODE(0x2406, "Nonce not unique") | ||
275 | SENSE_CODE(0x2407, "Nonce timestamp out of range") | ||
276 | SENSE_CODE(0x2408, "Invalid XCDB") | ||
277 | |||
278 | SENSE_CODE(0x2500, "Logical unit not supported") | ||
279 | |||
280 | SENSE_CODE(0x2600, "Invalid field in parameter list") | ||
281 | SENSE_CODE(0x2601, "Parameter not supported") | ||
282 | SENSE_CODE(0x2602, "Parameter value invalid") | ||
283 | SENSE_CODE(0x2603, "Threshold parameters not supported") | ||
284 | SENSE_CODE(0x2604, "Invalid release of persistent reservation") | ||
285 | SENSE_CODE(0x2605, "Data decryption error") | ||
286 | SENSE_CODE(0x2606, "Too many target descriptors") | ||
287 | SENSE_CODE(0x2607, "Unsupported target descriptor type code") | ||
288 | SENSE_CODE(0x2608, "Too many segment descriptors") | ||
289 | SENSE_CODE(0x2609, "Unsupported segment descriptor type code") | ||
290 | SENSE_CODE(0x260A, "Unexpected inexact segment") | ||
291 | SENSE_CODE(0x260B, "Inline data length exceeded") | ||
292 | SENSE_CODE(0x260C, "Invalid operation for copy source or destination") | ||
293 | SENSE_CODE(0x260D, "Copy segment granularity violation") | ||
294 | SENSE_CODE(0x260E, "Invalid parameter while port is enabled") | ||
295 | SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value") | ||
296 | SENSE_CODE(0x2610, "Data decryption key fail limit reached") | ||
297 | SENSE_CODE(0x2611, "Incomplete key-associated data set") | ||
298 | SENSE_CODE(0x2612, "Vendor specific key reference not found") | ||
299 | |||
300 | SENSE_CODE(0x2700, "Write protected") | ||
301 | SENSE_CODE(0x2701, "Hardware write protected") | ||
302 | SENSE_CODE(0x2702, "Logical unit software write protected") | ||
303 | SENSE_CODE(0x2703, "Associated write protect") | ||
304 | SENSE_CODE(0x2704, "Persistent write protect") | ||
305 | SENSE_CODE(0x2705, "Permanent write protect") | ||
306 | SENSE_CODE(0x2706, "Conditional write protect") | ||
307 | SENSE_CODE(0x2707, "Space allocation failed write protect") | ||
308 | SENSE_CODE(0x2708, "Zone is read only") | ||
309 | |||
310 | SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed") | ||
311 | SENSE_CODE(0x2801, "Import or export element accessed") | ||
312 | SENSE_CODE(0x2802, "Format-layer may have changed") | ||
313 | SENSE_CODE(0x2803, "Import/export element accessed, medium changed") | ||
314 | |||
315 | SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred") | ||
316 | SENSE_CODE(0x2901, "Power on occurred") | ||
317 | SENSE_CODE(0x2902, "Scsi bus reset occurred") | ||
318 | SENSE_CODE(0x2903, "Bus device reset function occurred") | ||
319 | SENSE_CODE(0x2904, "Device internal reset") | ||
320 | SENSE_CODE(0x2905, "Transceiver mode changed to single-ended") | ||
321 | SENSE_CODE(0x2906, "Transceiver mode changed to lvd") | ||
322 | SENSE_CODE(0x2907, "I_T nexus loss occurred") | ||
323 | |||
324 | SENSE_CODE(0x2A00, "Parameters changed") | ||
325 | SENSE_CODE(0x2A01, "Mode parameters changed") | ||
326 | SENSE_CODE(0x2A02, "Log parameters changed") | ||
327 | SENSE_CODE(0x2A03, "Reservations preempted") | ||
328 | SENSE_CODE(0x2A04, "Reservations released") | ||
329 | SENSE_CODE(0x2A05, "Registrations preempted") | ||
330 | SENSE_CODE(0x2A06, "Asymmetric access state changed") | ||
331 | SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed") | ||
332 | SENSE_CODE(0x2A08, "Priority changed") | ||
333 | SENSE_CODE(0x2A09, "Capacity data has changed") | ||
334 | SENSE_CODE(0x2A0A, "Error history I_T nexus cleared") | ||
335 | SENSE_CODE(0x2A0B, "Error history snapshot released") | ||
336 | SENSE_CODE(0x2A0C, "Error recovery attributes have changed") | ||
337 | SENSE_CODE(0x2A0D, "Data encryption capabilities changed") | ||
338 | SENSE_CODE(0x2A10, "Timestamp changed") | ||
339 | SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus") | ||
340 | SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event") | ||
341 | SENSE_CODE(0x2A13, "Data encryption key instance counter has changed") | ||
342 | SENSE_CODE(0x2A14, "SA creation capabilities data has changed") | ||
343 | SENSE_CODE(0x2A15, "Medium removal prevention preempted") | ||
344 | |||
345 | SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect") | ||
346 | |||
347 | SENSE_CODE(0x2C00, "Command sequence error") | ||
348 | SENSE_CODE(0x2C01, "Too many windows specified") | ||
349 | SENSE_CODE(0x2C02, "Invalid combination of windows specified") | ||
350 | SENSE_CODE(0x2C03, "Current program area is not empty") | ||
351 | SENSE_CODE(0x2C04, "Current program area is empty") | ||
352 | SENSE_CODE(0x2C05, "Illegal power condition request") | ||
353 | SENSE_CODE(0x2C06, "Persistent prevent conflict") | ||
354 | SENSE_CODE(0x2C07, "Previous busy status") | ||
355 | SENSE_CODE(0x2C08, "Previous task set full status") | ||
356 | SENSE_CODE(0x2C09, "Previous reservation conflict status") | ||
357 | SENSE_CODE(0x2C0A, "Partition or collection contains user objects") | ||
358 | SENSE_CODE(0x2C0B, "Not reserved") | ||
359 | SENSE_CODE(0x2C0C, "Orwrite generation does not match") | ||
360 | SENSE_CODE(0x2C0D, "Reset write pointer not allowed") | ||
361 | SENSE_CODE(0x2C0E, "Zone is offline") | ||
362 | |||
363 | SENSE_CODE(0x2D00, "Overwrite error on update in place") | ||
364 | |||
365 | SENSE_CODE(0x2E00, "Insufficient time for operation") | ||
366 | SENSE_CODE(0x2E01, "Command timeout before processing") | ||
367 | SENSE_CODE(0x2E02, "Command timeout during processing") | ||
368 | SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery") | ||
369 | |||
370 | SENSE_CODE(0x2F00, "Commands cleared by another initiator") | ||
371 | SENSE_CODE(0x2F01, "Commands cleared by power loss notification") | ||
372 | SENSE_CODE(0x2F02, "Commands cleared by device server") | ||
373 | SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event") | ||
374 | |||
375 | SENSE_CODE(0x3000, "Incompatible medium installed") | ||
376 | SENSE_CODE(0x3001, "Cannot read medium - unknown format") | ||
377 | SENSE_CODE(0x3002, "Cannot read medium - incompatible format") | ||
378 | SENSE_CODE(0x3003, "Cleaning cartridge installed") | ||
379 | SENSE_CODE(0x3004, "Cannot write medium - unknown format") | ||
380 | SENSE_CODE(0x3005, "Cannot write medium - incompatible format") | ||
381 | SENSE_CODE(0x3006, "Cannot format medium - incompatible medium") | ||
382 | SENSE_CODE(0x3007, "Cleaning failure") | ||
383 | SENSE_CODE(0x3008, "Cannot write - application code mismatch") | ||
384 | SENSE_CODE(0x3009, "Current session not fixated for append") | ||
385 | SENSE_CODE(0x300A, "Cleaning request rejected") | ||
386 | SENSE_CODE(0x300C, "WORM medium - overwrite attempted") | ||
387 | SENSE_CODE(0x300D, "WORM medium - integrity check") | ||
388 | SENSE_CODE(0x3010, "Medium not formatted") | ||
389 | SENSE_CODE(0x3011, "Incompatible volume type") | ||
390 | SENSE_CODE(0x3012, "Incompatible volume qualifier") | ||
391 | SENSE_CODE(0x3013, "Cleaning volume expired") | ||
392 | |||
393 | SENSE_CODE(0x3100, "Medium format corrupted") | ||
394 | SENSE_CODE(0x3101, "Format command failed") | ||
395 | SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking") | ||
396 | SENSE_CODE(0x3103, "Sanitize command failed") | ||
397 | |||
398 | SENSE_CODE(0x3200, "No defect spare location available") | ||
399 | SENSE_CODE(0x3201, "Defect list update failure") | ||
400 | |||
401 | SENSE_CODE(0x3300, "Tape length error") | ||
402 | |||
403 | SENSE_CODE(0x3400, "Enclosure failure") | ||
404 | |||
405 | SENSE_CODE(0x3500, "Enclosure services failure") | ||
406 | SENSE_CODE(0x3501, "Unsupported enclosure function") | ||
407 | SENSE_CODE(0x3502, "Enclosure services unavailable") | ||
408 | SENSE_CODE(0x3503, "Enclosure services transfer failure") | ||
409 | SENSE_CODE(0x3504, "Enclosure services transfer refused") | ||
410 | SENSE_CODE(0x3505, "Enclosure services checksum error") | ||
411 | |||
412 | SENSE_CODE(0x3600, "Ribbon, ink, or toner failure") | ||
413 | |||
414 | SENSE_CODE(0x3700, "Rounded parameter") | ||
415 | |||
416 | SENSE_CODE(0x3800, "Event status notification") | ||
417 | SENSE_CODE(0x3802, "Esn - power management class event") | ||
418 | SENSE_CODE(0x3804, "Esn - media class event") | ||
419 | SENSE_CODE(0x3806, "Esn - device busy class event") | ||
420 | SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached") | ||
421 | |||
422 | SENSE_CODE(0x3900, "Saving parameters not supported") | ||
423 | |||
424 | SENSE_CODE(0x3A00, "Medium not present") | ||
425 | SENSE_CODE(0x3A01, "Medium not present - tray closed") | ||
426 | SENSE_CODE(0x3A02, "Medium not present - tray open") | ||
427 | SENSE_CODE(0x3A03, "Medium not present - loadable") | ||
428 | SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible") | ||
429 | |||
430 | SENSE_CODE(0x3B00, "Sequential positioning error") | ||
431 | SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium") | ||
432 | SENSE_CODE(0x3B02, "Tape position error at end-of-medium") | ||
433 | SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready") | ||
434 | SENSE_CODE(0x3B04, "Slew failure") | ||
435 | SENSE_CODE(0x3B05, "Paper jam") | ||
436 | SENSE_CODE(0x3B06, "Failed to sense top-of-form") | ||
437 | SENSE_CODE(0x3B07, "Failed to sense bottom-of-form") | ||
438 | SENSE_CODE(0x3B08, "Reposition error") | ||
439 | SENSE_CODE(0x3B09, "Read past end of medium") | ||
440 | SENSE_CODE(0x3B0A, "Read past beginning of medium") | ||
441 | SENSE_CODE(0x3B0B, "Position past end of medium") | ||
442 | SENSE_CODE(0x3B0C, "Position past beginning of medium") | ||
443 | SENSE_CODE(0x3B0D, "Medium destination element full") | ||
444 | SENSE_CODE(0x3B0E, "Medium source element empty") | ||
445 | SENSE_CODE(0x3B0F, "End of medium reached") | ||
446 | SENSE_CODE(0x3B11, "Medium magazine not accessible") | ||
447 | SENSE_CODE(0x3B12, "Medium magazine removed") | ||
448 | SENSE_CODE(0x3B13, "Medium magazine inserted") | ||
449 | SENSE_CODE(0x3B14, "Medium magazine locked") | ||
450 | SENSE_CODE(0x3B15, "Medium magazine unlocked") | ||
451 | SENSE_CODE(0x3B16, "Mechanical positioning or changer error") | ||
452 | SENSE_CODE(0x3B17, "Read past end of user object") | ||
453 | SENSE_CODE(0x3B18, "Element disabled") | ||
454 | SENSE_CODE(0x3B19, "Element enabled") | ||
455 | SENSE_CODE(0x3B1A, "Data transfer device removed") | ||
456 | SENSE_CODE(0x3B1B, "Data transfer device inserted") | ||
457 | SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation") | ||
458 | |||
459 | SENSE_CODE(0x3D00, "Invalid bits in identify message") | ||
460 | |||
461 | SENSE_CODE(0x3E00, "Logical unit has not self-configured yet") | ||
462 | SENSE_CODE(0x3E01, "Logical unit failure") | ||
463 | SENSE_CODE(0x3E02, "Timeout on logical unit") | ||
464 | SENSE_CODE(0x3E03, "Logical unit failed self-test") | ||
465 | SENSE_CODE(0x3E04, "Logical unit unable to update self-test log") | ||
466 | |||
467 | SENSE_CODE(0x3F00, "Target operating conditions have changed") | ||
468 | SENSE_CODE(0x3F01, "Microcode has been changed") | ||
469 | SENSE_CODE(0x3F02, "Changed operating definition") | ||
470 | SENSE_CODE(0x3F03, "Inquiry data has changed") | ||
471 | SENSE_CODE(0x3F04, "Component device attached") | ||
472 | SENSE_CODE(0x3F05, "Device identifier changed") | ||
473 | SENSE_CODE(0x3F06, "Redundancy group created or modified") | ||
474 | SENSE_CODE(0x3F07, "Redundancy group deleted") | ||
475 | SENSE_CODE(0x3F08, "Spare created or modified") | ||
476 | SENSE_CODE(0x3F09, "Spare deleted") | ||
477 | SENSE_CODE(0x3F0A, "Volume set created or modified") | ||
478 | SENSE_CODE(0x3F0B, "Volume set deleted") | ||
479 | SENSE_CODE(0x3F0C, "Volume set deassigned") | ||
480 | SENSE_CODE(0x3F0D, "Volume set reassigned") | ||
481 | SENSE_CODE(0x3F0E, "Reported luns data has changed") | ||
482 | SENSE_CODE(0x3F0F, "Echo buffer overwritten") | ||
483 | SENSE_CODE(0x3F10, "Medium loadable") | ||
484 | SENSE_CODE(0x3F11, "Medium auxiliary memory accessible") | ||
485 | SENSE_CODE(0x3F12, "iSCSI IP address added") | ||
486 | SENSE_CODE(0x3F13, "iSCSI IP address removed") | ||
487 | SENSE_CODE(0x3F14, "iSCSI IP address changed") | ||
488 | SENSE_CODE(0x3F15, "Inspect referrals sense descriptors") | ||
489 | SENSE_CODE(0x3F16, "Microcode has been changed without reset") | ||
490 | /* | ||
491 | * SENSE_CODE(0x40NN, "Ram failure") | ||
492 | * SENSE_CODE(0x40NN, "Diagnostic failure on component nn") | ||
493 | * SENSE_CODE(0x41NN, "Data path failure") | ||
494 | * SENSE_CODE(0x42NN, "Power-on or self-test failure") | ||
495 | */ | ||
496 | SENSE_CODE(0x4300, "Message error") | ||
497 | |||
498 | SENSE_CODE(0x4400, "Internal target failure") | ||
499 | SENSE_CODE(0x4401, "Persistent reservation information lost") | ||
500 | SENSE_CODE(0x4471, "ATA device failed set features") | ||
501 | |||
502 | SENSE_CODE(0x4500, "Select or reselect failure") | ||
503 | |||
504 | SENSE_CODE(0x4600, "Unsuccessful soft reset") | ||
505 | |||
506 | SENSE_CODE(0x4700, "Scsi parity error") | ||
507 | SENSE_CODE(0x4701, "Data phase CRC error detected") | ||
508 | SENSE_CODE(0x4702, "Scsi parity error detected during st data phase") | ||
509 | SENSE_CODE(0x4703, "Information unit iuCRC error detected") | ||
510 | SENSE_CODE(0x4704, "Asynchronous information protection error detected") | ||
511 | SENSE_CODE(0x4705, "Protocol service CRC error") | ||
512 | SENSE_CODE(0x4706, "Phy test function in progress") | ||
513 | SENSE_CODE(0x477f, "Some commands cleared by iSCSI Protocol event") | ||
514 | |||
515 | SENSE_CODE(0x4800, "Initiator detected error message received") | ||
516 | |||
517 | SENSE_CODE(0x4900, "Invalid message error") | ||
518 | |||
519 | SENSE_CODE(0x4A00, "Command phase error") | ||
520 | |||
521 | SENSE_CODE(0x4B00, "Data phase error") | ||
522 | SENSE_CODE(0x4B01, "Invalid target port transfer tag received") | ||
523 | SENSE_CODE(0x4B02, "Too much write data") | ||
524 | SENSE_CODE(0x4B03, "Ack/nak timeout") | ||
525 | SENSE_CODE(0x4B04, "Nak received") | ||
526 | SENSE_CODE(0x4B05, "Data offset error") | ||
527 | SENSE_CODE(0x4B06, "Initiator response timeout") | ||
528 | SENSE_CODE(0x4B07, "Connection lost") | ||
529 | SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size") | ||
530 | SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area") | ||
531 | SENSE_CODE(0x4B0A, "Data-in buffer error") | ||
532 | SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size") | ||
533 | SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area") | ||
534 | SENSE_CODE(0x4B0D, "Data-out buffer error") | ||
535 | SENSE_CODE(0x4B0E, "PCIe fabric error") | ||
536 | SENSE_CODE(0x4B0F, "PCIe completion timeout") | ||
537 | SENSE_CODE(0x4B10, "PCIe completer abort") | ||
538 | SENSE_CODE(0x4B11, "PCIe poisoned tlp received") | ||
539 | SENSE_CODE(0x4B12, "PCIe eCRC check failed") | ||
540 | SENSE_CODE(0x4B13, "PCIe unsupported request") | ||
541 | SENSE_CODE(0x4B14, "PCIe acs violation") | ||
542 | SENSE_CODE(0x4B15, "PCIe tlp prefix blocked") | ||
543 | |||
544 | SENSE_CODE(0x4C00, "Logical unit failed self-configuration") | ||
545 | /* | ||
546 | * SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)") | ||
547 | */ | ||
548 | SENSE_CODE(0x4E00, "Overlapped commands attempted") | ||
549 | |||
550 | SENSE_CODE(0x5000, "Write append error") | ||
551 | SENSE_CODE(0x5001, "Write append position error") | ||
552 | SENSE_CODE(0x5002, "Position error related to timing") | ||
553 | |||
554 | SENSE_CODE(0x5100, "Erase failure") | ||
555 | SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected") | ||
556 | |||
557 | SENSE_CODE(0x5200, "Cartridge fault") | ||
558 | |||
559 | SENSE_CODE(0x5300, "Media load or eject failed") | ||
560 | SENSE_CODE(0x5301, "Unload tape failure") | ||
561 | SENSE_CODE(0x5302, "Medium removal prevented") | ||
562 | SENSE_CODE(0x5303, "Medium removal prevented by data transfer element") | ||
563 | SENSE_CODE(0x5304, "Medium thread or unthread failure") | ||
564 | SENSE_CODE(0x5305, "Volume identifier invalid") | ||
565 | SENSE_CODE(0x5306, "Volume identifier missing") | ||
566 | SENSE_CODE(0x5307, "Duplicate volume identifier") | ||
567 | SENSE_CODE(0x5308, "Element status unknown") | ||
568 | SENSE_CODE(0x5309, "Data transfer device error - load failed") | ||
569 | SENSE_CODE(0x530a, "Data transfer device error - unload failed") | ||
570 | SENSE_CODE(0x530b, "Data transfer device error - unload missing") | ||
571 | SENSE_CODE(0x530c, "Data transfer device error - eject failed") | ||
572 | SENSE_CODE(0x530d, "Data transfer device error - library communication failed") | ||
573 | |||
574 | SENSE_CODE(0x5400, "Scsi to host system interface failure") | ||
575 | |||
576 | SENSE_CODE(0x5500, "System resource failure") | ||
577 | SENSE_CODE(0x5501, "System buffer full") | ||
578 | SENSE_CODE(0x5502, "Insufficient reservation resources") | ||
579 | SENSE_CODE(0x5503, "Insufficient resources") | ||
580 | SENSE_CODE(0x5504, "Insufficient registration resources") | ||
581 | SENSE_CODE(0x5505, "Insufficient access control resources") | ||
582 | SENSE_CODE(0x5506, "Auxiliary memory out of space") | ||
583 | SENSE_CODE(0x5507, "Quota error") | ||
584 | SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded") | ||
585 | SENSE_CODE(0x5509, "Medium auxiliary memory not accessible") | ||
586 | SENSE_CODE(0x550A, "Data currently unavailable") | ||
587 | SENSE_CODE(0x550B, "Insufficient power for operation") | ||
588 | SENSE_CODE(0x550C, "Insufficient resources to create rod") | ||
589 | SENSE_CODE(0x550D, "Insufficient resources to create rod token") | ||
590 | SENSE_CODE(0x550E, "Insufficient zone resources") | ||
591 | |||
592 | SENSE_CODE(0x5700, "Unable to recover table-of-contents") | ||
593 | |||
594 | SENSE_CODE(0x5800, "Generation does not exist") | ||
595 | |||
596 | SENSE_CODE(0x5900, "Updated block read") | ||
597 | |||
598 | SENSE_CODE(0x5A00, "Operator request or state change input") | ||
599 | SENSE_CODE(0x5A01, "Operator medium removal request") | ||
600 | SENSE_CODE(0x5A02, "Operator selected write protect") | ||
601 | SENSE_CODE(0x5A03, "Operator selected write permit") | ||
602 | |||
603 | SENSE_CODE(0x5B00, "Log exception") | ||
604 | SENSE_CODE(0x5B01, "Threshold condition met") | ||
605 | SENSE_CODE(0x5B02, "Log counter at maximum") | ||
606 | SENSE_CODE(0x5B03, "Log list codes exhausted") | ||
607 | |||
608 | SENSE_CODE(0x5C00, "Rpl status change") | ||
609 | SENSE_CODE(0x5C01, "Spindles synchronized") | ||
610 | SENSE_CODE(0x5C02, "Spindles not synchronized") | ||
611 | |||
612 | SENSE_CODE(0x5D00, "Failure prediction threshold exceeded") | ||
613 | SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded") | ||
614 | SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded") | ||
615 | SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded") | ||
616 | SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure") | ||
617 | SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high") | ||
618 | SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high") | ||
619 | SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high") | ||
620 | SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns") | ||
621 | SENSE_CODE(0x5D15, "Hardware impending failure access times too high") | ||
622 | SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high") | ||
623 | SENSE_CODE(0x5D17, "Hardware impending failure channel parametrics") | ||
624 | SENSE_CODE(0x5D18, "Hardware impending failure controller detected") | ||
625 | SENSE_CODE(0x5D19, "Hardware impending failure throughput performance") | ||
626 | SENSE_CODE(0x5D1A, "Hardware impending failure seek time performance") | ||
627 | SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count") | ||
628 | SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count") | ||
629 | SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure") | ||
630 | SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high") | ||
631 | SENSE_CODE(0x5D22, "Controller impending failure data error rate too high") | ||
632 | SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high") | ||
633 | SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns") | ||
634 | SENSE_CODE(0x5D25, "Controller impending failure access times too high") | ||
635 | SENSE_CODE(0x5D26, "Controller impending failure start unit times too high") | ||
636 | SENSE_CODE(0x5D27, "Controller impending failure channel parametrics") | ||
637 | SENSE_CODE(0x5D28, "Controller impending failure controller detected") | ||
638 | SENSE_CODE(0x5D29, "Controller impending failure throughput performance") | ||
639 | SENSE_CODE(0x5D2A, "Controller impending failure seek time performance") | ||
640 | SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count") | ||
641 | SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count") | ||
642 | SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure") | ||
643 | SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high") | ||
644 | SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high") | ||
645 | SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high") | ||
646 | SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns") | ||
647 | SENSE_CODE(0x5D35, "Data channel impending failure access times too high") | ||
648 | SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high") | ||
649 | SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics") | ||
650 | SENSE_CODE(0x5D38, "Data channel impending failure controller detected") | ||
651 | SENSE_CODE(0x5D39, "Data channel impending failure throughput performance") | ||
652 | SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance") | ||
653 | SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count") | ||
654 | SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count") | ||
655 | SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure") | ||
656 | SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high") | ||
657 | SENSE_CODE(0x5D42, "Servo impending failure data error rate too high") | ||
658 | SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high") | ||
659 | SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns") | ||
660 | SENSE_CODE(0x5D45, "Servo impending failure access times too high") | ||
661 | SENSE_CODE(0x5D46, "Servo impending failure start unit times too high") | ||
662 | SENSE_CODE(0x5D47, "Servo impending failure channel parametrics") | ||
663 | SENSE_CODE(0x5D48, "Servo impending failure controller detected") | ||
664 | SENSE_CODE(0x5D49, "Servo impending failure throughput performance") | ||
665 | SENSE_CODE(0x5D4A, "Servo impending failure seek time performance") | ||
666 | SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count") | ||
667 | SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count") | ||
668 | SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure") | ||
669 | SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high") | ||
670 | SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high") | ||
671 | SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high") | ||
672 | SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns") | ||
673 | SENSE_CODE(0x5D55, "Spindle impending failure access times too high") | ||
674 | SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high") | ||
675 | SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics") | ||
676 | SENSE_CODE(0x5D58, "Spindle impending failure controller detected") | ||
677 | SENSE_CODE(0x5D59, "Spindle impending failure throughput performance") | ||
678 | SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance") | ||
679 | SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count") | ||
680 | SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count") | ||
681 | SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure") | ||
682 | SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high") | ||
683 | SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high") | ||
684 | SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high") | ||
685 | SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns") | ||
686 | SENSE_CODE(0x5D65, "Firmware impending failure access times too high") | ||
687 | SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high") | ||
688 | SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics") | ||
689 | SENSE_CODE(0x5D68, "Firmware impending failure controller detected") | ||
690 | SENSE_CODE(0x5D69, "Firmware impending failure throughput performance") | ||
691 | SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance") | ||
692 | SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count") | ||
693 | SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count") | ||
694 | SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)") | ||
695 | |||
696 | SENSE_CODE(0x5E00, "Low power condition on") | ||
697 | SENSE_CODE(0x5E01, "Idle condition activated by timer") | ||
698 | SENSE_CODE(0x5E02, "Standby condition activated by timer") | ||
699 | SENSE_CODE(0x5E03, "Idle condition activated by command") | ||
700 | SENSE_CODE(0x5E04, "Standby condition activated by command") | ||
701 | SENSE_CODE(0x5E05, "Idle_b condition activated by timer") | ||
702 | SENSE_CODE(0x5E06, "Idle_b condition activated by command") | ||
703 | SENSE_CODE(0x5E07, "Idle_c condition activated by timer") | ||
704 | SENSE_CODE(0x5E08, "Idle_c condition activated by command") | ||
705 | SENSE_CODE(0x5E09, "Standby_y condition activated by timer") | ||
706 | SENSE_CODE(0x5E0A, "Standby_y condition activated by command") | ||
707 | SENSE_CODE(0x5E41, "Power state change to active") | ||
708 | SENSE_CODE(0x5E42, "Power state change to idle") | ||
709 | SENSE_CODE(0x5E43, "Power state change to standby") | ||
710 | SENSE_CODE(0x5E45, "Power state change to sleep") | ||
711 | SENSE_CODE(0x5E47, "Power state change to device control") | ||
712 | |||
713 | SENSE_CODE(0x6000, "Lamp failure") | ||
714 | |||
715 | SENSE_CODE(0x6100, "Video acquisition error") | ||
716 | SENSE_CODE(0x6101, "Unable to acquire video") | ||
717 | SENSE_CODE(0x6102, "Out of focus") | ||
718 | |||
719 | SENSE_CODE(0x6200, "Scan head positioning error") | ||
720 | |||
721 | SENSE_CODE(0x6300, "End of user area encountered on this track") | ||
722 | SENSE_CODE(0x6301, "Packet does not fit in available space") | ||
723 | |||
724 | SENSE_CODE(0x6400, "Illegal mode for this track") | ||
725 | SENSE_CODE(0x6401, "Invalid packet size") | ||
726 | |||
727 | SENSE_CODE(0x6500, "Voltage fault") | ||
728 | |||
729 | SENSE_CODE(0x6600, "Automatic document feeder cover up") | ||
730 | SENSE_CODE(0x6601, "Automatic document feeder lift up") | ||
731 | SENSE_CODE(0x6602, "Document jam in automatic document feeder") | ||
732 | SENSE_CODE(0x6603, "Document miss feed automatic in document feeder") | ||
733 | |||
734 | SENSE_CODE(0x6700, "Configuration failure") | ||
735 | SENSE_CODE(0x6701, "Configuration of incapable logical units failed") | ||
736 | SENSE_CODE(0x6702, "Add logical unit failed") | ||
737 | SENSE_CODE(0x6703, "Modification of logical unit failed") | ||
738 | SENSE_CODE(0x6704, "Exchange of logical unit failed") | ||
739 | SENSE_CODE(0x6705, "Remove of logical unit failed") | ||
740 | SENSE_CODE(0x6706, "Attachment of logical unit failed") | ||
741 | SENSE_CODE(0x6707, "Creation of logical unit failed") | ||
742 | SENSE_CODE(0x6708, "Assign failure occurred") | ||
743 | SENSE_CODE(0x6709, "Multiply assigned logical unit") | ||
744 | SENSE_CODE(0x670A, "Set target port groups command failed") | ||
745 | SENSE_CODE(0x670B, "ATA device feature not enabled") | ||
746 | |||
747 | SENSE_CODE(0x6800, "Logical unit not configured") | ||
748 | SENSE_CODE(0x6801, "Subsidiary logical unit not configured") | ||
749 | |||
750 | SENSE_CODE(0x6900, "Data loss on logical unit") | ||
751 | SENSE_CODE(0x6901, "Multiple logical unit failures") | ||
752 | SENSE_CODE(0x6902, "Parity/data mismatch") | ||
753 | |||
754 | SENSE_CODE(0x6A00, "Informational, refer to log") | ||
755 | |||
756 | SENSE_CODE(0x6B00, "State change has occurred") | ||
757 | SENSE_CODE(0x6B01, "Redundancy level got better") | ||
758 | SENSE_CODE(0x6B02, "Redundancy level got worse") | ||
759 | |||
760 | SENSE_CODE(0x6C00, "Rebuild failure occurred") | ||
761 | |||
762 | SENSE_CODE(0x6D00, "Recalculate failure occurred") | ||
763 | |||
764 | SENSE_CODE(0x6E00, "Command to logical unit failed") | ||
765 | |||
766 | SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure") | ||
767 | SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present") | ||
768 | SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established") | ||
769 | SENSE_CODE(0x6F03, "Read of scrambled sector without authentication") | ||
770 | SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region") | ||
771 | SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error") | ||
772 | SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording") | ||
773 | SENSE_CODE(0x6F07, "Conflict in binding nonce recording") | ||
774 | /* | ||
775 | * SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn") | ||
776 | */ | ||
777 | SENSE_CODE(0x7100, "Decompression exception long algorithm id") | ||
778 | |||
779 | SENSE_CODE(0x7200, "Session fixation error") | ||
780 | SENSE_CODE(0x7201, "Session fixation error writing lead-in") | ||
781 | SENSE_CODE(0x7202, "Session fixation error writing lead-out") | ||
782 | SENSE_CODE(0x7203, "Session fixation error - incomplete track in session") | ||
783 | SENSE_CODE(0x7204, "Empty or partially written reserved track") | ||
784 | SENSE_CODE(0x7205, "No more track reservations allowed") | ||
785 | SENSE_CODE(0x7206, "RMZ extension is not allowed") | ||
786 | SENSE_CODE(0x7207, "No more test zone extensions are allowed") | ||
787 | |||
788 | SENSE_CODE(0x7300, "Cd control error") | ||
789 | SENSE_CODE(0x7301, "Power calibration area almost full") | ||
790 | SENSE_CODE(0x7302, "Power calibration area is full") | ||
791 | SENSE_CODE(0x7303, "Power calibration area error") | ||
792 | SENSE_CODE(0x7304, "Program memory area update failure") | ||
793 | SENSE_CODE(0x7305, "Program memory area is full") | ||
794 | SENSE_CODE(0x7306, "RMA/PMA is almost full") | ||
795 | SENSE_CODE(0x7310, "Current power calibration area almost full") | ||
796 | SENSE_CODE(0x7311, "Current power calibration area is full") | ||
797 | SENSE_CODE(0x7317, "RDZ is full") | ||
798 | |||
799 | SENSE_CODE(0x7400, "Security error") | ||
800 | SENSE_CODE(0x7401, "Unable to decrypt data") | ||
801 | SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting") | ||
802 | SENSE_CODE(0x7403, "Incorrect data encryption key") | ||
803 | SENSE_CODE(0x7404, "Cryptographic integrity validation failed") | ||
804 | SENSE_CODE(0x7405, "Error decrypting data") | ||
805 | SENSE_CODE(0x7406, "Unknown signature verification key") | ||
806 | SENSE_CODE(0x7407, "Encryption parameters not useable") | ||
807 | SENSE_CODE(0x7408, "Digital signature validation failure") | ||
808 | SENSE_CODE(0x7409, "Encryption mode mismatch on read") | ||
809 | SENSE_CODE(0x740A, "Encrypted block not raw read enabled") | ||
810 | SENSE_CODE(0x740B, "Incorrect Encryption parameters") | ||
811 | SENSE_CODE(0x740C, "Unable to decrypt parameter list") | ||
812 | SENSE_CODE(0x740D, "Encryption algorithm disabled") | ||
813 | SENSE_CODE(0x7410, "SA creation parameter value invalid") | ||
814 | SENSE_CODE(0x7411, "SA creation parameter value rejected") | ||
815 | SENSE_CODE(0x7412, "Invalid SA usage") | ||
816 | SENSE_CODE(0x7421, "Data Encryption configuration prevented") | ||
817 | SENSE_CODE(0x7430, "SA creation parameter not supported") | ||
818 | SENSE_CODE(0x7440, "Authentication failed") | ||
819 | SENSE_CODE(0x7461, "External data encryption key manager access error") | ||
820 | SENSE_CODE(0x7462, "External data encryption key manager error") | ||
821 | SENSE_CODE(0x7463, "External data encryption key not found") | ||
822 | SENSE_CODE(0x7464, "External data encryption request not authorized") | ||
823 | SENSE_CODE(0x746E, "External data encryption control timeout") | ||
824 | SENSE_CODE(0x746F, "External data encryption control error") | ||
825 | SENSE_CODE(0x7471, "Logical unit access not authorized") | ||
826 | SENSE_CODE(0x7479, "Security conflict in translated device") | ||
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h index d7f5ba6ba84c..8ed778d4dbb9 100644 --- a/drivers/scsi/snic/snic.h +++ b/drivers/scsi/snic/snic.h | |||
@@ -95,6 +95,8 @@ | |||
95 | #define SNIC_DEV_RST_NOTSUP BIT(25) | 95 | #define SNIC_DEV_RST_NOTSUP BIT(25) |
96 | #define SNIC_SCSI_CLEANUP BIT(26) | 96 | #define SNIC_SCSI_CLEANUP BIT(26) |
97 | #define SNIC_HOST_RESET_ISSUED BIT(27) | 97 | #define SNIC_HOST_RESET_ISSUED BIT(27) |
98 | #define SNIC_HOST_RESET_CMD_TERM \ | ||
99 | (SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED) | ||
98 | 100 | ||
99 | #define SNIC_ABTS_TIMEOUT 30000 /* msec */ | 101 | #define SNIC_ABTS_TIMEOUT 30000 /* msec */ |
100 | #define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ | 102 | #define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ |
@@ -216,9 +218,10 @@ enum snic_msix_intr_index { | |||
216 | SNIC_MSIX_INTR_MAX, | 218 | SNIC_MSIX_INTR_MAX, |
217 | }; | 219 | }; |
218 | 220 | ||
221 | #define SNIC_INTRHDLR_NAMSZ (2 * IFNAMSIZ) | ||
219 | struct snic_msix_entry { | 222 | struct snic_msix_entry { |
220 | int requested; | 223 | int requested; |
221 | char devname[IFNAMSIZ]; | 224 | char devname[SNIC_INTRHDLR_NAMSZ]; |
222 | irqreturn_t (*isr)(int, void *); | 225 | irqreturn_t (*isr)(int, void *); |
223 | void *devid; | 226 | void *devid; |
224 | }; | 227 | }; |
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c index ab0e06b0b4ff..449b03f3bbd3 100644 --- a/drivers/scsi/snic/snic_ctl.c +++ b/drivers/scsi/snic/snic_ctl.c | |||
@@ -39,17 +39,15 @@ snic_handle_link(struct work_struct *work) | |||
39 | { | 39 | { |
40 | struct snic *snic = container_of(work, struct snic, link_work); | 40 | struct snic *snic = container_of(work, struct snic, link_work); |
41 | 41 | ||
42 | if (snic->config.xpt_type != SNIC_DAS) { | 42 | if (snic->config.xpt_type == SNIC_DAS) |
43 | SNIC_HOST_INFO(snic->shost, "Link Event Received.\n"); | ||
44 | SNIC_ASSERT_NOT_IMPL(1); | ||
45 | |||
46 | return; | 43 | return; |
47 | } | ||
48 | 44 | ||
49 | snic->link_status = svnic_dev_link_status(snic->vdev); | 45 | snic->link_status = svnic_dev_link_status(snic->vdev); |
50 | snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev); | 46 | snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev); |
51 | SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n", | 47 | SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n", |
52 | ((snic->link_status) ? "Up" : "Down")); | 48 | ((snic->link_status) ? "Up" : "Down")); |
49 | |||
50 | SNIC_ASSERT_NOT_IMPL(1); | ||
53 | } | 51 | } |
54 | 52 | ||
55 | 53 | ||
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c index 1686f0196251..d30280326bde 100644 --- a/drivers/scsi/snic/snic_debugfs.c +++ b/drivers/scsi/snic/snic_debugfs.c | |||
@@ -264,12 +264,14 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
264 | "Aborts Fail : %lld\n" | 264 | "Aborts Fail : %lld\n" |
265 | "Aborts Driver Timeout : %lld\n" | 265 | "Aborts Driver Timeout : %lld\n" |
266 | "Abort FW Timeout : %lld\n" | 266 | "Abort FW Timeout : %lld\n" |
267 | "Abort IO NOT Found : %lld\n", | 267 | "Abort IO NOT Found : %lld\n" |
268 | "Abort Queuing Failed : %lld\n", | ||
268 | (u64) atomic64_read(&stats->abts.num), | 269 | (u64) atomic64_read(&stats->abts.num), |
269 | (u64) atomic64_read(&stats->abts.fail), | 270 | (u64) atomic64_read(&stats->abts.fail), |
270 | (u64) atomic64_read(&stats->abts.drv_tmo), | 271 | (u64) atomic64_read(&stats->abts.drv_tmo), |
271 | (u64) atomic64_read(&stats->abts.fw_tmo), | 272 | (u64) atomic64_read(&stats->abts.fw_tmo), |
272 | (u64) atomic64_read(&stats->abts.io_not_found)); | 273 | (u64) atomic64_read(&stats->abts.io_not_found), |
274 | (u64) atomic64_read(&stats->abts.q_fail)); | ||
273 | 275 | ||
274 | /* Dump Reset Stats */ | 276 | /* Dump Reset Stats */ |
275 | seq_printf(sfp, | 277 | seq_printf(sfp, |
@@ -316,7 +318,9 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
316 | seq_printf(sfp, | 318 | seq_printf(sfp, |
317 | "Last ISR Time : %llu (%8lu.%8lu)\n" | 319 | "Last ISR Time : %llu (%8lu.%8lu)\n" |
318 | "Last Ack Time : %llu (%8lu.%8lu)\n" | 320 | "Last Ack Time : %llu (%8lu.%8lu)\n" |
319 | "ISRs : %llu\n" | 321 | "Ack ISRs : %llu\n" |
322 | "IO Cmpl ISRs : %llu\n" | ||
323 | "Err Notify ISRs : %llu\n" | ||
320 | "Max CQ Entries : %lld\n" | 324 | "Max CQ Entries : %lld\n" |
321 | "Data Count Mismatch : %lld\n" | 325 | "Data Count Mismatch : %lld\n" |
322 | "IOs w/ Timeout Status : %lld\n" | 326 | "IOs w/ Timeout Status : %lld\n" |
@@ -324,12 +328,17 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
324 | "IOs w/ SGL Invalid Stat : %lld\n" | 328 | "IOs w/ SGL Invalid Stat : %lld\n" |
325 | "WQ Desc Alloc Fail : %lld\n" | 329 | "WQ Desc Alloc Fail : %lld\n" |
326 | "Queue Full : %lld\n" | 330 | "Queue Full : %lld\n" |
331 | "Queue Ramp Up : %lld\n" | ||
332 | "Queue Ramp Down : %lld\n" | ||
333 | "Queue Last Queue Depth : %lld\n" | ||
327 | "Target Not Ready : %lld\n", | 334 | "Target Not Ready : %lld\n", |
328 | (u64) stats->misc.last_isr_time, | 335 | (u64) stats->misc.last_isr_time, |
329 | last_isr_tms.tv_sec, last_isr_tms.tv_nsec, | 336 | last_isr_tms.tv_sec, last_isr_tms.tv_nsec, |
330 | (u64)stats->misc.last_ack_time, | 337 | (u64)stats->misc.last_ack_time, |
331 | last_ack_tms.tv_sec, last_ack_tms.tv_nsec, | 338 | last_ack_tms.tv_sec, last_ack_tms.tv_nsec, |
332 | (u64) atomic64_read(&stats->misc.isr_cnt), | 339 | (u64) atomic64_read(&stats->misc.ack_isr_cnt), |
340 | (u64) atomic64_read(&stats->misc.cmpl_isr_cnt), | ||
341 | (u64) atomic64_read(&stats->misc.errnotify_isr_cnt), | ||
333 | (u64) atomic64_read(&stats->misc.max_cq_ents), | 342 | (u64) atomic64_read(&stats->misc.max_cq_ents), |
334 | (u64) atomic64_read(&stats->misc.data_cnt_mismat), | 343 | (u64) atomic64_read(&stats->misc.data_cnt_mismat), |
335 | (u64) atomic64_read(&stats->misc.io_tmo), | 344 | (u64) atomic64_read(&stats->misc.io_tmo), |
@@ -337,6 +346,9 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
337 | (u64) atomic64_read(&stats->misc.sgl_inval), | 346 | (u64) atomic64_read(&stats->misc.sgl_inval), |
338 | (u64) atomic64_read(&stats->misc.wq_alloc_fail), | 347 | (u64) atomic64_read(&stats->misc.wq_alloc_fail), |
339 | (u64) atomic64_read(&stats->misc.qfull), | 348 | (u64) atomic64_read(&stats->misc.qfull), |
349 | (u64) atomic64_read(&stats->misc.qsz_rampup), | ||
350 | (u64) atomic64_read(&stats->misc.qsz_rampdown), | ||
351 | (u64) atomic64_read(&stats->misc.last_qsz), | ||
340 | (u64) atomic64_read(&stats->misc.tgt_not_rdy)); | 352 | (u64) atomic64_read(&stats->misc.tgt_not_rdy)); |
341 | 353 | ||
342 | return 0; | 354 | return 0; |
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 5f6321759ad9..b0fefd67cac3 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c | |||
@@ -171,7 +171,7 @@ snic_scsi_scan_tgt(struct work_struct *work) | |||
171 | tgt->channel, | 171 | tgt->channel, |
172 | tgt->scsi_tgt_id, | 172 | tgt->scsi_tgt_id, |
173 | SCAN_WILD_CARD, | 173 | SCAN_WILD_CARD, |
174 | 1); | 174 | SCSI_SCAN_RESCAN); |
175 | 175 | ||
176 | spin_lock_irqsave(shost->host_lock, flags); | 176 | spin_lock_irqsave(shost->host_lock, flags); |
177 | tgt->flags &= ~SNIC_TGT_SCAN_PENDING; | 177 | tgt->flags &= ~SNIC_TGT_SCAN_PENDING; |
@@ -480,10 +480,21 @@ int | |||
480 | snic_disc_start(struct snic *snic) | 480 | snic_disc_start(struct snic *snic) |
481 | { | 481 | { |
482 | struct snic_disc *disc = &snic->disc; | 482 | struct snic_disc *disc = &snic->disc; |
483 | unsigned long flags; | ||
483 | int ret = 0; | 484 | int ret = 0; |
484 | 485 | ||
485 | SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); | 486 | SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); |
486 | 487 | ||
488 | spin_lock_irqsave(&snic->snic_lock, flags); | ||
489 | if (snic->in_remove) { | ||
490 | spin_unlock_irqrestore(&snic->snic_lock, flags); | ||
491 | SNIC_ERR("snic driver removal in progress ...\n"); | ||
492 | ret = 0; | ||
493 | |||
494 | return ret; | ||
495 | } | ||
496 | spin_unlock_irqrestore(&snic->snic_lock, flags); | ||
497 | |||
487 | mutex_lock(&disc->mutex); | 498 | mutex_lock(&disc->mutex); |
488 | if (disc->state == SNIC_DISC_PENDING) { | 499 | if (disc->state == SNIC_DISC_PENDING) { |
489 | disc->req_cnt++; | 500 | disc->req_cnt++; |
@@ -533,6 +544,8 @@ snic_tgt_del_all(struct snic *snic) | |||
533 | struct list_head *cur, *nxt; | 544 | struct list_head *cur, *nxt; |
534 | unsigned long flags; | 545 | unsigned long flags; |
535 | 546 | ||
547 | scsi_flush_work(snic->shost); | ||
548 | |||
536 | mutex_lock(&snic->disc.mutex); | 549 | mutex_lock(&snic->disc.mutex); |
537 | spin_lock_irqsave(snic->shost->host_lock, flags); | 550 | spin_lock_irqsave(snic->shost->host_lock, flags); |
538 | 551 | ||
@@ -545,7 +558,7 @@ snic_tgt_del_all(struct snic *snic) | |||
545 | tgt = NULL; | 558 | tgt = NULL; |
546 | } | 559 | } |
547 | spin_unlock_irqrestore(snic->shost->host_lock, flags); | 560 | spin_unlock_irqrestore(snic->shost->host_lock, flags); |
548 | |||
549 | scsi_flush_work(snic->shost); | ||
550 | mutex_unlock(&snic->disc.mutex); | 561 | mutex_unlock(&snic->disc.mutex); |
562 | |||
563 | flush_workqueue(snic_glob->event_q); | ||
551 | } /* end of snic_tgt_del_all */ | 564 | } /* end of snic_tgt_del_all */ |
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h index 2cfaf2dc915f..c5f9e1917a8e 100644 --- a/drivers/scsi/snic/snic_fwint.h +++ b/drivers/scsi/snic/snic_fwint.h | |||
@@ -414,7 +414,7 @@ enum snic_ev_type { | |||
414 | /* Payload 88 bytes = 128 - 24 - 16 */ | 414 | /* Payload 88 bytes = 128 - 24 - 16 */ |
415 | #define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ | 415 | #define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ |
416 | sizeof(struct snic_io_hdr) - \ | 416 | sizeof(struct snic_io_hdr) - \ |
417 | (2 * sizeof(u64)))) | 417 | (2 * sizeof(u64)) - sizeof(ulong))) |
418 | 418 | ||
419 | /* | 419 | /* |
420 | * snic_host_req: host -> firmware request | 420 | * snic_host_req: host -> firmware request |
@@ -448,6 +448,8 @@ struct snic_host_req { | |||
448 | /* hba reset */ | 448 | /* hba reset */ |
449 | struct snic_hba_reset reset; | 449 | struct snic_hba_reset reset; |
450 | } u; | 450 | } u; |
451 | |||
452 | ulong req_pa; | ||
451 | }; /* end of snic_host_req structure */ | 453 | }; /* end of snic_host_req structure */ |
452 | 454 | ||
453 | 455 | ||
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c index 993db7de4e4b..8e69548395b9 100644 --- a/drivers/scsi/snic/snic_io.c +++ b/drivers/scsi/snic/snic_io.c | |||
@@ -48,7 +48,7 @@ snic_wq_cmpl_frame_send(struct vnic_wq *wq, | |||
48 | SNIC_TRC(snic->shost->host_no, 0, 0, | 48 | SNIC_TRC(snic->shost->host_no, 0, 0, |
49 | ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, | 49 | ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, |
50 | 0); | 50 | 0); |
51 | pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); | 51 | |
52 | buf->os_buf = NULL; | 52 | buf->os_buf = NULL; |
53 | } | 53 | } |
54 | 54 | ||
@@ -137,13 +137,36 @@ snic_select_wq(struct snic *snic) | |||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static int | ||
141 | snic_wqdesc_avail(struct snic *snic, int q_num, int req_type) | ||
142 | { | ||
143 | int nr_wqdesc = snic->config.wq_enet_desc_count; | ||
144 | |||
145 | if (q_num > 0) { | ||
146 | /* | ||
147 | * Multi Queue case, additional care is required. | ||
148 | * Per WQ active requests need to be maintained. | ||
149 | */ | ||
150 | SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n"); | ||
151 | SNIC_BUG_ON(q_num > 0); | ||
152 | |||
153 | return -1; | ||
154 | } | ||
155 | |||
156 | nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs); | ||
157 | |||
158 | return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1); | ||
159 | } | ||
160 | |||
140 | int | 161 | int |
141 | snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) | 162 | snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) |
142 | { | 163 | { |
143 | dma_addr_t pa = 0; | 164 | dma_addr_t pa = 0; |
144 | unsigned long flags; | 165 | unsigned long flags; |
145 | struct snic_fw_stats *fwstats = &snic->s_stats.fw; | 166 | struct snic_fw_stats *fwstats = &snic->s_stats.fw; |
167 | struct snic_host_req *req = (struct snic_host_req *) os_buf; | ||
146 | long act_reqs; | 168 | long act_reqs; |
169 | long desc_avail = 0; | ||
147 | int q_num = 0; | 170 | int q_num = 0; |
148 | 171 | ||
149 | snic_print_desc(__func__, os_buf, len); | 172 | snic_print_desc(__func__, os_buf, len); |
@@ -156,11 +179,15 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) | |||
156 | return -ENOMEM; | 179 | return -ENOMEM; |
157 | } | 180 | } |
158 | 181 | ||
182 | req->req_pa = (ulong)pa; | ||
183 | |||
159 | q_num = snic_select_wq(snic); | 184 | q_num = snic_select_wq(snic); |
160 | 185 | ||
161 | spin_lock_irqsave(&snic->wq_lock[q_num], flags); | 186 | spin_lock_irqsave(&snic->wq_lock[q_num], flags); |
162 | if (!svnic_wq_desc_avail(snic->wq)) { | 187 | desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); |
188 | if (desc_avail <= 0) { | ||
163 | pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); | 189 | pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); |
190 | req->req_pa = 0; | ||
164 | spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); | 191 | spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); |
165 | atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); | 192 | atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); |
166 | SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); | 193 | SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); |
@@ -169,10 +196,13 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) | |||
169 | } | 196 | } |
170 | 197 | ||
171 | snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); | 198 | snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); |
199 | /* | ||
200 | * Update stats | ||
201 | * note: when multi queue enabled, fw actv_reqs should be per queue. | ||
202 | */ | ||
203 | act_reqs = atomic64_inc_return(&fwstats->actv_reqs); | ||
172 | spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); | 204 | spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); |
173 | 205 | ||
174 | /* Update stats */ | ||
175 | act_reqs = atomic64_inc_return(&fwstats->actv_reqs); | ||
176 | if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) | 206 | if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) |
177 | atomic64_set(&fwstats->max_actv_reqs, act_reqs); | 207 | atomic64_set(&fwstats->max_actv_reqs, act_reqs); |
178 | 208 | ||
@@ -318,11 +348,31 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi) | |||
318 | "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", | 348 | "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", |
319 | rqi, rqi->req, rqi->abort_req, rqi->dr_req); | 349 | rqi, rqi->req, rqi->abort_req, rqi->dr_req); |
320 | 350 | ||
321 | if (rqi->abort_req) | 351 | if (rqi->abort_req) { |
352 | if (rqi->abort_req->req_pa) | ||
353 | pci_unmap_single(snic->pdev, | ||
354 | rqi->abort_req->req_pa, | ||
355 | sizeof(struct snic_host_req), | ||
356 | PCI_DMA_TODEVICE); | ||
357 | |||
322 | mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); | 358 | mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); |
359 | } | ||
360 | |||
361 | if (rqi->dr_req) { | ||
362 | if (rqi->dr_req->req_pa) | ||
363 | pci_unmap_single(snic->pdev, | ||
364 | rqi->dr_req->req_pa, | ||
365 | sizeof(struct snic_host_req), | ||
366 | PCI_DMA_TODEVICE); | ||
323 | 367 | ||
324 | if (rqi->dr_req) | ||
325 | mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); | 368 | mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); |
369 | } | ||
370 | |||
371 | if (rqi->req->req_pa) | ||
372 | pci_unmap_single(snic->pdev, | ||
373 | rqi->req->req_pa, | ||
374 | rqi->req_len, | ||
375 | PCI_DMA_TODEVICE); | ||
326 | 376 | ||
327 | mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); | 377 | mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); |
328 | } | 378 | } |
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c index a85fae25ec8c..f552003128c6 100644 --- a/drivers/scsi/snic/snic_isr.c +++ b/drivers/scsi/snic/snic_isr.c | |||
@@ -38,7 +38,7 @@ snic_isr_msix_wq(int irq, void *data) | |||
38 | unsigned long wq_work_done = 0; | 38 | unsigned long wq_work_done = 0; |
39 | 39 | ||
40 | snic->s_stats.misc.last_isr_time = jiffies; | 40 | snic->s_stats.misc.last_isr_time = jiffies; |
41 | atomic64_inc(&snic->s_stats.misc.isr_cnt); | 41 | atomic64_inc(&snic->s_stats.misc.ack_isr_cnt); |
42 | 42 | ||
43 | wq_work_done = snic_wq_cmpl_handler(snic, -1); | 43 | wq_work_done = snic_wq_cmpl_handler(snic, -1); |
44 | svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], | 44 | svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], |
@@ -56,7 +56,7 @@ snic_isr_msix_io_cmpl(int irq, void *data) | |||
56 | unsigned long iocmpl_work_done = 0; | 56 | unsigned long iocmpl_work_done = 0; |
57 | 57 | ||
58 | snic->s_stats.misc.last_isr_time = jiffies; | 58 | snic->s_stats.misc.last_isr_time = jiffies; |
59 | atomic64_inc(&snic->s_stats.misc.isr_cnt); | 59 | atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt); |
60 | 60 | ||
61 | iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); | 61 | iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); |
62 | svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], | 62 | svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], |
@@ -73,7 +73,7 @@ snic_isr_msix_err_notify(int irq, void *data) | |||
73 | struct snic *snic = data; | 73 | struct snic *snic = data; |
74 | 74 | ||
75 | snic->s_stats.misc.last_isr_time = jiffies; | 75 | snic->s_stats.misc.last_isr_time = jiffies; |
76 | atomic64_inc(&snic->s_stats.misc.isr_cnt); | 76 | atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt); |
77 | 77 | ||
78 | svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); | 78 | svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); |
79 | snic_log_q_error(snic); | 79 | snic_log_q_error(snic); |
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index 2b3c25371d76..396b32dca074 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c | |||
@@ -98,11 +98,18 @@ snic_slave_configure(struct scsi_device *sdev) | |||
98 | static int | 98 | static int |
99 | snic_change_queue_depth(struct scsi_device *sdev, int qdepth) | 99 | snic_change_queue_depth(struct scsi_device *sdev, int qdepth) |
100 | { | 100 | { |
101 | struct snic *snic = shost_priv(sdev->host); | ||
101 | int qsz = 0; | 102 | int qsz = 0; |
102 | 103 | ||
103 | qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); | 104 | qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); |
105 | if (qsz < sdev->queue_depth) | ||
106 | atomic64_inc(&snic->s_stats.misc.qsz_rampdown); | ||
107 | else if (qsz > sdev->queue_depth) | ||
108 | atomic64_inc(&snic->s_stats.misc.qsz_rampup); | ||
109 | |||
110 | atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); | ||
111 | |||
104 | scsi_change_queue_depth(sdev, qsz); | 112 | scsi_change_queue_depth(sdev, qsz); |
105 | SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth); | ||
106 | 113 | ||
107 | return sdev->queue_depth; | 114 | return sdev->queue_depth; |
108 | } | 115 | } |
@@ -624,19 +631,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
624 | goto err_free_tmreq_pool; | 631 | goto err_free_tmreq_pool; |
625 | } | 632 | } |
626 | 633 | ||
627 | /* | ||
628 | * Initialization done with PCI system, hardware, firmware. | ||
629 | * Add shost to SCSI | ||
630 | */ | ||
631 | ret = snic_add_host(shost, pdev); | ||
632 | if (ret) { | ||
633 | SNIC_HOST_ERR(shost, | ||
634 | "Adding scsi host Failed ... exiting. %d\n", | ||
635 | ret); | ||
636 | |||
637 | goto err_notify_unset; | ||
638 | } | ||
639 | |||
640 | spin_lock_irqsave(&snic_glob->snic_list_lock, flags); | 634 | spin_lock_irqsave(&snic_glob->snic_list_lock, flags); |
641 | list_add_tail(&snic->list, &snic_glob->snic_list); | 635 | list_add_tail(&snic->list, &snic_glob->snic_list); |
642 | spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); | 636 | spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); |
@@ -669,8 +663,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
669 | for (i = 0; i < snic->intr_count; i++) | 663 | for (i = 0; i < snic->intr_count; i++) |
670 | svnic_intr_unmask(&snic->intr[i]); | 664 | svnic_intr_unmask(&snic->intr[i]); |
671 | 665 | ||
672 | snic_set_state(snic, SNIC_ONLINE); | ||
673 | |||
674 | /* Get snic params */ | 666 | /* Get snic params */ |
675 | ret = snic_get_conf(snic); | 667 | ret = snic_get_conf(snic); |
676 | if (ret) { | 668 | if (ret) { |
@@ -681,6 +673,21 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
681 | goto err_get_conf; | 673 | goto err_get_conf; |
682 | } | 674 | } |
683 | 675 | ||
676 | /* | ||
677 | * Initialization done with PCI system, hardware, firmware. | ||
678 | * Add shost to SCSI | ||
679 | */ | ||
680 | ret = snic_add_host(shost, pdev); | ||
681 | if (ret) { | ||
682 | SNIC_HOST_ERR(shost, | ||
683 | "Adding scsi host Failed ... exiting. %d\n", | ||
684 | ret); | ||
685 | |||
686 | goto err_get_conf; | ||
687 | } | ||
688 | |||
689 | snic_set_state(snic, SNIC_ONLINE); | ||
690 | |||
684 | ret = snic_disc_start(snic); | 691 | ret = snic_disc_start(snic); |
685 | if (ret) { | 692 | if (ret) { |
686 | SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", | 693 | SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", |
@@ -705,6 +712,8 @@ err_req_intr: | |||
705 | svnic_dev_disable(snic->vdev); | 712 | svnic_dev_disable(snic->vdev); |
706 | 713 | ||
707 | err_vdev_enable: | 714 | err_vdev_enable: |
715 | svnic_dev_notify_unset(snic->vdev); | ||
716 | |||
708 | for (i = 0; i < snic->wq_count; i++) { | 717 | for (i = 0; i < snic->wq_count; i++) { |
709 | int rc = 0; | 718 | int rc = 0; |
710 | 719 | ||
@@ -718,9 +727,6 @@ err_vdev_enable: | |||
718 | } | 727 | } |
719 | snic_del_host(snic->shost); | 728 | snic_del_host(snic->shost); |
720 | 729 | ||
721 | err_notify_unset: | ||
722 | svnic_dev_notify_unset(snic->vdev); | ||
723 | |||
724 | err_free_tmreq_pool: | 730 | err_free_tmreq_pool: |
725 | mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); | 731 | mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); |
726 | 732 | ||
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c index 2c7b4c321cbe..abada16b375b 100644 --- a/drivers/scsi/snic/snic_scsi.c +++ b/drivers/scsi/snic/snic_scsi.c | |||
@@ -221,11 +221,15 @@ snic_queue_icmnd_req(struct snic *snic, | |||
221 | pa, /* sense buffer pa */ | 221 | pa, /* sense buffer pa */ |
222 | SCSI_SENSE_BUFFERSIZE); | 222 | SCSI_SENSE_BUFFERSIZE); |
223 | 223 | ||
224 | atomic64_inc(&snic->s_stats.io.active); | ||
224 | ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); | 225 | ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); |
225 | if (ret) | 226 | if (ret) { |
227 | atomic64_dec(&snic->s_stats.io.active); | ||
226 | SNIC_HOST_ERR(snic->shost, | 228 | SNIC_HOST_ERR(snic->shost, |
227 | "QIcmnd: Queuing Icmnd Failed. ret = %d\n", | 229 | "QIcmnd: Queuing Icmnd Failed. ret = %d\n", |
228 | ret); | 230 | ret); |
231 | } else | ||
232 | snic_stats_update_active_ios(&snic->s_stats); | ||
229 | 233 | ||
230 | return ret; | 234 | return ret; |
231 | } /* end of snic_queue_icmnd_req */ | 235 | } /* end of snic_queue_icmnd_req */ |
@@ -361,8 +365,7 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) | |||
361 | if (ret) { | 365 | if (ret) { |
362 | SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); | 366 | SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); |
363 | ret = SCSI_MLQUEUE_HOST_BUSY; | 367 | ret = SCSI_MLQUEUE_HOST_BUSY; |
364 | } else | 368 | } |
365 | snic_stats_update_active_ios(&snic->s_stats); | ||
366 | 369 | ||
367 | atomic_dec(&snic->ios_inflight); | 370 | atomic_dec(&snic->ios_inflight); |
368 | 371 | ||
@@ -598,6 +601,12 @@ snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) | |||
598 | sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc), | 601 | sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc), |
599 | CMD_FLAGS(sc), rqi); | 602 | CMD_FLAGS(sc), rqi); |
600 | 603 | ||
604 | if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { | ||
605 | spin_unlock_irqrestore(io_lock, flags); | ||
606 | |||
607 | return; | ||
608 | } | ||
609 | |||
601 | SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx); | 610 | SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx); |
602 | WARN_ON_ONCE(req); | 611 | WARN_ON_ONCE(req); |
603 | if (!rqi) { | 612 | if (!rqi) { |
@@ -779,6 +788,11 @@ snic_process_itmf_cmpl(struct snic *snic, | |||
779 | 788 | ||
780 | io_lock = snic_io_lock_hash(snic, sc); | 789 | io_lock = snic_io_lock_hash(snic, sc); |
781 | spin_lock_irqsave(io_lock, flags); | 790 | spin_lock_irqsave(io_lock, flags); |
791 | if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { | ||
792 | spin_unlock_irqrestore(io_lock, flags); | ||
793 | |||
794 | return ret; | ||
795 | } | ||
782 | rqi = (struct snic_req_info *) CMD_SP(sc); | 796 | rqi = (struct snic_req_info *) CMD_SP(sc); |
783 | WARN_ON_ONCE(!rqi); | 797 | WARN_ON_ONCE(!rqi); |
784 | 798 | ||
@@ -1001,10 +1015,11 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) | |||
1001 | unsigned long flags, gflags; | 1015 | unsigned long flags, gflags; |
1002 | int ret = 0; | 1016 | int ret = 0; |
1003 | 1017 | ||
1018 | snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); | ||
1004 | SNIC_HOST_INFO(snic->shost, | 1019 | SNIC_HOST_INFO(snic->shost, |
1005 | "reset_cmpl:HBA Reset Completion received.\n"); | 1020 | "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n", |
1021 | cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); | ||
1006 | 1022 | ||
1007 | snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); | ||
1008 | SNIC_SCSI_DBG(snic->shost, | 1023 | SNIC_SCSI_DBG(snic->shost, |
1009 | "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", | 1024 | "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", |
1010 | typ, hdr_stat, cmnd_id, hid, ctx); | 1025 | typ, hdr_stat, cmnd_id, hid, ctx); |
@@ -1012,6 +1027,9 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) | |||
1012 | /* spl case, host reset issued through ioctl */ | 1027 | /* spl case, host reset issued through ioctl */ |
1013 | if (cmnd_id == SCSI_NO_TAG) { | 1028 | if (cmnd_id == SCSI_NO_TAG) { |
1014 | rqi = (struct snic_req_info *) ctx; | 1029 | rqi = (struct snic_req_info *) ctx; |
1030 | SNIC_HOST_INFO(snic->shost, | ||
1031 | "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n", | ||
1032 | cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); | ||
1015 | sc = rqi->sc; | 1033 | sc = rqi->sc; |
1016 | 1034 | ||
1017 | goto ioctl_hba_rst; | 1035 | goto ioctl_hba_rst; |
@@ -1038,6 +1056,10 @@ ioctl_hba_rst: | |||
1038 | return ret; | 1056 | return ret; |
1039 | } | 1057 | } |
1040 | 1058 | ||
1059 | SNIC_HOST_INFO(snic->shost, | ||
1060 | "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n", | ||
1061 | sc, rqi, cmnd_id, CMD_FLAGS(sc)); | ||
1062 | |||
1041 | io_lock = snic_io_lock_hash(snic, sc); | 1063 | io_lock = snic_io_lock_hash(snic, sc); |
1042 | spin_lock_irqsave(io_lock, flags); | 1064 | spin_lock_irqsave(io_lock, flags); |
1043 | 1065 | ||
@@ -1454,11 +1476,19 @@ snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc) | |||
1454 | case SNIC_STAT_IO_SUCCESS: | 1476 | case SNIC_STAT_IO_SUCCESS: |
1455 | case SNIC_STAT_IO_NOT_FOUND: | 1477 | case SNIC_STAT_IO_NOT_FOUND: |
1456 | ret = SUCCESS; | 1478 | ret = SUCCESS; |
1479 | /* | ||
1480 | * If abort path doesn't call scsi_done(), | ||
1481 | * the # IO timeouts == 2, will cause the LUN offline. | ||
1482 | * Call scsi_done to complete the IO. | ||
1483 | */ | ||
1484 | sc->result = (DID_ERROR << 16); | ||
1485 | sc->scsi_done(sc); | ||
1457 | break; | 1486 | break; |
1458 | 1487 | ||
1459 | default: | 1488 | default: |
1460 | /* Firmware completed abort with error */ | 1489 | /* Firmware completed abort with error */ |
1461 | ret = FAILED; | 1490 | ret = FAILED; |
1491 | rqi = NULL; | ||
1462 | break; | 1492 | break; |
1463 | } | 1493 | } |
1464 | 1494 | ||
@@ -1554,6 +1584,7 @@ snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc) | |||
1554 | /* Now Queue the abort command to firmware */ | 1584 | /* Now Queue the abort command to firmware */ |
1555 | ret = snic_queue_abort_req(snic, rqi, sc, tmf); | 1585 | ret = snic_queue_abort_req(snic, rqi, sc, tmf); |
1556 | if (ret) { | 1586 | if (ret) { |
1587 | atomic64_inc(&snic->s_stats.abts.q_fail); | ||
1557 | SNIC_HOST_ERR(snic->shost, | 1588 | SNIC_HOST_ERR(snic->shost, |
1558 | "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n", | 1589 | "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n", |
1559 | tag, ret, CMD_FLAGS(sc)); | 1590 | tag, ret, CMD_FLAGS(sc)); |
@@ -1830,6 +1861,9 @@ snic_dr_clean_single_req(struct snic *snic, | |||
1830 | 1861 | ||
1831 | snic_release_req_buf(snic, rqi, sc); | 1862 | snic_release_req_buf(snic, rqi, sc); |
1832 | 1863 | ||
1864 | sc->result = (DID_ERROR << 16); | ||
1865 | sc->scsi_done(sc); | ||
1866 | |||
1833 | ret = 0; | 1867 | ret = 0; |
1834 | 1868 | ||
1835 | return ret; | 1869 | return ret; |
@@ -2384,6 +2418,13 @@ snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc) | |||
2384 | "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", | 2418 | "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", |
2385 | sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); | 2419 | sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); |
2386 | 2420 | ||
2421 | /* | ||
2422 | * CASE : FW didn't post itmf completion due to PCIe Errors. | ||
2423 | * Marking the abort status as Success to call scsi completion | ||
2424 | * in snic_abort_finish() | ||
2425 | */ | ||
2426 | CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS; | ||
2427 | |||
2387 | rqi = (struct snic_req_info *) CMD_SP(sc); | 2428 | rqi = (struct snic_req_info *) CMD_SP(sc); |
2388 | if (!rqi) | 2429 | if (!rqi) |
2389 | return; | 2430 | return; |
@@ -2459,8 +2500,9 @@ snic_scsi_cleanup(struct snic *snic, int ex_tag) | |||
2459 | cleanup: | 2500 | cleanup: |
2460 | sc->result = DID_TRANSPORT_DISRUPTED << 16; | 2501 | sc->result = DID_TRANSPORT_DISRUPTED << 16; |
2461 | SNIC_HOST_INFO(snic->shost, | 2502 | SNIC_HOST_INFO(snic->shost, |
2462 | "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n", | 2503 | "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n", |
2463 | sc, rqi, (jiffies - st_time)); | 2504 | sc, sc->request->tag, CMD_FLAGS(sc), rqi, |
2505 | jiffies_to_msecs(jiffies - st_time)); | ||
2464 | 2506 | ||
2465 | /* Update IO stats */ | 2507 | /* Update IO stats */ |
2466 | snic_stats_update_io_cmpl(&snic->s_stats); | 2508 | snic_stats_update_io_cmpl(&snic->s_stats); |
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h index 11e614849a82..fd1066b1cad5 100644 --- a/drivers/scsi/snic/snic_stats.h +++ b/drivers/scsi/snic/snic_stats.h | |||
@@ -42,6 +42,7 @@ struct snic_abort_stats { | |||
42 | atomic64_t drv_tmo; /* Abort Driver Timeouts */ | 42 | atomic64_t drv_tmo; /* Abort Driver Timeouts */ |
43 | atomic64_t fw_tmo; /* Abort Firmware Timeouts */ | 43 | atomic64_t fw_tmo; /* Abort Firmware Timeouts */ |
44 | atomic64_t io_not_found;/* Abort IO Not Found */ | 44 | atomic64_t io_not_found;/* Abort IO Not Found */ |
45 | atomic64_t q_fail; /* Abort Queuing Failed */ | ||
45 | }; | 46 | }; |
46 | 47 | ||
47 | struct snic_reset_stats { | 48 | struct snic_reset_stats { |
@@ -69,7 +70,9 @@ struct snic_fw_stats { | |||
69 | struct snic_misc_stats { | 70 | struct snic_misc_stats { |
70 | u64 last_isr_time; | 71 | u64 last_isr_time; |
71 | u64 last_ack_time; | 72 | u64 last_ack_time; |
72 | atomic64_t isr_cnt; | 73 | atomic64_t ack_isr_cnt; |
74 | atomic64_t cmpl_isr_cnt; | ||
75 | atomic64_t errnotify_isr_cnt; | ||
73 | atomic64_t max_cq_ents; /* Max CQ Entries */ | 76 | atomic64_t max_cq_ents; /* Max CQ Entries */ |
74 | atomic64_t data_cnt_mismat; /* Data Count Mismatch */ | 77 | atomic64_t data_cnt_mismat; /* Data Count Mismatch */ |
75 | atomic64_t io_tmo; | 78 | atomic64_t io_tmo; |
@@ -81,6 +84,9 @@ struct snic_misc_stats { | |||
81 | atomic64_t no_icmnd_itmf_cmpls; | 84 | atomic64_t no_icmnd_itmf_cmpls; |
82 | atomic64_t io_under_run; | 85 | atomic64_t io_under_run; |
83 | atomic64_t qfull; | 86 | atomic64_t qfull; |
87 | atomic64_t qsz_rampup; | ||
88 | atomic64_t qsz_rampdown; | ||
89 | atomic64_t last_qsz; | ||
84 | atomic64_t tgt_not_rdy; | 90 | atomic64_t tgt_not_rdy; |
85 | }; | 91 | }; |
86 | 92 | ||
@@ -101,9 +107,9 @@ static inline void | |||
101 | snic_stats_update_active_ios(struct snic_stats *s_stats) | 107 | snic_stats_update_active_ios(struct snic_stats *s_stats) |
102 | { | 108 | { |
103 | struct snic_io_stats *io = &s_stats->io; | 109 | struct snic_io_stats *io = &s_stats->io; |
104 | u32 nr_active_ios; | 110 | int nr_active_ios; |
105 | 111 | ||
106 | nr_active_ios = atomic64_inc_return(&io->active); | 112 | nr_active_ios = atomic64_read(&io->active); |
107 | if (atomic64_read(&io->max_active) < nr_active_ios) | 113 | if (atomic64_read(&io->max_active) < nr_active_ios) |
108 | atomic64_set(&io->max_active, nr_active_ios); | 114 | atomic64_set(&io->max_active, nr_active_ios); |
109 | 115 | ||
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c index e0b5549bc9fb..dad5fc66effb 100644 --- a/drivers/scsi/snic/vnic_dev.c +++ b/drivers/scsi/snic/vnic_dev.c | |||
@@ -263,12 +263,20 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
263 | int wait) | 263 | int wait) |
264 | { | 264 | { |
265 | struct devcmd2_controller *dc2c = vdev->devcmd2; | 265 | struct devcmd2_controller *dc2c = vdev->devcmd2; |
266 | struct devcmd2_result *result = dc2c->result + dc2c->next_result; | 266 | struct devcmd2_result *result = NULL; |
267 | unsigned int i; | 267 | unsigned int i; |
268 | int delay; | 268 | int delay; |
269 | int err; | 269 | int err; |
270 | u32 posted; | 270 | u32 posted; |
271 | u32 fetch_idx; | ||
271 | u32 new_posted; | 272 | u32 new_posted; |
273 | u8 color; | ||
274 | |||
275 | fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index); | ||
276 | if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ | ||
277 | /* Hardware surprise removal: return error */ | ||
278 | return -ENODEV; | ||
279 | } | ||
272 | 280 | ||
273 | posted = ioread32(&dc2c->wq_ctrl->posted_index); | 281 | posted = ioread32(&dc2c->wq_ctrl->posted_index); |
274 | 282 | ||
@@ -278,6 +286,13 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
278 | } | 286 | } |
279 | 287 | ||
280 | new_posted = (posted + 1) % DEVCMD2_RING_SIZE; | 288 | new_posted = (posted + 1) % DEVCMD2_RING_SIZE; |
289 | if (new_posted == fetch_idx) { | ||
290 | pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n", | ||
291 | pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted); | ||
292 | |||
293 | return -EBUSY; | ||
294 | } | ||
295 | |||
281 | dc2c->cmd_ring[posted].cmd = cmd; | 296 | dc2c->cmd_ring[posted].cmd = cmd; |
282 | dc2c->cmd_ring[posted].flags = 0; | 297 | dc2c->cmd_ring[posted].flags = 0; |
283 | 298 | ||
@@ -299,14 +314,22 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
299 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) | 314 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) |
300 | return 0; | 315 | return 0; |
301 | 316 | ||
317 | result = dc2c->result + dc2c->next_result; | ||
318 | color = dc2c->color; | ||
319 | |||
320 | /* | ||
321 | * Increment next_result, after posting the devcmd, irrespective of | ||
322 | * devcmd result, and it should be done only once. | ||
323 | */ | ||
324 | dc2c->next_result++; | ||
325 | if (dc2c->next_result == dc2c->result_size) { | ||
326 | dc2c->next_result = 0; | ||
327 | dc2c->color = dc2c->color ? 0 : 1; | ||
328 | } | ||
329 | |||
302 | for (delay = 0; delay < wait; delay++) { | 330 | for (delay = 0; delay < wait; delay++) { |
303 | udelay(100); | 331 | udelay(100); |
304 | if (result->color == dc2c->color) { | 332 | if (result->color == color) { |
305 | dc2c->next_result++; | ||
306 | if (dc2c->next_result == dc2c->result_size) { | ||
307 | dc2c->next_result = 0; | ||
308 | dc2c->color = dc2c->color ? 0 : 1; | ||
309 | } | ||
310 | if (result->error) { | 333 | if (result->error) { |
311 | err = (int) result->error; | 334 | err = (int) result->error; |
312 | if (err != ERR_ECMDUNKNOWN || | 335 | if (err != ERR_ECMDUNKNOWN || |
@@ -317,13 +340,6 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
317 | return err; | 340 | return err; |
318 | } | 341 | } |
319 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { | 342 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { |
320 | /* | ||
321 | * Adding the rmb() prevents the compiler | ||
322 | * and/or CPU from reordering the reads which | ||
323 | * would potentially result in reading stale | ||
324 | * values. | ||
325 | */ | ||
326 | rmb(); | ||
327 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) | 343 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
328 | vdev->args[i] = result->results[i]; | 344 | vdev->args[i] = result->results[i]; |
329 | } | 345 | } |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index dbf1882cfbac..7af5226aa55b 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -1974,9 +1974,12 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
1974 | transfer = (int)cmdstatp->uremainder64; | 1974 | transfer = (int)cmdstatp->uremainder64; |
1975 | else | 1975 | else |
1976 | transfer = 0; | 1976 | transfer = 0; |
1977 | if (STp->block_size == 0 && | 1977 | if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) { |
1978 | cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) | 1978 | if (STp->block_size == 0) |
1979 | transfer = bytes; | 1979 | transfer = bytes; |
1980 | /* Some drives set ILI with MEDIUM ERROR */ | ||
1981 | cmdstatp->flags &= ~SENSE_ILI; | ||
1982 | } | ||
1980 | 1983 | ||
1981 | if (cmdstatp->flags & SENSE_ILI) { /* ILI */ | 1984 | if (cmdstatp->flags & SENSE_ILI) { /* ILI */ |
1982 | if (STp->block_size == 0 && | 1985 | if (STp->block_size == 0 && |
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index b9de487bbd31..3c4c07038948 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c | |||
@@ -36,14 +36,10 @@ | |||
36 | #include <scsi/scsi_host.h> | 36 | #include <scsi/scsi_host.h> |
37 | #include "sun3_scsi.h" | 37 | #include "sun3_scsi.h" |
38 | 38 | ||
39 | /* Definitions for the core NCR5380 driver. */ | ||
40 | |||
41 | #define REAL_DMA | ||
42 | /* #define SUPPORT_TAGS */ | ||
43 | /* minimum number of bytes to do dma on */ | 39 | /* minimum number of bytes to do dma on */ |
44 | #define DMA_MIN_SIZE 129 | 40 | #define DMA_MIN_SIZE 129 |
45 | 41 | ||
46 | /* #define MAX_TAGS 32 */ | 42 | /* Definitions for the core NCR5380 driver. */ |
47 | 43 | ||
48 | #define NCR5380_implementation_fields /* none */ | 44 | #define NCR5380_implementation_fields /* none */ |
49 | 45 | ||
@@ -55,14 +51,12 @@ | |||
55 | #define NCR5380_abort sun3scsi_abort | 51 | #define NCR5380_abort sun3scsi_abort |
56 | #define NCR5380_info sun3scsi_info | 52 | #define NCR5380_info sun3scsi_info |
57 | 53 | ||
58 | #define NCR5380_dma_read_setup(instance, data, count) \ | 54 | #define NCR5380_dma_recv_setup(instance, data, count) (count) |
59 | sun3scsi_dma_setup(instance, data, count, 0) | 55 | #define NCR5380_dma_send_setup(instance, data, count) (count) |
60 | #define NCR5380_dma_write_setup(instance, data, count) \ | ||
61 | sun3scsi_dma_setup(instance, data, count, 1) | ||
62 | #define NCR5380_dma_residual(instance) \ | 56 | #define NCR5380_dma_residual(instance) \ |
63 | sun3scsi_dma_residual(instance) | 57 | sun3scsi_dma_residual(instance) |
64 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ | 58 | #define NCR5380_dma_xfer_len(instance, cmd, phase) \ |
65 | sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) | 59 | sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd) |
66 | 60 | ||
67 | #define NCR5380_acquire_dma_irq(instance) (1) | 61 | #define NCR5380_acquire_dma_irq(instance) (1) |
68 | #define NCR5380_release_dma_irq(instance) | 62 | #define NCR5380_release_dma_irq(instance) |
@@ -78,10 +72,6 @@ static int setup_cmd_per_lun = -1; | |||
78 | module_param(setup_cmd_per_lun, int, 0); | 72 | module_param(setup_cmd_per_lun, int, 0); |
79 | static int setup_sg_tablesize = -1; | 73 | static int setup_sg_tablesize = -1; |
80 | module_param(setup_sg_tablesize, int, 0); | 74 | module_param(setup_sg_tablesize, int, 0); |
81 | #ifdef SUPPORT_TAGS | ||
82 | static int setup_use_tagged_queuing = -1; | ||
83 | module_param(setup_use_tagged_queuing, int, 0); | ||
84 | #endif | ||
85 | static int setup_hostid = -1; | 75 | static int setup_hostid = -1; |
86 | module_param(setup_hostid, int, 0); | 76 | module_param(setup_hostid, int, 0); |
87 | 77 | ||
@@ -263,14 +253,13 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) | |||
263 | return last_residual; | 253 | return last_residual; |
264 | } | 254 | } |
265 | 255 | ||
266 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, | 256 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len, |
267 | struct scsi_cmnd *cmd, | 257 | struct scsi_cmnd *cmd) |
268 | int write_flag) | ||
269 | { | 258 | { |
270 | if (cmd->request->cmd_type == REQ_TYPE_FS) | 259 | if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) |
271 | return wanted; | ||
272 | else | ||
273 | return 0; | 260 | return 0; |
261 | |||
262 | return wanted_len; | ||
274 | } | 263 | } |
275 | 264 | ||
276 | static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) | 265 | static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) |
@@ -408,7 +397,7 @@ static int sun3scsi_dma_finish(int write_flag) | |||
408 | 397 | ||
409 | } | 398 | } |
410 | 399 | ||
411 | #include "atari_NCR5380.c" | 400 | #include "NCR5380.c" |
412 | 401 | ||
413 | #ifdef SUN3_SCSI_VME | 402 | #ifdef SUN3_SCSI_VME |
414 | #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" | 403 | #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" |
@@ -516,10 +505,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) | |||
516 | instance->io_port = (unsigned long)ioaddr; | 505 | instance->io_port = (unsigned long)ioaddr; |
517 | instance->irq = irq->start; | 506 | instance->irq = irq->start; |
518 | 507 | ||
519 | #ifdef SUPPORT_TAGS | ||
520 | host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; | ||
521 | #endif | ||
522 | |||
523 | error = NCR5380_init(instance, host_flags); | 508 | error = NCR5380_init(instance, host_flags); |
524 | if (error) | 509 | if (error) |
525 | goto fail_init; | 510 | goto fail_init; |
@@ -527,15 +512,9 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) | |||
527 | error = request_irq(instance->irq, scsi_sun3_intr, 0, | 512 | error = request_irq(instance->irq, scsi_sun3_intr, 0, |
528 | "NCR5380", instance); | 513 | "NCR5380", instance); |
529 | if (error) { | 514 | if (error) { |
530 | #ifdef REAL_DMA | ||
531 | pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", | 515 | pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", |
532 | instance->host_no, instance->irq); | 516 | instance->host_no, instance->irq); |
533 | goto fail_irq; | 517 | goto fail_irq; |
534 | #else | ||
535 | pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n", | ||
536 | instance->host_no, instance->irq); | ||
537 | instance->irq = NO_IRQ; | ||
538 | #endif | ||
539 | } | 518 | } |
540 | 519 | ||
541 | dregs->csr = 0; | 520 | dregs->csr = 0; |
@@ -565,8 +544,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) | |||
565 | return 0; | 544 | return 0; |
566 | 545 | ||
567 | fail_host: | 546 | fail_host: |
568 | if (instance->irq != NO_IRQ) | 547 | free_irq(instance->irq, instance); |
569 | free_irq(instance->irq, instance); | ||
570 | fail_irq: | 548 | fail_irq: |
571 | NCR5380_exit(instance); | 549 | NCR5380_exit(instance); |
572 | fail_init: | 550 | fail_init: |
@@ -583,8 +561,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev) | |||
583 | struct Scsi_Host *instance = platform_get_drvdata(pdev); | 561 | struct Scsi_Host *instance = platform_get_drvdata(pdev); |
584 | 562 | ||
585 | scsi_remove_host(instance); | 563 | scsi_remove_host(instance); |
586 | if (instance->irq != NO_IRQ) | 564 | free_irq(instance->irq, instance); |
587 | free_irq(instance->irq, instance); | ||
588 | NCR5380_exit(instance); | 565 | NCR5380_exit(instance); |
589 | scsi_host_put(instance); | 566 | scsi_host_put(instance); |
590 | if (udc_regs) | 567 | if (udc_regs) |
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 4615fda60dbd..8a8608ac62e6 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c | |||
@@ -1,5 +1,3 @@ | |||
1 | #define PSEUDO_DMA | ||
2 | |||
3 | /* | 1 | /* |
4 | * Trantor T128/T128F/T228 driver | 2 | * Trantor T128/T128F/T228 driver |
5 | * Note : architecturally, the T100 and T130 are different and won't | 3 | * Note : architecturally, the T100 and T130 are different and won't |
@@ -76,7 +74,6 @@ | |||
76 | 74 | ||
77 | #include <scsi/scsi_host.h> | 75 | #include <scsi/scsi_host.h> |
78 | #include "t128.h" | 76 | #include "t128.h" |
79 | #define AUTOPROBE_IRQ | ||
80 | #include "NCR5380.h" | 77 | #include "NCR5380.h" |
81 | 78 | ||
82 | static struct override { | 79 | static struct override { |
@@ -210,7 +207,7 @@ found: | |||
210 | instance->base = base; | 207 | instance->base = base; |
211 | ((struct NCR5380_hostdata *)instance->hostdata)->base = p; | 208 | ((struct NCR5380_hostdata *)instance->hostdata)->base = p; |
212 | 209 | ||
213 | if (NCR5380_init(instance, 0)) | 210 | if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) |
214 | goto out_unregister; | 211 | goto out_unregister; |
215 | 212 | ||
216 | NCR5380_maybe_reset_bus(instance); | 213 | NCR5380_maybe_reset_bus(instance); |
@@ -294,7 +291,7 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, | |||
294 | } | 291 | } |
295 | 292 | ||
296 | /* | 293 | /* |
297 | * Function : int NCR5380_pread (struct Scsi_Host *instance, | 294 | * Function : int t128_pread (struct Scsi_Host *instance, |
298 | * unsigned char *dst, int len) | 295 | * unsigned char *dst, int len) |
299 | * | 296 | * |
300 | * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to | 297 | * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to |
@@ -306,8 +303,8 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, | |||
306 | * timeout. | 303 | * timeout. |
307 | */ | 304 | */ |
308 | 305 | ||
309 | static inline int | 306 | static inline int t128_pread(struct Scsi_Host *instance, |
310 | NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) | 307 | unsigned char *dst, int len) |
311 | { | 308 | { |
312 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 309 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
313 | void __iomem *reg, *base = hostdata->base; | 310 | void __iomem *reg, *base = hostdata->base; |
@@ -340,7 +337,7 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) | |||
340 | } | 337 | } |
341 | 338 | ||
342 | /* | 339 | /* |
343 | * Function : int NCR5380_pwrite (struct Scsi_Host *instance, | 340 | * Function : int t128_pwrite (struct Scsi_Host *instance, |
344 | * unsigned char *src, int len) | 341 | * unsigned char *src, int len) |
345 | * | 342 | * |
346 | * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from | 343 | * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from |
@@ -352,8 +349,8 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) | |||
352 | * timeout. | 349 | * timeout. |
353 | */ | 350 | */ |
354 | 351 | ||
355 | static inline int | 352 | static inline int t128_pwrite(struct Scsi_Host *instance, |
356 | NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) | 353 | unsigned char *src, int len) |
357 | { | 354 | { |
358 | struct NCR5380_hostdata *hostdata = shost_priv(instance); | 355 | struct NCR5380_hostdata *hostdata = shost_priv(instance); |
359 | void __iomem *reg, *base = hostdata->base; | 356 | void __iomem *reg, *base = hostdata->base; |
@@ -394,8 +391,6 @@ static struct scsi_host_template driver_template = { | |||
394 | .detect = t128_detect, | 391 | .detect = t128_detect, |
395 | .release = t128_release, | 392 | .release = t128_release, |
396 | .proc_name = "t128", | 393 | .proc_name = "t128", |
397 | .show_info = t128_show_info, | ||
398 | .write_info = t128_write_info, | ||
399 | .info = t128_info, | 394 | .info = t128_info, |
400 | .queuecommand = t128_queue_command, | 395 | .queuecommand = t128_queue_command, |
401 | .eh_abort_handler = t128_abort, | 396 | .eh_abort_handler = t128_abort, |
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index dd16d85497e1..c95bcd839109 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h | |||
@@ -77,14 +77,17 @@ | |||
77 | #define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) | 77 | #define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) |
78 | 78 | ||
79 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) | 79 | #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) |
80 | #define NCR5380_dma_recv_setup t128_pread | ||
81 | #define NCR5380_dma_send_setup t128_pwrite | ||
82 | #define NCR5380_dma_residual(instance) (0) | ||
80 | 83 | ||
81 | #define NCR5380_intr t128_intr | 84 | #define NCR5380_intr t128_intr |
82 | #define NCR5380_queue_command t128_queue_command | 85 | #define NCR5380_queue_command t128_queue_command |
83 | #define NCR5380_abort t128_abort | 86 | #define NCR5380_abort t128_abort |
84 | #define NCR5380_bus_reset t128_bus_reset | 87 | #define NCR5380_bus_reset t128_bus_reset |
85 | #define NCR5380_info t128_info | 88 | #define NCR5380_info t128_info |
86 | #define NCR5380_show_info t128_show_info | 89 | |
87 | #define NCR5380_write_info t128_write_info | 90 | #define NCR5380_io_delay(x) udelay(x) |
88 | 91 | ||
89 | /* 15 14 12 10 7 5 3 | 92 | /* 15 14 12 10 7 5 3 |
90 | 1101 0100 1010 1000 */ | 93 | 1101 0100 1010 1000 */ |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 90901861bfc0..ae85861051eb 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -563,7 +563,7 @@ static const struct scsi_host_template usb_stor_host_template = { | |||
563 | .target_alloc = target_alloc, | 563 | .target_alloc = target_alloc, |
564 | 564 | ||
565 | /* lots of sg segments can be handled */ | 565 | /* lots of sg segments can be handled */ |
566 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, | 566 | .sg_tablesize = SG_MAX_SEGMENTS, |
567 | 567 | ||
568 | /* limit the total size of a transfer to 120 KB */ | 568 | /* limit the total size of a transfer to 120 KB */ |
569 | .max_sectors = 240, | 569 | .max_sectors = 240, |
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 556ec1ea2574..cb3c8fe6acd7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, | |||
286 | #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) | 286 | #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * The maximum number of SG segments that we will put inside a | ||
290 | * scatterlist (unless chaining is used). Should ideally fit inside a | ||
291 | * single page, to avoid a higher order allocation. We could define this | ||
292 | * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The | ||
293 | * minimum value is 32 | ||
294 | */ | ||
295 | #define SG_CHUNK_SIZE 128 | ||
296 | |||
297 | /* | ||
298 | * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit | ||
299 | * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. | ||
300 | */ | ||
301 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN | ||
302 | #define SG_MAX_SEGMENTS 2048 | ||
303 | #else | ||
304 | #define SG_MAX_SEGMENTS SG_CHUNK_SIZE | ||
305 | #endif | ||
306 | |||
307 | #ifdef CONFIG_SG_POOL | ||
308 | void sg_free_table_chained(struct sg_table *table, bool first_chunk); | ||
309 | int sg_alloc_table_chained(struct sg_table *table, int nents, | ||
310 | struct scatterlist *first_chunk); | ||
311 | #endif | ||
312 | |||
313 | /* | ||
289 | * sg page iterator | 314 | * sg page iterator |
290 | * | 315 | * |
291 | * Iterates over sg entries page-by-page. On each successful iteration, | 316 | * Iterates over sg entries page-by-page. On each successful iteration, |
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index e0a3398b1547..8ec7c30e35af 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h | |||
@@ -18,25 +18,6 @@ enum scsi_timeouts { | |||
18 | }; | 18 | }; |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * The maximum number of SG segments that we will put inside a | ||
22 | * scatterlist (unless chaining is used). Should ideally fit inside a | ||
23 | * single page, to avoid a higher order allocation. We could define this | ||
24 | * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The | ||
25 | * minimum value is 32 | ||
26 | */ | ||
27 | #define SCSI_MAX_SG_SEGMENTS 128 | ||
28 | |||
29 | /* | ||
30 | * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit | ||
31 | * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. | ||
32 | */ | ||
33 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN | ||
34 | #define SCSI_MAX_SG_CHAIN_SEGMENTS 2048 | ||
35 | #else | ||
36 | #define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS | ||
37 | #endif | ||
38 | |||
39 | /* | ||
40 | * DIX-capable adapters effectively support infinite chaining for the | 21 | * DIX-capable adapters effectively support infinite chaining for the |
41 | * protection information scatterlist | 22 | * protection information scatterlist |
42 | */ | 23 | */ |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 74d79bde7075..a6c346df290d 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -50,6 +50,12 @@ enum scsi_device_state { | |||
50 | SDEV_CREATED_BLOCK, /* same as above but for created devices */ | 50 | SDEV_CREATED_BLOCK, /* same as above but for created devices */ |
51 | }; | 51 | }; |
52 | 52 | ||
53 | enum scsi_scan_mode { | ||
54 | SCSI_SCAN_INITIAL = 0, | ||
55 | SCSI_SCAN_RESCAN, | ||
56 | SCSI_SCAN_MANUAL, | ||
57 | }; | ||
58 | |||
53 | enum scsi_device_event { | 59 | enum scsi_device_event { |
54 | SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ | 60 | SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ |
55 | SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ | 61 | SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ |
@@ -242,6 +248,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); | |||
242 | enum scsi_target_state { | 248 | enum scsi_target_state { |
243 | STARGET_CREATED = 1, | 249 | STARGET_CREATED = 1, |
244 | STARGET_RUNNING, | 250 | STARGET_RUNNING, |
251 | STARGET_REMOVE, | ||
245 | STARGET_DEL, | 252 | STARGET_DEL, |
246 | }; | 253 | }; |
247 | 254 | ||
@@ -391,7 +398,8 @@ extern void scsi_device_resume(struct scsi_device *sdev); | |||
391 | extern void scsi_target_quiesce(struct scsi_target *); | 398 | extern void scsi_target_quiesce(struct scsi_target *); |
392 | extern void scsi_target_resume(struct scsi_target *); | 399 | extern void scsi_target_resume(struct scsi_target *); |
393 | extern void scsi_scan_target(struct device *parent, unsigned int channel, | 400 | extern void scsi_scan_target(struct device *parent, unsigned int channel, |
394 | unsigned int id, u64 lun, int rescan); | 401 | unsigned int id, u64 lun, |
402 | enum scsi_scan_mode rescan); | ||
395 | extern void scsi_target_reap(struct scsi_target *); | 403 | extern void scsi_target_reap(struct scsi_target *); |
396 | extern void scsi_target_block(struct device *); | 404 | extern void scsi_target_block(struct device *); |
397 | extern void scsi_target_unblock(struct device *, enum scsi_device_state); | 405 | extern void scsi_target_unblock(struct device *, enum scsi_device_state); |
@@ -534,9 +542,9 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev) | |||
534 | /* | 542 | /* |
535 | * Although VPD inquiries can go to SCSI-2 type devices, | 543 | * Although VPD inquiries can go to SCSI-2 type devices, |
536 | * some USB ones crash on receiving them, and the pages | 544 | * some USB ones crash on receiving them, and the pages |
537 | * we currently ask for are for SPC-3 and beyond | 545 | * we currently ask for are mandatory for SPC-2 and beyond |
538 | */ | 546 | */ |
539 | if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages) | 547 | if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages) |
540 | return 1; | 548 | return 1; |
541 | return 0; | 549 | return 0; |
542 | } | 550 | } |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index fcfa3d7f5e7e..76e9d278c334 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -37,7 +37,7 @@ struct blk_queue_tags; | |||
37 | * used in one scatter-gather request. | 37 | * used in one scatter-gather request. |
38 | */ | 38 | */ |
39 | #define SG_NONE 0 | 39 | #define SG_NONE 0 |
40 | #define SG_ALL SCSI_MAX_SG_SEGMENTS | 40 | #define SG_ALL SG_CHUNK_SIZE |
41 | 41 | ||
42 | #define MODE_UNKNOWN 0x00 | 42 | #define MODE_UNKNOWN 0x00 |
43 | #define MODE_INITIATOR 0x01 | 43 | #define MODE_INITIATOR 0x01 |
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index c2ae21cbaa2c..d1defd1ebd95 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h | |||
@@ -115,6 +115,8 @@ | |||
115 | #define VERIFY_16 0x8f | 115 | #define VERIFY_16 0x8f |
116 | #define SYNCHRONIZE_CACHE_16 0x91 | 116 | #define SYNCHRONIZE_CACHE_16 0x91 |
117 | #define WRITE_SAME_16 0x93 | 117 | #define WRITE_SAME_16 0x93 |
118 | #define ZBC_OUT 0x94 | ||
119 | #define ZBC_IN 0x95 | ||
118 | #define SERVICE_ACTION_BIDIRECTIONAL 0x9d | 120 | #define SERVICE_ACTION_BIDIRECTIONAL 0x9d |
119 | #define SERVICE_ACTION_IN_16 0x9e | 121 | #define SERVICE_ACTION_IN_16 0x9e |
120 | #define SERVICE_ACTION_OUT_16 0x9f | 122 | #define SERVICE_ACTION_OUT_16 0x9f |
@@ -143,6 +145,13 @@ | |||
143 | #define MO_SET_PRIORITY 0x0e | 145 | #define MO_SET_PRIORITY 0x0e |
144 | #define MO_SET_TIMESTAMP 0x0f | 146 | #define MO_SET_TIMESTAMP 0x0f |
145 | #define MO_MANAGEMENT_PROTOCOL_OUT 0x10 | 147 | #define MO_MANAGEMENT_PROTOCOL_OUT 0x10 |
148 | /* values for ZBC_IN */ | ||
149 | #define ZI_REPORT_ZONES 0x00 | ||
150 | /* values for ZBC_OUT */ | ||
151 | #define ZO_CLOSE_ZONE 0x01 | ||
152 | #define ZO_FINISH_ZONE 0x02 | ||
153 | #define ZO_OPEN_ZONE 0x03 | ||
154 | #define ZO_RESET_WRITE_POINTER 0x04 | ||
146 | /* values for variable length command */ | 155 | /* values for variable length command */ |
147 | #define XDREAD_32 0x03 | 156 | #define XDREAD_32 0x03 |
148 | #define XDWRITE_32 0x04 | 157 | #define XDWRITE_32 0x04 |
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index 079bd10a01b4..9a9b3e2550af 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h | |||
@@ -94,11 +94,9 @@ | |||
94 | scsi_opcode_name(WRITE_16), \ | 94 | scsi_opcode_name(WRITE_16), \ |
95 | scsi_opcode_name(VERIFY_16), \ | 95 | scsi_opcode_name(VERIFY_16), \ |
96 | scsi_opcode_name(WRITE_SAME_16), \ | 96 | scsi_opcode_name(WRITE_SAME_16), \ |
97 | scsi_opcode_name(ZBC_OUT), \ | ||
98 | scsi_opcode_name(ZBC_IN), \ | ||
97 | scsi_opcode_name(SERVICE_ACTION_IN_16), \ | 99 | scsi_opcode_name(SERVICE_ACTION_IN_16), \ |
98 | scsi_opcode_name(SAI_READ_CAPACITY_16), \ | ||
99 | scsi_opcode_name(SAI_GET_LBA_STATUS), \ | ||
100 | scsi_opcode_name(MI_REPORT_TARGET_PGS), \ | ||
101 | scsi_opcode_name(MO_SET_TARGET_PGS), \ | ||
102 | scsi_opcode_name(READ_32), \ | 100 | scsi_opcode_name(READ_32), \ |
103 | scsi_opcode_name(WRITE_32), \ | 101 | scsi_opcode_name(WRITE_32), \ |
104 | scsi_opcode_name(WRITE_SAME_32), \ | 102 | scsi_opcode_name(WRITE_SAME_32), \ |
diff --git a/lib/Kconfig b/lib/Kconfig index 3cca1222578e..61d55bd0ed89 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -523,6 +523,13 @@ config SG_SPLIT | |||
523 | a scatterlist. This should be selected by a driver or an API which | 523 | a scatterlist. This should be selected by a driver or an API which |
524 | whishes to split a scatterlist amongst multiple DMA channels. | 524 | whishes to split a scatterlist amongst multiple DMA channels. |
525 | 525 | ||
526 | config SG_POOL | ||
527 | def_bool n | ||
528 | help | ||
529 | Provides a helper to allocate chained scatterlists. This should be | ||
530 | selected by a driver or an API which whishes to allocate chained | ||
531 | scatterlist. | ||
532 | |||
526 | # | 533 | # |
527 | # sg chaining option | 534 | # sg chaining option |
528 | # | 535 | # |
diff --git a/lib/Makefile b/lib/Makefile index a65e9a861535..931396ada5eb 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -178,6 +178,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o | |||
178 | obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o | 178 | obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o |
179 | 179 | ||
180 | obj-$(CONFIG_SG_SPLIT) += sg_split.o | 180 | obj-$(CONFIG_SG_SPLIT) += sg_split.o |
181 | obj-$(CONFIG_SG_POOL) += sg_pool.o | ||
181 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o | 182 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o |
182 | obj-$(CONFIG_IRQ_POLL) += irq_poll.o | 183 | obj-$(CONFIG_IRQ_POLL) += irq_poll.o |
183 | 184 | ||
diff --git a/lib/sg_pool.c b/lib/sg_pool.c new file mode 100644 index 000000000000..6dd30615a201 --- /dev/null +++ b/lib/sg_pool.c | |||
@@ -0,0 +1,172 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/scatterlist.h> | ||
3 | #include <linux/mempool.h> | ||
4 | #include <linux/slab.h> | ||
5 | |||
6 | #define SG_MEMPOOL_NR ARRAY_SIZE(sg_pools) | ||
7 | #define SG_MEMPOOL_SIZE 2 | ||
8 | |||
9 | struct sg_pool { | ||
10 | size_t size; | ||
11 | char *name; | ||
12 | struct kmem_cache *slab; | ||
13 | mempool_t *pool; | ||
14 | }; | ||
15 | |||
16 | #define SP(x) { .size = x, "sgpool-" __stringify(x) } | ||
17 | #if (SG_CHUNK_SIZE < 32) | ||
18 | #error SG_CHUNK_SIZE is too small (must be 32 or greater) | ||
19 | #endif | ||
20 | static struct sg_pool sg_pools[] = { | ||
21 | SP(8), | ||
22 | SP(16), | ||
23 | #if (SG_CHUNK_SIZE > 32) | ||
24 | SP(32), | ||
25 | #if (SG_CHUNK_SIZE > 64) | ||
26 | SP(64), | ||
27 | #if (SG_CHUNK_SIZE > 128) | ||
28 | SP(128), | ||
29 | #if (SG_CHUNK_SIZE > 256) | ||
30 | #error SG_CHUNK_SIZE is too large (256 MAX) | ||
31 | #endif | ||
32 | #endif | ||
33 | #endif | ||
34 | #endif | ||
35 | SP(SG_CHUNK_SIZE) | ||
36 | }; | ||
37 | #undef SP | ||
38 | |||
39 | static inline unsigned int sg_pool_index(unsigned short nents) | ||
40 | { | ||
41 | unsigned int index; | ||
42 | |||
43 | BUG_ON(nents > SG_CHUNK_SIZE); | ||
44 | |||
45 | if (nents <= 8) | ||
46 | index = 0; | ||
47 | else | ||
48 | index = get_count_order(nents) - 3; | ||
49 | |||
50 | return index; | ||
51 | } | ||
52 | |||
53 | static void sg_pool_free(struct scatterlist *sgl, unsigned int nents) | ||
54 | { | ||
55 | struct sg_pool *sgp; | ||
56 | |||
57 | sgp = sg_pools + sg_pool_index(nents); | ||
58 | mempool_free(sgl, sgp->pool); | ||
59 | } | ||
60 | |||
61 | static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask) | ||
62 | { | ||
63 | struct sg_pool *sgp; | ||
64 | |||
65 | sgp = sg_pools + sg_pool_index(nents); | ||
66 | return mempool_alloc(sgp->pool, gfp_mask); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * sg_free_table_chained - Free a previously mapped sg table | ||
71 | * @table: The sg table header to use | ||
72 | * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained? | ||
73 | * | ||
74 | * Description: | ||
75 | * Free an sg table previously allocated and setup with | ||
76 | * sg_alloc_table_chained(). | ||
77 | * | ||
78 | **/ | ||
79 | void sg_free_table_chained(struct sg_table *table, bool first_chunk) | ||
80 | { | ||
81 | if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE) | ||
82 | return; | ||
83 | __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free); | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(sg_free_table_chained); | ||
86 | |||
87 | /** | ||
88 | * sg_alloc_table_chained - Allocate and chain SGLs in an sg table | ||
89 | * @table: The sg table header to use | ||
90 | * @nents: Number of entries in sg list | ||
91 | * @first_chunk: first SGL | ||
92 | * | ||
93 | * Description: | ||
94 | * Allocate and chain SGLs in an sg table. If @nents@ is larger than | ||
95 | * SG_CHUNK_SIZE a chained sg table will be setup. | ||
96 | * | ||
97 | **/ | ||
98 | int sg_alloc_table_chained(struct sg_table *table, int nents, | ||
99 | struct scatterlist *first_chunk) | ||
100 | { | ||
101 | int ret; | ||
102 | |||
103 | BUG_ON(!nents); | ||
104 | |||
105 | if (first_chunk) { | ||
106 | if (nents <= SG_CHUNK_SIZE) { | ||
107 | table->nents = table->orig_nents = nents; | ||
108 | sg_init_table(table->sgl, nents); | ||
109 | return 0; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, | ||
114 | first_chunk, GFP_ATOMIC, sg_pool_alloc); | ||
115 | if (unlikely(ret)) | ||
116 | sg_free_table_chained(table, (bool)first_chunk); | ||
117 | return ret; | ||
118 | } | ||
119 | EXPORT_SYMBOL_GPL(sg_alloc_table_chained); | ||
120 | |||
121 | static __init int sg_pool_init(void) | ||
122 | { | ||
123 | int i; | ||
124 | |||
125 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
126 | struct sg_pool *sgp = sg_pools + i; | ||
127 | int size = sgp->size * sizeof(struct scatterlist); | ||
128 | |||
129 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | ||
130 | SLAB_HWCACHE_ALIGN, NULL); | ||
131 | if (!sgp->slab) { | ||
132 | printk(KERN_ERR "SG_POOL: can't init sg slab %s\n", | ||
133 | sgp->name); | ||
134 | goto cleanup_sdb; | ||
135 | } | ||
136 | |||
137 | sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, | ||
138 | sgp->slab); | ||
139 | if (!sgp->pool) { | ||
140 | printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n", | ||
141 | sgp->name); | ||
142 | goto cleanup_sdb; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | return 0; | ||
147 | |||
148 | cleanup_sdb: | ||
149 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
150 | struct sg_pool *sgp = sg_pools + i; | ||
151 | if (sgp->pool) | ||
152 | mempool_destroy(sgp->pool); | ||
153 | if (sgp->slab) | ||
154 | kmem_cache_destroy(sgp->slab); | ||
155 | } | ||
156 | |||
157 | return -ENOMEM; | ||
158 | } | ||
159 | |||
160 | static __exit void sg_pool_exit(void) | ||
161 | { | ||
162 | int i; | ||
163 | |||
164 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
165 | struct sg_pool *sgp = sg_pools + i; | ||
166 | mempool_destroy(sgp->pool); | ||
167 | kmem_cache_destroy(sgp->slab); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | module_init(sg_pool_init); | ||
172 | module_exit(sg_pool_exit); | ||