author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2015-04-14 11:51:33 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2015-04-14 11:51:33 -0400
commit     85a3685852d9ac7d92be9d824533c915a4597fa4 (patch)
tree       b7c542e2061cf96c9f7ad500fa12567f9ff0b39f /drivers/scsi
parent     92bac83dd79e60e65c475222e41a992a70434beb (diff)
parent     8b8a518ef16be2de27207991e32fc32b0475c767 (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 4.1 merge window.
Diffstat (limited to 'drivers/scsi')
102 files changed, 5173 insertions, 3516 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index cd4129ff7ae4..7600639db4c4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -608,7 +608,8 @@ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
 	}
 
 	/* Load rest of compatibility struct */
-	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
+		sizeof(tw_dev->tw_compat_info.driver_version));
 	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
 	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
 	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
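Note on the 3w-9xxx.c hunk: the old call bounded strncpy() by the length of the source string, which neither limits the copy to the destination buffer nor guarantees NUL termination; strlcpy() is bounded by the destination and always terminates. A minimal kernel-style sketch of the difference (the buffer size and version string below are made up for illustration; strlcpy() here is the kernel helper from <linux/string.h>):

    char dst[16];
    const char *src = "illustrative-version-string";

    /* bounded by the source: can overrun dst and may leave it unterminated */
    strncpy(dst, src, strlen(src));

    /* bounded by the destination: truncates if needed, always NUL-terminates */
    strlcpy(dst, src, sizeof(dst));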
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 8d66a6469e29..c7be7bb37209 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3485,7 +3485,7 @@ static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost)
 	seq_printf(m, "\n\
 Current Driver Queue Depth: %d\n\
 Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs);
-	seq_printf(m, "\n\n\
+	seq_puts(m, "\n\n\
 DATA TRANSFER STATISTICS\n\
 \n\
 Target Tagged Queuing Queue Depth Active Attempted Completed\n\
@@ -3500,7 +3500,7 @@ Target Tagged Queuing Queue Depth Active Attempted Completed\n\
 		seq_printf(m,
 			   " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete);
 	}
-	seq_printf(m, "\n\
+	seq_puts(m, "\n\
 Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
 ====== ============= ============== =================== ===================\n");
 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3517,7 +3517,7 @@ Target Read Commands Write Commands Total Bytes Read Total Bytes Written\
 		else
 			seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units);
 	}
-	seq_printf(m, "\n\
+	seq_puts(m, "\n\
 Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
 ====== ======= ========= ========= ========= ========= =========\n");
 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3533,7 +3533,7 @@ Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
 			   tgt_stats[tgt].write_sz_buckets[0],
 			   tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]);
 	}
-	seq_printf(m, "\n\
+	seq_puts(m, "\n\
 Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
 ====== ======= ========= ========= ========= ========= =========\n");
 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3549,7 +3549,7 @@ Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
 			   tgt_stats[tgt].write_sz_buckets[5],
 			   tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]);
 	}
-	seq_printf(m, "\n\n\
+	seq_puts(m, "\n\n\
 ERROR RECOVERY STATISTICS\n\
 \n\
 Command Aborts Bus Device Resets Host Adapter Resets\n\
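Most of the remaining hunks in this merge follow the same pattern as BusLogic.c above: seq_printf() calls whose output is a constant string (or a single character) become seq_puts() or seq_putc(), which skip run-time format parsing and avoid treating fixed text as a format string. A rough sketch of the convention, written as a hypothetical seq_file show routine rather than code taken from the patch:

    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            int qdepth = 8;                                /* placeholder value */

            seq_puts(m, "DATA TRANSFER STATISTICS\n");     /* fixed text: no format parsing */
            seq_printf(m, "queue depth: %d\n", qdepth);    /* formatted output keeps seq_printf */
            seq_putc(m, '\n');                             /* single character */
            return 0;
    }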
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c92f415229f..b021bcb88537 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -201,12 +201,12 @@ config SCSI_ENCLOSURE
 	  certain enclosure conditions to be reported and is not required.
 
 config SCSI_CONSTANTS
-	bool "Verbose SCSI error reporting (kernel size +=12K)"
+	bool "Verbose SCSI error reporting (kernel size +=75K)"
 	depends on SCSI
 	help
 	  The error messages regarding your SCSI hardware will be easier to
 	  understand if you say Y here; it will enlarge your kernel by about
-	  12 KB. If in doubt, say Y.
+	  75 KB. If in doubt, say Y.
 
 config SCSI_LOGGING
 	bool "SCSI logging facility"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 58158f11ed7b..dee160a4f163 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -159,15 +159,15 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
-
-scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
+scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
 		scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
 scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
 scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
 scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
 scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
 scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
-scsi_mod-y += scsi_trace.o
+scsi_mod-y += scsi_trace.o scsi_logging.o
 scsi_mod-$(CONFIG_PM) += scsi_pm.o
 
 hv_storvsc-y := storvsc_drv.o
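Side note on the Makefile hunk: constants.o is no longer linked into scsi_mod unconditionally; it is built only when CONFIG_SCSI_CONSTANTS=y (and scsi_logging.o joins the always-built objects). The usual companion to making an object optional like this is a header that degrades to inline stubs when the option is off, so callers compile either way. The snippet below is only a hypothetical illustration of that pattern; the function name is made up and is not part of this patch:

    /* hypothetical header sketch of the CONFIG-gated pattern */
    #ifdef CONFIG_SCSI_CONSTANTS
    const char *scsi_opcode_name(unsigned char opcode);   /* real implementation in constants.o */
    #else
    static inline const char *scsi_opcode_name(unsigned char opcode)
    {
            return "";                                     /* cheap stub when the option is off */
    }
    #endif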
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 36244d63def2..8981701802ca 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -716,8 +716,6 @@ static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
 }
 #endif
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ## args)
 static
 void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
 static
@@ -734,19 +732,19 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
 	hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 
 #ifdef PSEUDO_DMA
-	SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n",
+	seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
 		hostdata->spin_max_w, hostdata->spin_max_r);
 #endif
 	spin_lock_irq(instance->host_lock);
 	if (!hostdata->connected)
-		SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
+		seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no);
 	else
 		lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
-	SPRINTF("scsi%d: issue_queue\n", instance->host_no);
+	seq_printf(m, "scsi%d: issue_queue\n", instance->host_no);
 	for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
 		lprint_Scsi_Cmnd(ptr, m);
 
-	SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
+	seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no);
 	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
 		lprint_Scsi_Cmnd(ptr, m);
 	spin_unlock_irq(instance->host_lock);
@@ -755,8 +753,8 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
 
 static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
 {
-	SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
-	SPRINTF(" command = ");
+	seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
+	seq_puts(m, " command = ");
 	lprint_command(cmd->cmnd, m);
 }
 
@@ -765,13 +763,13 @@ static void lprint_command(unsigned char *command, struct seq_file *m)
 	int i, s;
 	lprint_opcode(command[0], m);
 	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
-		SPRINTF("%02x ", command[i]);
-	SPRINTF("\n");
+		seq_printf(m, "%02x ", command[i]);
+	seq_putc(m, '\n');
 }
 
 static void lprint_opcode(int opcode, struct seq_file *m)
 {
-	SPRINTF("%2d (0x%02x)", opcode, opcode);
+	seq_printf(m, "%2d (0x%02x)", opcode, opcode);
 }
 
 
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2c5ce48c8f95..ae95e347f37d 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2880,7 +2880,7 @@ static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost)
 		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
 	}
 
-	seq_printf(m, "Target IDs Detected:");
+	seq_puts(m, "Target IDs Detected:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i))
 			seq_printf(m, " %X,", i);
@@ -2896,18 +2896,16 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
 	struct asc_board *boardp = shost_priv(shost);
 	ushort major, minor, letter;
 
-	seq_printf(m, "\nROM BIOS Version: ");
+	seq_puts(m, "\nROM BIOS Version: ");
 
 	/*
 	 * If the BIOS saved a valid signature, then fill in
 	 * the BIOS code segment base address.
 	 */
 	if (boardp->bios_signature != 0x55AA) {
-		seq_printf(m, "Disabled or Pre-3.1\n");
-		seq_printf(m,
-			   "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
-		seq_printf(m,
-			   "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
+		seq_puts(m, "Disabled or Pre-3.1\n"
+			 "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"
+			 "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
 	} else {
 		major = (boardp->bios_version >> 12) & 0xF;
 		minor = (boardp->bios_version >> 8) & 0xF;
@@ -2923,10 +2921,8 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
 	 */
 	if (major < 3 || (major <= 3 && minor < 1) ||
 	    (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
-		seq_printf(m,
-			   "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
-		seq_printf(m,
-			   "ftp://ftp.connectcom.net/pub\n");
+		seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"
+			 "ftp://ftp.connectcom.net/pub\n");
 		}
 	}
 }
@@ -3056,11 +3052,10 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	    == ASC_TRUE)
 		seq_printf(m, " Serial Number: %s\n", serialstr);
 	else if (ep->adapter_info[5] == 0xBB)
-		seq_printf(m,
+		seq_puts(m,
 			   " Default Settings Used for EEPROM-less Adapter.\n");
 	else
-		seq_printf(m,
-			   " Serial Number Signature Not Present.\n");
+		seq_puts(m, " Serial Number Signature Not Present.\n");
 
 	seq_printf(m,
 		   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
@@ -3070,34 +3065,30 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	seq_printf(m,
 		   " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
 
-	seq_printf(m, " Target ID: ");
+	seq_puts(m, " Target ID: ");
 	for (i = 0; i <= ASC_MAX_TID; i++)
 		seq_printf(m, " %d", i);
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Disconnects: ");
+	seq_puts(m, "\n Disconnects: ");
 	for (i = 0; i <= ASC_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Command Queuing: ");
+	seq_puts(m, "\n Command Queuing: ");
 	for (i = 0; i <= ASC_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Start Motor: ");
+	seq_puts(m, "\n Start Motor: ");
 	for (i = 0; i <= ASC_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Synchronous Transfer:");
+	seq_puts(m, "\n Synchronous Transfer:");
 	for (i = 0; i <= ASC_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 #ifdef CONFIG_ISA
 	if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
@@ -3151,7 +3142,7 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE)
 		seq_printf(m, " Serial Number: %s\n", serialstr);
 	else
-		seq_printf(m, " Serial Number Signature Not Present.\n");
+		seq_puts(m, " Serial Number Signature Not Present.\n");
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
 		seq_printf(m,
@@ -3209,10 +3200,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 		   ep_38C1600->termination_lvd, termstr,
 		   ep_38C1600->bios_ctrl);
 
-	seq_printf(m, " Target ID: ");
+	seq_puts(m, " Target ID: ");
 	for (i = 0; i <= ADV_MAX_TID; i++)
 		seq_printf(m, " %X", i);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
 		word = ep_3550->disc_enable;
@@ -3221,11 +3212,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	} else {
 		word = ep_38C1600->disc_enable;
 	}
-	seq_printf(m, " Disconnects: ");
+	seq_puts(m, " Disconnects: ");
 	for (i = 0; i <= ADV_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
 		word = ep_3550->tagqng_able;
@@ -3234,11 +3225,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	} else {
 		word = ep_38C1600->tagqng_able;
 	}
-	seq_printf(m, " Command Queuing: ");
+	seq_puts(m, " Command Queuing: ");
 	for (i = 0; i <= ADV_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
 		word = ep_3550->start_motor;
@@ -3247,28 +3238,28 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	} else {
 		word = ep_38C1600->start_motor;
 	}
-	seq_printf(m, " Start Motor: ");
+	seq_puts(m, " Start Motor: ");
 	for (i = 0; i <= ADV_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
-		seq_printf(m, " Synchronous Transfer:");
+		seq_puts(m, " Synchronous Transfer:");
 		for (i = 0; i <= ADV_MAX_TID; i++)
 			seq_printf(m, " %c",
 				   (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
 				   'Y' : 'N');
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
-		seq_printf(m, " Ultra Transfer: ");
+		seq_puts(m, " Ultra Transfer: ");
 		for (i = 0; i <= ADV_MAX_TID; i++)
 			seq_printf(m, " %c",
 				   (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i))
 				   ? 'Y' : 'N');
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
@@ -3278,16 +3269,15 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 	} else {
 		word = ep_38C1600->wdtr_able;
 	}
-	seq_printf(m, " Wide Transfer: ");
+	seq_puts(m, " Wide Transfer: ");
 	for (i = 0; i <= ADV_MAX_TID; i++)
 		seq_printf(m, " %c",
 			   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
 	    adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
-		seq_printf(m,
-			   " Synchronous Transfer Speed (Mhz):\n ");
+		seq_puts(m, " Synchronous Transfer Speed (Mhz):\n ");
 		for (i = 0; i <= ADV_MAX_TID; i++) {
 			char *speed_str;
 
@@ -3325,10 +3315,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
 			}
 			seq_printf(m, "%X:%s ", i, speed_str);
 			if (i == 7)
-				seq_printf(m, "\n ");
+				seq_puts(m, "\n ");
 			sdtr_speed >>= 4;
 		}
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 }
 
@@ -3403,7 +3393,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 	seq_printf(m,
 		   " Total Command Pending: %d\n", v->cur_total_qng);
 
-	seq_printf(m, " Command Queuing:");
+	seq_puts(m, " Command Queuing:");
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3413,10 +3403,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 			   i,
 			   (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	}
-	seq_printf(m, "\n");
 
 	/* Current number of commands waiting for a device. */
-	seq_printf(m, " Command Queue Pending:");
+	seq_puts(m, "\n Command Queue Pending:");
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3424,10 +3413,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		}
 		seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]);
 	}
-	seq_printf(m, "\n");
 
 	/* Current limit on number of commands that can be sent to a device. */
-	seq_printf(m, " Command Queue Limit:");
+	seq_puts(m, "\n Command Queue Limit:");
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3435,10 +3423,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		}
 		seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]);
 	}
-	seq_printf(m, "\n");
 
 	/* Indicate whether the device has returned queue full status. */
-	seq_printf(m, " Command Queue Full:");
+	seq_puts(m, "\n Command Queue Full:");
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3450,9 +3437,8 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		else
 			seq_printf(m, " %X:N", i);
 	}
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Synchronous Transfer:");
+	seq_puts(m, "\n Synchronous Transfer:");
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3462,7 +3448,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 			   i,
 			   (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	for (i = 0; i <= ASC_MAX_TID; i++) {
 		uchar syn_period_ix;
@@ -3476,7 +3462,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		seq_printf(m, " %X:", i);
 
 		if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
-			seq_printf(m, " Asynchronous");
+			seq_puts(m, " Asynchronous");
 		} else {
 			syn_period_ix =
 			    (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
@@ -3494,16 +3480,15 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		}
 
 		if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-			seq_printf(m, "*\n");
+			seq_puts(m, "*\n");
 			renegotiate = 1;
 		} else {
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 	}
 
 	if (renegotiate) {
-		seq_printf(m,
-			   " * = Re-negotiation pending before next command.\n");
+		seq_puts(m, " * = Re-negotiation pending before next command.\n");
 	}
 }
 
@@ -3548,7 +3533,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		   c->mcode_date, c->mcode_version);
 
 	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
-	seq_printf(m, " Queuing Enabled:");
+	seq_puts(m, " Queuing Enabled:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3559,9 +3544,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 			   i,
 			   (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	}
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Queue Limit:");
+	seq_puts(m, "\n Queue Limit:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3573,9 +3557,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
 		seq_printf(m, " %X:%d", i, lrambyte);
 	}
-	seq_printf(m, "\n");
 
-	seq_printf(m, " Command Pending:");
+	seq_puts(m, "\n Command Pending:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3587,10 +3570,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
 		seq_printf(m, " %X:%d", i, lrambyte);
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
-	seq_printf(m, " Wide Enabled:");
+	seq_puts(m, " Wide Enabled:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3601,10 +3584,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 			   i,
 			   (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
-	seq_printf(m, " Transfer Bit Width:");
+	seq_puts(m, " Transfer Bit Width:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3620,14 +3603,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
 		if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
 		    (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-			seq_printf(m, "*");
+			seq_putc(m, '*');
 			renegotiate = 1;
 		}
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
-	seq_printf(m, " Synchronous Enabled:");
+	seq_puts(m, " Synchronous Enabled:");
 	for (i = 0; i <= ADV_MAX_TID; i++) {
 		if ((chip_scsi_id == i) ||
 		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3638,7 +3621,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 			   i,
 			   (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
 	for (i = 0; i <= ADV_MAX_TID; i++) {
@@ -3657,14 +3640,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		seq_printf(m, " %X:", i);
 
 		if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */
-			seq_printf(m, " Asynchronous");
+			seq_puts(m, " Asynchronous");
 		} else {
-			seq_printf(m, " Transfer Period Factor: ");
+			seq_puts(m, " Transfer Period Factor: ");
 
 			if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */
-				seq_printf(m, "9 (80.0 Mhz),");
+				seq_puts(m, "9 (80.0 Mhz),");
 			} else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */
-				seq_printf(m, "10 (40.0 Mhz),");
+				seq_puts(m, "10 (40.0 Mhz),");
 			} else { /* 20 Mhz or below. */
 
 				period = (((lramword >> 8) * 25) + 50) / 4;
@@ -3684,16 +3667,15 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 		}
 
 		if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-			seq_printf(m, "*\n");
+			seq_puts(m, "*\n");
 			renegotiate = 1;
 		} else {
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 	}
 
 	if (renegotiate) {
-		seq_printf(m,
-			   " * = Re-negotiation pending before next command.\n");
+		seq_puts(m, " * = Re-negotiation pending before next command.\n");
 	}
 }
 
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 2b960b326daf..e31c460a1335 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2490,299 +2490,296 @@ static void show_queues(struct Scsi_Host *shpnt)
 	disp_enintr(shpnt);
 }
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
-
 static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
 {
 	int i;
 
-	SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
+	seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
 		ptr, ptr->device->id, (u8)ptr->device->lun);
 
 	for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
-		SPRINTF("0x%02x ", ptr->cmnd[i]);
+		seq_printf(m, "0x%02x ", ptr->cmnd[i]);
 
-	SPRINTF("); resid=%d; residual=%d; buffers=%d; phase |",
+	seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
 		scsi_get_resid(ptr), ptr->SCp.this_residual,
 		ptr->SCp.buffers_residual);
 
 	if (ptr->SCp.phase & not_issued)
-		SPRINTF("not issued|");
+		seq_puts(m, "not issued|");
 	if (ptr->SCp.phase & selecting)
-		SPRINTF("selecting|");
+		seq_puts(m, "selecting|");
 	if (ptr->SCp.phase & disconnected)
-		SPRINTF("disconnected|");
+		seq_puts(m, "disconnected|");
 	if (ptr->SCp.phase & aborted)
-		SPRINTF("aborted|");
+		seq_puts(m, "aborted|");
 	if (ptr->SCp.phase & identified)
-		SPRINTF("identified|");
+		seq_puts(m, "identified|");
 	if (ptr->SCp.phase & completed)
-		SPRINTF("completed|");
+		seq_puts(m, "completed|");
 	if (ptr->SCp.phase & spiordy)
-		SPRINTF("spiordy|");
+		seq_puts(m, "spiordy|");
 	if (ptr->SCp.phase & syncneg)
-		SPRINTF("syncneg|");
-	SPRINTF("; next=0x%p\n", SCNEXT(ptr));
+		seq_puts(m, "syncneg|");
+	seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
 }
 
 static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt)
 {
 	int s;
 
-	SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
+	seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
 
 	s = GETPORT(SCSISEQ);
-	SPRINTF("SCSISEQ( ");
+	seq_puts(m, "SCSISEQ( ");
 	if (s & TEMODEO)
-		SPRINTF("TARGET MODE ");
+		seq_puts(m, "TARGET MODE ");
 	if (s & ENSELO)
-		SPRINTF("SELO ");
+		seq_puts(m, "SELO ");
 	if (s & ENSELI)
-		SPRINTF("SELI ");
+		seq_puts(m, "SELI ");
 	if (s & ENRESELI)
-		SPRINTF("RESELI ");
+		seq_puts(m, "RESELI ");
 	if (s & ENAUTOATNO)
-		SPRINTF("AUTOATNO ");
+		seq_puts(m, "AUTOATNO ");
 	if (s & ENAUTOATNI)
-		SPRINTF("AUTOATNI ");
+		seq_puts(m, "AUTOATNI ");
 	if (s & ENAUTOATNP)
-		SPRINTF("AUTOATNP ");
+		seq_puts(m, "AUTOATNP ");
 	if (s & SCSIRSTO)
-		SPRINTF("SCSIRSTO ");
-	SPRINTF(");");
+		seq_puts(m, "SCSIRSTO ");
+	seq_puts(m, ");");
 
-	SPRINTF(" SCSISIG(");
+	seq_puts(m, " SCSISIG(");
 	s = GETPORT(SCSISIG);
 	switch (s & P_MASK) {
 	case P_DATAO:
-		SPRINTF("DATA OUT");
+		seq_puts(m, "DATA OUT");
 		break;
 	case P_DATAI:
-		SPRINTF("DATA IN");
+		seq_puts(m, "DATA IN");
 		break;
 	case P_CMD:
-		SPRINTF("COMMAND");
+		seq_puts(m, "COMMAND");
 		break;
 	case P_STATUS:
-		SPRINTF("STATUS");
+		seq_puts(m, "STATUS");
 		break;
 	case P_MSGO:
-		SPRINTF("MESSAGE OUT");
+		seq_puts(m, "MESSAGE OUT");
 		break;
 	case P_MSGI:
-		SPRINTF("MESSAGE IN");
+		seq_puts(m, "MESSAGE IN");
 		break;
 	default:
-		SPRINTF("*invalid*");
+		seq_puts(m, "*invalid*");
 		break;
 	}
 
-	SPRINTF("); ");
+	seq_puts(m, "); ");
 
-	SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+	seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
 
-	SPRINTF("SSTAT( ");
+	seq_puts(m, "SSTAT( ");
 	s = GETPORT(SSTAT0);
 	if (s & TARGET)
-		SPRINTF("TARGET ");
+		seq_puts(m, "TARGET ");
 	if (s & SELDO)
-		SPRINTF("SELDO ");
+		seq_puts(m, "SELDO ");
 	if (s & SELDI)
-		SPRINTF("SELDI ");
+		seq_puts(m, "SELDI ");
 	if (s & SELINGO)
-		SPRINTF("SELINGO ");
+		seq_puts(m, "SELINGO ");
 	if (s & SWRAP)
-		SPRINTF("SWRAP ");
+		seq_puts(m, "SWRAP ");
 	if (s & SDONE)
-		SPRINTF("SDONE ");
+		seq_puts(m, "SDONE ");
 	if (s & SPIORDY)
-		SPRINTF("SPIORDY ");
+		seq_puts(m, "SPIORDY ");
 	if (s & DMADONE)
-		SPRINTF("DMADONE ");
+		seq_puts(m, "DMADONE ");
 
 	s = GETPORT(SSTAT1);
 	if (s & SELTO)
-		SPRINTF("SELTO ");
+		seq_puts(m, "SELTO ");
 	if (s & ATNTARG)
-		SPRINTF("ATNTARG ");
+		seq_puts(m, "ATNTARG ");
 	if (s & SCSIRSTI)
-		SPRINTF("SCSIRSTI ");
+		seq_puts(m, "SCSIRSTI ");
 	if (s & PHASEMIS)
-		SPRINTF("PHASEMIS ");
+		seq_puts(m, "PHASEMIS ");
 	if (s & BUSFREE)
-		SPRINTF("BUSFREE ");
+		seq_puts(m, "BUSFREE ");
 	if (s & SCSIPERR)
-		SPRINTF("SCSIPERR ");
+		seq_puts(m, "SCSIPERR ");
 	if (s & PHASECHG)
-		SPRINTF("PHASECHG ");
+		seq_puts(m, "PHASECHG ");
 	if (s & REQINIT)
-		SPRINTF("REQINIT ");
-	SPRINTF("); ");
+		seq_puts(m, "REQINIT ");
+	seq_puts(m, "); ");
 
 
-	SPRINTF("SSTAT( ");
+	seq_puts(m, "SSTAT( ");
 
 	s = GETPORT(SSTAT0) & GETPORT(SIMODE0);
 
 	if (s & TARGET)
-		SPRINTF("TARGET ");
+		seq_puts(m, "TARGET ");
 	if (s & SELDO)
-		SPRINTF("SELDO ");
+		seq_puts(m, "SELDO ");
 	if (s & SELDI)
-		SPRINTF("SELDI ");
+		seq_puts(m, "SELDI ");
 	if (s & SELINGO)
-		SPRINTF("SELINGO ");
+		seq_puts(m, "SELINGO ");
 	if (s & SWRAP)
-		SPRINTF("SWRAP ");
+		seq_puts(m, "SWRAP ");
 	if (s & SDONE)
-		SPRINTF("SDONE ");
+		seq_puts(m, "SDONE ");
 	if (s & SPIORDY)
-		SPRINTF("SPIORDY ");
+		seq_puts(m, "SPIORDY ");
 	if (s & DMADONE)
-		SPRINTF("DMADONE ");
+		seq_puts(m, "DMADONE ");
 
 	s = GETPORT(SSTAT1) & GETPORT(SIMODE1);
 
 	if (s & SELTO)
-		SPRINTF("SELTO ");
+		seq_puts(m, "SELTO ");
 	if (s & ATNTARG)
-		SPRINTF("ATNTARG ");
+		seq_puts(m, "ATNTARG ");
 	if (s & SCSIRSTI)
-		SPRINTF("SCSIRSTI ");
+		seq_puts(m, "SCSIRSTI ");
 	if (s & PHASEMIS)
-		SPRINTF("PHASEMIS ");
+		seq_puts(m, "PHASEMIS ");
 	if (s & BUSFREE)
-		SPRINTF("BUSFREE ");
+		seq_puts(m, "BUSFREE ");
 	if (s & SCSIPERR)
-		SPRINTF("SCSIPERR ");
+		seq_puts(m, "SCSIPERR ");
 	if (s & PHASECHG)
-		SPRINTF("PHASECHG ");
+		seq_puts(m, "PHASECHG ");
 	if (s & REQINIT)
-		SPRINTF("REQINIT ");
-	SPRINTF("); ");
+		seq_puts(m, "REQINIT ");
+	seq_puts(m, "); ");
 
-	SPRINTF("SXFRCTL0( ");
+	seq_puts(m, "SXFRCTL0( ");
 
 	s = GETPORT(SXFRCTL0);
 	if (s & SCSIEN)
-		SPRINTF("SCSIEN ");
+		seq_puts(m, "SCSIEN ");
 	if (s & DMAEN)
-		SPRINTF("DMAEN ");
+		seq_puts(m, "DMAEN ");
 	if (s & CH1)
-		SPRINTF("CH1 ");
+		seq_puts(m, "CH1 ");
 	if (s & CLRSTCNT)
-		SPRINTF("CLRSTCNT ");
+		seq_puts(m, "CLRSTCNT ");
 	if (s & SPIOEN)
-		SPRINTF("SPIOEN ");
+		seq_puts(m, "SPIOEN ");
 	if (s & CLRCH1)
-		SPRINTF("CLRCH1 ");
-	SPRINTF("); ");
+		seq_puts(m, "CLRCH1 ");
+	seq_puts(m, "); ");
 
-	SPRINTF("SIGNAL( ");
+	seq_puts(m, "SIGNAL( ");
 
 	s = GETPORT(SCSISIG);
 	if (s & SIG_ATNI)
-		SPRINTF("ATNI ");
+		seq_puts(m, "ATNI ");
 	if (s & SIG_SELI)
-		SPRINTF("SELI ");
+		seq_puts(m, "SELI ");
 	if (s & SIG_BSYI)
-		SPRINTF("BSYI ");
+		seq_puts(m, "BSYI ");
 	if (s & SIG_REQI)
-		SPRINTF("REQI ");
+		seq_puts(m, "REQI ");
 	if (s & SIG_ACKI)
-		SPRINTF("ACKI ");
-	SPRINTF("); ");
+		seq_puts(m, "ACKI ");
+	seq_puts(m, "); ");
 
-	SPRINTF("SELID(%02x), ", GETPORT(SELID));
+	seq_printf(m, "SELID(%02x), ", GETPORT(SELID));
 
-	SPRINTF("STCNT(%d), ", GETSTCNT());
+	seq_printf(m, "STCNT(%d), ", GETSTCNT());
 
-	SPRINTF("SSTAT2( ");
+	seq_puts(m, "SSTAT2( ");
 
 	s = GETPORT(SSTAT2);
 	if (s & SOFFSET)
-		SPRINTF("SOFFSET ");
+		seq_puts(m, "SOFFSET ");
 	if (s & SEMPTY)
-		SPRINTF("SEMPTY ");
+		seq_puts(m, "SEMPTY ");
 	if (s & SFULL)
-		SPRINTF("SFULL ");
-	SPRINTF("); SFCNT (%d); ", s & (SFULL | SFCNT));
+		seq_puts(m, "SFULL ");
+	seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT));
 
 	s = GETPORT(SSTAT3);
-	SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
+	seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
 
-	SPRINTF("SSTAT4( ");
+	seq_puts(m, "SSTAT4( ");
 	s = GETPORT(SSTAT4);
 	if (s & SYNCERR)
-		SPRINTF("SYNCERR ");
+		seq_puts(m, "SYNCERR ");
 	if (s & FWERR)
-		SPRINTF("FWERR ");
+		seq_puts(m, "FWERR ");
 	if (s & FRERR)
-		SPRINTF("FRERR ");
-	SPRINTF("); ");
+		seq_puts(m, "FRERR ");
+	seq_puts(m, "); ");
 
-	SPRINTF("DMACNTRL0( ");
+	seq_puts(m, "DMACNTRL0( ");
 	s = GETPORT(DMACNTRL0);
-	SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT");
-	SPRINTF("%s ", s & DMA ? "DMA" : "PIO");
-	SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ");
+	seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT");
+	seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO");
+	seq_printf(m, "%s ", s & WRITE_READ ? "WRITE" : "READ");
 	if (s & ENDMA)
-		SPRINTF("ENDMA ");
+		seq_puts(m, "ENDMA ");
 	if (s & INTEN)
-		SPRINTF("INTEN ");
+		seq_puts(m, "INTEN ");
 	if (s & RSTFIFO)
-		SPRINTF("RSTFIFO ");
+		seq_puts(m, "RSTFIFO ");
 	if (s & SWINT)
-		SPRINTF("SWINT ");
-	SPRINTF("); ");
+		seq_puts(m, "SWINT ");
+	seq_puts(m, "); ");
 
-	SPRINTF("DMASTAT( ");
+	seq_puts(m, "DMASTAT( ");
 	s = GETPORT(DMASTAT);
 	if (s & ATDONE)
-		SPRINTF("ATDONE ");
+		seq_puts(m, "ATDONE ");
 	if (s & WORDRDY)
-		SPRINTF("WORDRDY ");
+		seq_puts(m, "WORDRDY ");
 	if (s & DFIFOFULL)
-		SPRINTF("DFIFOFULL ");
+		seq_puts(m, "DFIFOFULL ");
 	if (s & DFIFOEMP)
-		SPRINTF("DFIFOEMP ");
-	SPRINTF(")\n");
+		seq_puts(m, "DFIFOEMP ");
+	seq_puts(m, ")\n");
 
-	SPRINTF("enabled interrupts( ");
+	seq_puts(m, "enabled interrupts( ");
 
 	s = GETPORT(SIMODE0);
 	if (s & ENSELDO)
-		SPRINTF("ENSELDO ");
+		seq_puts(m, "ENSELDO ");
 	if (s & ENSELDI)
-		SPRINTF("ENSELDI ");
+		seq_puts(m, "ENSELDI ");
 	if (s & ENSELINGO)
-		SPRINTF("ENSELINGO ");
+		seq_puts(m, "ENSELINGO ");
 	if (s & ENSWRAP)
-		SPRINTF("ENSWRAP ");
+		seq_puts(m, "ENSWRAP ");
 	if (s & ENSDONE)
-		SPRINTF("ENSDONE ");
+		seq_puts(m, "ENSDONE ");
 	if (s & ENSPIORDY)
-		SPRINTF("ENSPIORDY ");
+		seq_puts(m, "ENSPIORDY ");
 	if (s & ENDMADONE)
-		SPRINTF("ENDMADONE ");
+		seq_puts(m, "ENDMADONE ");
 
 	s = GETPORT(SIMODE1);
 	if (s & ENSELTIMO)
-		SPRINTF("ENSELTIMO ");
+		seq_puts(m, "ENSELTIMO ");
 	if (s & ENATNTARG)
-		SPRINTF("ENATNTARG ");
+		seq_puts(m, "ENATNTARG ");
 	if (s & ENPHASEMIS)
-		SPRINTF("ENPHASEMIS ");
+		seq_puts(m, "ENPHASEMIS ");
 	if (s & ENBUSFREE)
-		SPRINTF("ENBUSFREE ");
+		seq_puts(m, "ENBUSFREE ");
 	if (s & ENSCSIPERR)
-		SPRINTF("ENSCSIPERR ");
+		seq_puts(m, "ENSCSIPERR ");
 	if (s & ENPHASECHG)
-		SPRINTF("ENPHASECHG ");
+		seq_puts(m, "ENPHASECHG ");
 	if (s & ENREQINIT)
-		SPRINTF("ENREQINIT ");
-	SPRINTF(")\n");
+		seq_puts(m, "ENREQINIT ");
+	seq_puts(m, ")\n");
 }
 
 static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
@@ -2825,56 +2822,56 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
 	Scsi_Cmnd *ptr;
 	unsigned long flags;
 
-	SPRINTF(AHA152X_REVID "\n");
+	seq_puts(m, AHA152X_REVID "\n");
 
-	SPRINTF("ioports 0x%04lx to 0x%04lx\n",
+	seq_printf(m, "ioports 0x%04lx to 0x%04lx\n",
 		shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1);
-	SPRINTF("interrupt 0x%02x\n", shpnt->irq);
-	SPRINTF("disconnection/reconnection %s\n",
+	seq_printf(m, "interrupt 0x%02x\n", shpnt->irq);
+	seq_printf(m, "disconnection/reconnection %s\n",
 		RECONNECT ? "enabled" : "disabled");
-	SPRINTF("parity checking %s\n",
+	seq_printf(m, "parity checking %s\n",
 		PARITY ? "enabled" : "disabled");
-	SPRINTF("synchronous transfers %s\n",
+	seq_printf(m, "synchronous transfers %s\n",
 		SYNCHRONOUS ? "enabled" : "disabled");
-	SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+	seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands);
 
 	if(SYNCHRONOUS) {
-		SPRINTF("synchronously operating targets (tick=50 ns):\n");
+		seq_puts(m, "synchronously operating targets (tick=50 ns):\n");
 		for (i = 0; i < 8; i++)
 			if (HOSTDATA(shpnt)->syncrate[i] & 0x7f)
-				SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+				seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n",
 					i,
 					(((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2),
 					(((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50,
 					HOSTDATA(shpnt)->syncrate[i] & 0x0f);
 	}
-	SPRINTF("\nqueue status:\n");
+	seq_puts(m, "\nqueue status:\n");
 	DO_LOCK(flags);
 	if (ISSUE_SC) {
-		SPRINTF("not yet issued commands:\n");
+		seq_puts(m, "not yet issued commands:\n");
 		for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
 			get_command(m, ptr);
 	} else
-		SPRINTF("no not yet issued commands\n");
+		seq_puts(m, "no not yet issued commands\n");
 	DO_UNLOCK(flags);
 
 	if (CURRENT_SC) {
-		SPRINTF("current command:\n");
+		seq_puts(m, "current command:\n");
 		get_command(m, CURRENT_SC);
 	} else
-		SPRINTF("no current command\n");
+		seq_puts(m, "no current command\n");
 
 	if (DISCONNECTED_SC) {
2868 | SPRINTF("disconnected commands:\n"); | 2865 | seq_puts(m, "disconnected commands:\n"); |
2869 | for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) | 2866 | for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) |
2870 | get_command(m, ptr); | 2867 | get_command(m, ptr); |
2871 | } else | 2868 | } else |
2872 | SPRINTF("no disconnected commands\n"); | 2869 | seq_puts(m, "no disconnected commands\n"); |
2873 | 2870 | ||
2874 | get_ports(m, shpnt); | 2871 | get_ports(m, shpnt); |
2875 | 2872 | ||
2876 | #if defined(AHA152X_STAT) | 2873 | #if defined(AHA152X_STAT) |
2877 | SPRINTF("statistics:\n" | 2874 | seq_printf(m, "statistics:\n" |
2878 | "total commands: %d\n" | 2875 | "total commands: %d\n" |
2879 | "disconnections: %d\n" | 2876 | "disconnections: %d\n" |
2880 | "busfree with check condition: %d\n" | 2877 | "busfree with check condition: %d\n" |
@@ -2894,7 +2891,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) | |||
2894 | HOSTDATA(shpnt)->busfree_without_done_command, | 2891 | HOSTDATA(shpnt)->busfree_without_done_command, |
2895 | HOSTDATA(shpnt)->busfree_without_any_action); | 2892 | HOSTDATA(shpnt)->busfree_without_any_action); |
2896 | for(i=0; i<maxstate; i++) { | 2893 | for(i=0; i<maxstate; i++) { |
2897 | SPRINTF("%-10s %-12d %-12d %-12ld\n", | 2894 | seq_printf(m, "%-10s %-12d %-12d %-12ld\n", |
2898 | states[i].name, | 2895 | states[i].name, |
2899 | HOSTDATA(shpnt)->count_trans[i], | 2896 | HOSTDATA(shpnt)->count_trans[i], |
2900 | HOSTDATA(shpnt)->count[i], | 2897 | HOSTDATA(shpnt)->count[i], |
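
Note: the aha152x hunks above replace the driver's local SPRINTF() wrapper with direct seq_file calls, using seq_puts() for constant strings and keeping seq_printf() only where formatting is needed. A minimal sketch of the same pattern in a show_info callback (the function and the output text below are illustrative, not taken from the driver):

	#include <linux/seq_file.h>
	#include <scsi/scsi_host.h>

	/* Illustrative sketch only: constant text goes through seq_puts(),
	 * formatted per-host values through seq_printf(). */
	static int example_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
	{
		seq_puts(m, "example driver\n");
		seq_printf(m, "io 0x%04lx, irq %d\n", shpnt->io_port, shpnt->irq);
		return 0;
	}
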
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 0bcacf71aef8..97f2accd3dbb 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
@@ -1298,7 +1298,7 @@ rescan_fifos: | |||
1298 | 1298 | ||
1299 | /* | 1299 | /* |
1300 | * Wait for any inprogress DMA to complete and clear DMA state | 1300 | * Wait for any inprogress DMA to complete and clear DMA state |
1301 | * if this if for an SCB in the qinfifo. | 1301 | * if this is for an SCB in the qinfifo. |
1302 | */ | 1302 | */ |
1303 | while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { | 1303 | while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { |
1304 | 1304 | ||
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c index 27dbfccea774..add2da581d66 100644 --- a/drivers/scsi/aic7xxx/aic79xx_proc.c +++ b/drivers/scsi/aic7xxx/aic79xx_proc.c | |||
@@ -97,7 +97,7 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) | |||
97 | u_int mb; | 97 | u_int mb; |
98 | 98 | ||
99 | if (tinfo->period == AHD_PERIOD_UNKNOWN) { | 99 | if (tinfo->period == AHD_PERIOD_UNKNOWN) { |
100 | seq_printf(m, "Renegotiation Pending\n"); | 100 | seq_puts(m, "Renegotiation Pending\n"); |
101 | return; | 101 | return; |
102 | } | 102 | } |
103 | speed = 3300; | 103 | speed = 3300; |
@@ -119,40 +119,38 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) | |||
119 | printed_options = 0; | 119 | printed_options = 0; |
120 | seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); | 120 | seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); |
121 | if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { | 121 | if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { |
122 | seq_printf(m, " RDSTRM"); | 122 | seq_puts(m, " RDSTRM"); |
123 | printed_options++; | 123 | printed_options++; |
124 | } | 124 | } |
125 | if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { | 125 | if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { |
126 | seq_printf(m, "%s", printed_options ? "|DT" : " DT"); | 126 | seq_puts(m, printed_options ? "|DT" : " DT"); |
127 | printed_options++; | 127 | printed_options++; |
128 | } | 128 | } |
129 | if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { | 129 | if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { |
130 | seq_printf(m, "%s", printed_options ? "|IU" : " IU"); | 130 | seq_puts(m, printed_options ? "|IU" : " IU"); |
131 | printed_options++; | 131 | printed_options++; |
132 | } | 132 | } |
133 | if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { | 133 | if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { |
134 | seq_printf(m, "%s", | 134 | seq_puts(m, printed_options ? "|RTI" : " RTI"); |
135 | printed_options ? "|RTI" : " RTI"); | ||
136 | printed_options++; | 135 | printed_options++; |
137 | } | 136 | } |
138 | if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { | 137 | if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { |
139 | seq_printf(m, "%s", | 138 | seq_puts(m, printed_options ? "|QAS" : " QAS"); |
140 | printed_options ? "|QAS" : " QAS"); | ||
141 | printed_options++; | 139 | printed_options++; |
142 | } | 140 | } |
143 | } | 141 | } |
144 | 142 | ||
145 | if (tinfo->width > 0) { | 143 | if (tinfo->width > 0) { |
146 | if (freq != 0) { | 144 | if (freq != 0) { |
147 | seq_printf(m, ", "); | 145 | seq_puts(m, ", "); |
148 | } else { | 146 | } else { |
149 | seq_printf(m, " ("); | 147 | seq_puts(m, " ("); |
150 | } | 148 | } |
151 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); | 149 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); |
152 | } else if (freq != 0) { | 150 | } else if (freq != 0) { |
153 | seq_printf(m, ")"); | 151 | seq_putc(m, ')'); |
154 | } | 152 | } |
155 | seq_printf(m, "\n"); | 153 | seq_putc(m, '\n'); |
156 | } | 154 | } |
157 | 155 | ||
158 | static void | 156 | static void |
@@ -167,15 +165,15 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, | |||
167 | tinfo = ahd_fetch_transinfo(ahd, channel, our_id, | 165 | tinfo = ahd_fetch_transinfo(ahd, channel, our_id, |
168 | target_id, &tstate); | 166 | target_id, &tstate); |
169 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); | 167 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); |
170 | seq_printf(m, "\tUser: "); | 168 | seq_puts(m, "\tUser: "); |
171 | ahd_format_transinfo(m, &tinfo->user); | 169 | ahd_format_transinfo(m, &tinfo->user); |
172 | starget = ahd->platform_data->starget[target_id]; | 170 | starget = ahd->platform_data->starget[target_id]; |
173 | if (starget == NULL) | 171 | if (starget == NULL) |
174 | return; | 172 | return; |
175 | 173 | ||
176 | seq_printf(m, "\tGoal: "); | 174 | seq_puts(m, "\tGoal: "); |
177 | ahd_format_transinfo(m, &tinfo->goal); | 175 | ahd_format_transinfo(m, &tinfo->goal); |
178 | seq_printf(m, "\tCurr: "); | 176 | seq_puts(m, "\tCurr: "); |
179 | ahd_format_transinfo(m, &tinfo->curr); | 177 | ahd_format_transinfo(m, &tinfo->curr); |
180 | 178 | ||
181 | for (lun = 0; lun < AHD_NUM_LUNS; lun++) { | 179 | for (lun = 0; lun < AHD_NUM_LUNS; lun++) { |
@@ -291,19 +289,19 @@ ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
291 | max_targ = 16; | 289 | max_targ = 16; |
292 | 290 | ||
293 | if (ahd->seep_config == NULL) | 291 | if (ahd->seep_config == NULL) |
294 | seq_printf(m, "No Serial EEPROM\n"); | 292 | seq_puts(m, "No Serial EEPROM\n"); |
295 | else { | 293 | else { |
296 | seq_printf(m, "Serial EEPROM:\n"); | 294 | seq_puts(m, "Serial EEPROM:\n"); |
297 | for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { | 295 | for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { |
298 | if (((i % 8) == 0) && (i != 0)) { | 296 | if (((i % 8) == 0) && (i != 0)) { |
299 | seq_printf(m, "\n"); | 297 | seq_putc(m, '\n'); |
300 | } | 298 | } |
301 | seq_printf(m, "0x%.4x ", | 299 | seq_printf(m, "0x%.4x ", |
302 | ((uint16_t*)ahd->seep_config)[i]); | 300 | ((uint16_t*)ahd->seep_config)[i]); |
303 | } | 301 | } |
304 | seq_printf(m, "\n"); | 302 | seq_putc(m, '\n'); |
305 | } | 303 | } |
306 | seq_printf(m, "\n"); | 304 | seq_putc(m, '\n'); |
307 | 305 | ||
308 | if ((ahd->features & AHD_WIDE) == 0) | 306 | if ((ahd->features & AHD_WIDE) == 0) |
309 | max_targ = 8; | 307 | max_targ = 8; |
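
Note: the aic79xx_proc hunks above also convert single-character output, so seq_printf(m, ")") and seq_printf(m, "\n") become seq_putc(). A short sketch of that idiom (names below are made up for illustration):

	#include <linux/seq_file.h>
	#include <linux/types.h>

	/* Illustrative sketch only: one-character output uses seq_putc()
	 * rather than a one-character seq_printf() format string. */
	static void example_end_line(struct seq_file *m, bool opened_paren)
	{
		if (opened_paren)
			seq_putc(m, ')');
		seq_putc(m, '\n');
	}
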
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c index 64eec6c07a83..18459605d991 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_proc.c +++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c | |||
@@ -119,15 +119,15 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo) | |||
119 | 119 | ||
120 | if (tinfo->width > 0) { | 120 | if (tinfo->width > 0) { |
121 | if (freq != 0) { | 121 | if (freq != 0) { |
122 | seq_printf(m, ", "); | 122 | seq_puts(m, ", "); |
123 | } else { | 123 | } else { |
124 | seq_printf(m, " ("); | 124 | seq_puts(m, " ("); |
125 | } | 125 | } |
126 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); | 126 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); |
127 | } else if (freq != 0) { | 127 | } else if (freq != 0) { |
128 | seq_printf(m, ")"); | 128 | seq_putc(m, ')'); |
129 | } | 129 | } |
130 | seq_printf(m, "\n"); | 130 | seq_putc(m, '\n'); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void | 133 | static void |
@@ -145,15 +145,15 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, | |||
145 | if ((ahc->features & AHC_TWIN) != 0) | 145 | if ((ahc->features & AHC_TWIN) != 0) |
146 | seq_printf(m, "Channel %c ", channel); | 146 | seq_printf(m, "Channel %c ", channel); |
147 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); | 147 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); |
148 | seq_printf(m, "\tUser: "); | 148 | seq_puts(m, "\tUser: "); |
149 | ahc_format_transinfo(m, &tinfo->user); | 149 | ahc_format_transinfo(m, &tinfo->user); |
150 | starget = ahc->platform_data->starget[target_offset]; | 150 | starget = ahc->platform_data->starget[target_offset]; |
151 | if (!starget) | 151 | if (!starget) |
152 | return; | 152 | return; |
153 | 153 | ||
154 | seq_printf(m, "\tGoal: "); | 154 | seq_puts(m, "\tGoal: "); |
155 | ahc_format_transinfo(m, &tinfo->goal); | 155 | ahc_format_transinfo(m, &tinfo->goal); |
156 | seq_printf(m, "\tCurr: "); | 156 | seq_puts(m, "\tCurr: "); |
157 | ahc_format_transinfo(m, &tinfo->curr); | 157 | ahc_format_transinfo(m, &tinfo->curr); |
158 | 158 | ||
159 | for (lun = 0; lun < AHC_NUM_LUNS; lun++) { | 159 | for (lun = 0; lun < AHC_NUM_LUNS; lun++) { |
@@ -303,19 +303,19 @@ ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
303 | 303 | ||
304 | 304 | ||
305 | if (ahc->seep_config == NULL) | 305 | if (ahc->seep_config == NULL) |
306 | seq_printf(m, "No Serial EEPROM\n"); | 306 | seq_puts(m, "No Serial EEPROM\n"); |
307 | else { | 307 | else { |
308 | seq_printf(m, "Serial EEPROM:\n"); | 308 | seq_puts(m, "Serial EEPROM:\n"); |
309 | for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { | 309 | for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { |
310 | if (((i % 8) == 0) && (i != 0)) { | 310 | if (((i % 8) == 0) && (i != 0)) { |
311 | seq_printf(m, "\n"); | 311 | seq_putc(m, '\n'); |
312 | } | 312 | } |
313 | seq_printf(m, "0x%.4x ", | 313 | seq_printf(m, "0x%.4x ", |
314 | ((uint16_t*)ahc->seep_config)[i]); | 314 | ((uint16_t*)ahc->seep_config)[i]); |
315 | } | 315 | } |
316 | seq_printf(m, "\n"); | 316 | seq_putc(m, '\n'); |
317 | } | 317 | } |
318 | seq_printf(m, "\n"); | 318 | seq_putc(m, '\n'); |
319 | 319 | ||
320 | max_targ = 16; | 320 | max_targ = 16; |
321 | if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) | 321 | if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) |
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c index aa3e2c7cd83c..a6f5ee80fadc 100644 --- a/drivers/scsi/am53c974.c +++ b/drivers/scsi/am53c974.c | |||
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp) | |||
178 | break; | 178 | break; |
179 | cpu_relax(); | 179 | cpu_relax(); |
180 | } | 180 | } |
181 | if (resid > 1) { | ||
182 | /* FIFO not cleared */ | ||
183 | shost_printk(KERN_INFO, esp->host, | ||
184 | "FIFO not cleared, %d bytes left\n", | ||
185 | resid); | ||
186 | } | ||
187 | 181 | ||
188 | /* | 182 | /* |
189 | * When there is a residual BCMPLT will never be set | 183 | * When there is a residual BCMPLT will never be set |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index e64c3af7c1a0..decdc71b6b86 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2990,7 +2990,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
2990 | struct fas216_device *dev; | 2990 | struct fas216_device *dev; |
2991 | struct scsi_device *scd; | 2991 | struct scsi_device *scd; |
2992 | 2992 | ||
2993 | seq_printf(m, "Device/Lun TaggedQ Parity Sync\n"); | 2993 | seq_puts(m, "Device/Lun TaggedQ Parity Sync\n"); |
2994 | 2994 | ||
2995 | shost_for_each_device(scd, info->host) { | 2995 | shost_for_each_device(scd, info->host) { |
2996 | dev = &info->device[scd->id]; | 2996 | dev = &info->device[scd->id]; |
@@ -3000,7 +3000,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
3000 | scd->simple_tags ? "en" : "dis", | 3000 | scd->simple_tags ? "en" : "dis", |
3001 | scd->current_tag); | 3001 | scd->current_tag); |
3002 | else | 3002 | else |
3003 | seq_printf(m, "unsupported "); | 3003 | seq_puts(m, "unsupported "); |
3004 | 3004 | ||
3005 | seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis"); | 3005 | seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis"); |
3006 | 3006 | ||
@@ -3008,7 +3008,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
3008 | seq_printf(m, "offset %d, %d ns\n", | 3008 | seq_printf(m, "offset %d, %d ns\n", |
3009 | dev->sof, dev->period * 4); | 3009 | dev->sof, dev->period * 4); |
3010 | else | 3010 | else |
3011 | seq_printf(m, "async\n"); | 3011 | seq_puts(m, "async\n"); |
3012 | } | 3012 | } |
3013 | } | 3013 | } |
3014 | 3014 | ||
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 6daed6b386d4..a70255413e7f 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
@@ -711,12 +711,12 @@ static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) | |||
711 | unsigned char *command; | 711 | unsigned char *command; |
712 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", | 712 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", |
713 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 713 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
714 | seq_printf(m, " command = "); | 714 | seq_puts(m, " command = "); |
715 | command = cmd->cmnd; | 715 | command = cmd->cmnd; |
716 | seq_printf(m, "%2d (0x%02x)", command[0], command[0]); | 716 | seq_printf(m, "%2d (0x%02x)", command[0], command[0]); |
717 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) | 717 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) |
718 | seq_printf(m, " %02x", command[i]); | 718 | seq_printf(m, " %02x", command[i]); |
719 | seq_printf(m, "\n"); | 719 | seq_putc(m, '\n'); |
720 | } | 720 | } |
721 | 721 | ||
722 | static int __maybe_unused NCR5380_show_info(struct seq_file *m, | 722 | static int __maybe_unused NCR5380_show_info(struct seq_file *m, |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index a795d81ef875..0836433e3a2d 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -3101,9 +3101,8 @@ static const char *atp870u_info(struct Scsi_Host *notused) | |||
3101 | 3101 | ||
3102 | static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) | 3102 | static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) |
3103 | { | 3103 | { |
3104 | seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n"); | 3104 | seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n" |
3105 | seq_printf(m, "\n"); | 3105 | "Adapter Configuration:\n"); |
3106 | seq_printf(m, "Adapter Configuration:\n"); | ||
3107 | seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); | 3106 | seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); |
3108 | seq_printf(m, " IRQ: %d\n", HBAptr->irq); | 3107 | seq_printf(m, " IRQ: %d\n", HBAptr->irq); |
3109 | return 0; | 3108 | return 0; |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index f3193406776c..a7cc61837818 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -48,7 +48,6 @@ static unsigned int be_iopoll_budget = 10; | |||
48 | static unsigned int be_max_phys_size = 64; | 48 | static unsigned int be_max_phys_size = 64; |
49 | static unsigned int enable_msix = 1; | 49 | static unsigned int enable_msix = 1; |
50 | 50 | ||
51 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | ||
52 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); | 51 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); |
53 | MODULE_VERSION(BUILD_STR); | 52 | MODULE_VERSION(BUILD_STR); |
54 | MODULE_AUTHOR("Emulex Corporation"); | 53 | MODULE_AUTHOR("Emulex Corporation"); |
@@ -586,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) | |||
586 | "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); | 585 | "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); |
587 | return NULL; | 586 | return NULL; |
588 | } | 587 | } |
589 | shost->dma_boundary = pcidev->dma_mask; | ||
590 | shost->max_id = BE2_MAX_SESSIONS; | 588 | shost->max_id = BE2_MAX_SESSIONS; |
591 | shost->max_channel = 0; | 589 | shost->max_channel = 0; |
592 | shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; | 590 | shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; |
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 6bac8a746ee2..0045742fab7d 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
@@ -194,16 +194,10 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, | |||
194 | 194 | ||
195 | retry: | 195 | retry: |
196 | errno = 0; | 196 | errno = 0; |
197 | if (debug) { | ||
198 | DPRINTK("command: "); | ||
199 | __scsi_print_command(cmd, cmd_len); | ||
200 | } | ||
201 | |||
202 | result = scsi_execute_req(ch->device, cmd, direction, buffer, | 197 | result = scsi_execute_req(ch->device, cmd, direction, buffer, |
203 | buflength, &sshdr, timeout * HZ, | 198 | buflength, &sshdr, timeout * HZ, |
204 | MAX_RETRIES, NULL); | 199 | MAX_RETRIES, NULL); |
205 | 200 | ||
206 | DPRINTK("result: 0x%x\n",result); | ||
207 | if (driver_byte(result) & DRIVER_SENSE) { | 201 | if (driver_byte(result) & DRIVER_SENSE) { |
208 | if (debug) | 202 | if (debug) |
209 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); | 203 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); |
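
Note: the ch hunk above drops the DPRINTK tracing around scsi_execute_req() but keeps the call itself. For reference, a hedged sketch of issuing a request with the same argument order; the command, timeout, and retry values here are invented for illustration and are not from the driver:

	#include <linux/dma-direction.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>

	/* Illustrative sketch only: TEST UNIT READY via scsi_execute_req(),
	 * mirroring how ch_do_scsi() issues its CDBs in the hunk above. */
	static int example_test_unit_ready(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
		struct scsi_sense_hdr sshdr;

		return scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
					30 * HZ, 3, NULL);
	}
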
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index e2068a2621c4..fa09d4be2b53 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -18,14 +18,10 @@ | |||
18 | #include <scsi/scsi_eh.h> | 18 | #include <scsi/scsi_eh.h> |
19 | #include <scsi/scsi_dbg.h> | 19 | #include <scsi/scsi_dbg.h> |
20 | 20 | ||
21 | |||
22 | |||
23 | /* Commands with service actions that change the command name */ | 21 | /* Commands with service actions that change the command name */ |
24 | #define THIRD_PARTY_COPY_OUT 0x83 | 22 | #define THIRD_PARTY_COPY_OUT 0x83 |
25 | #define THIRD_PARTY_COPY_IN 0x84 | 23 | #define THIRD_PARTY_COPY_IN 0x84 |
26 | 24 | ||
27 | #define VENDOR_SPECIFIC_CDB 0xc0 | ||
28 | |||
29 | struct sa_name_list { | 25 | struct sa_name_list { |
30 | int opcode; | 26 | int opcode; |
31 | const struct value_name_pair *arr; | 27 | const struct value_name_pair *arr; |
@@ -37,7 +33,6 @@ struct value_name_pair { | |||
37 | const char * name; | 33 | const char * name; |
38 | }; | 34 | }; |
39 | 35 | ||
40 | #ifdef CONFIG_SCSI_CONSTANTS | ||
41 | static const char * cdb_byte0_names[] = { | 36 | static const char * cdb_byte0_names[] = { |
42 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", | 37 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", |
43 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, | 38 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, |
@@ -261,28 +256,8 @@ static struct sa_name_list sa_names_arr[] = { | |||
261 | {0, NULL, 0}, | 256 | {0, NULL, 0}, |
262 | }; | 257 | }; |
263 | 258 | ||
264 | #else /* ifndef CONFIG_SCSI_CONSTANTS */ | 259 | bool scsi_opcode_sa_name(int opcode, int service_action, |
265 | static const char *cdb_byte0_names[0]; | 260 | const char **cdb_name, const char **sa_name) |
266 | |||
267 | static struct sa_name_list sa_names_arr[] = { | ||
268 | {VARIABLE_LENGTH_CMD, NULL, 0}, | ||
269 | {MAINTENANCE_IN, NULL, 0}, | ||
270 | {MAINTENANCE_OUT, NULL, 0}, | ||
271 | {PERSISTENT_RESERVE_IN, NULL, 0}, | ||
272 | {PERSISTENT_RESERVE_OUT, NULL, 0}, | ||
273 | {SERVICE_ACTION_IN_12, NULL, 0}, | ||
274 | {SERVICE_ACTION_OUT_12, NULL, 0}, | ||
275 | {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0}, | ||
276 | {SERVICE_ACTION_IN_16, NULL, 0}, | ||
277 | {SERVICE_ACTION_OUT_16, NULL, 0}, | ||
278 | {THIRD_PARTY_COPY_IN, NULL, 0}, | ||
279 | {THIRD_PARTY_COPY_OUT, NULL, 0}, | ||
280 | {0, NULL, 0}, | ||
281 | }; | ||
282 | #endif /* CONFIG_SCSI_CONSTANTS */ | ||
283 | |||
284 | static bool scsi_opcode_sa_name(int opcode, int service_action, | ||
285 | const char **cdb_name, const char **sa_name) | ||
286 | { | 261 | { |
287 | struct sa_name_list *sa_name_ptr; | 262 | struct sa_name_list *sa_name_ptr; |
288 | const struct value_name_pair *arr = NULL; | 263 | const struct value_name_pair *arr = NULL; |
@@ -315,76 +290,6 @@ static bool scsi_opcode_sa_name(int opcode, int service_action, | |||
315 | return true; | 290 | return true; |
316 | } | 291 | } |
317 | 292 | ||
318 | static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len) | ||
319 | { | ||
320 | int sa, cdb0; | ||
321 | const char *cdb_name = NULL, *sa_name = NULL; | ||
322 | |||
323 | cdb0 = cdbp[0]; | ||
324 | if (cdb0 == VARIABLE_LENGTH_CMD) { | ||
325 | if (cdb_len < 10) { | ||
326 | printk("short variable length command, len=%zu", | ||
327 | cdb_len); | ||
328 | return; | ||
329 | } | ||
330 | sa = (cdbp[8] << 8) + cdbp[9]; | ||
331 | } else | ||
332 | sa = cdbp[1] & 0x1f; | ||
333 | |||
334 | if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { | ||
335 | if (cdb_name) | ||
336 | printk("%s", cdb_name); | ||
337 | else if (cdb0 >= VENDOR_SPECIFIC_CDB) | ||
338 | printk("cdb[0]=0x%x (vendor)", cdb0); | ||
339 | else if (cdb0 >= 0x60 && cdb0 < 0x7e) | ||
340 | printk("cdb[0]=0x%x (reserved)", cdb0); | ||
341 | else | ||
342 | printk("cdb[0]=0x%x", cdb0); | ||
343 | } else { | ||
344 | if (sa_name) | ||
345 | printk("%s", sa_name); | ||
346 | else if (cdb_name) | ||
347 | printk("%s, sa=0x%x", cdb_name, sa); | ||
348 | else | ||
349 | printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | void __scsi_print_command(const unsigned char *cdb, size_t cdb_len) | ||
354 | { | ||
355 | int k, len; | ||
356 | |||
357 | print_opcode_name(cdb, cdb_len); | ||
358 | len = scsi_command_size(cdb); | ||
359 | if (cdb_len < len) | ||
360 | len = cdb_len; | ||
361 | /* print out all bytes in cdb */ | ||
362 | for (k = 0; k < len; ++k) | ||
363 | printk(" %02x", cdb[k]); | ||
364 | printk("\n"); | ||
365 | } | ||
366 | EXPORT_SYMBOL(__scsi_print_command); | ||
367 | |||
368 | void scsi_print_command(struct scsi_cmnd *cmd) | ||
369 | { | ||
370 | int k; | ||
371 | |||
372 | if (cmd->cmnd == NULL) | ||
373 | return; | ||
374 | |||
375 | scmd_printk(KERN_INFO, cmd, "CDB: "); | ||
376 | print_opcode_name(cmd->cmnd, cmd->cmd_len); | ||
377 | |||
378 | /* print out all bytes in cdb */ | ||
379 | printk(":"); | ||
380 | for (k = 0; k < cmd->cmd_len; ++k) | ||
381 | printk(" %02x", cmd->cmnd[k]); | ||
382 | printk("\n"); | ||
383 | } | ||
384 | EXPORT_SYMBOL(scsi_print_command); | ||
385 | |||
386 | #ifdef CONFIG_SCSI_CONSTANTS | ||
387 | |||
388 | struct error_info { | 293 | struct error_info { |
389 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ | 294 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ |
390 | const char * text; | 295 | const char * text; |
@@ -392,7 +297,7 @@ struct error_info { | |||
392 | 297 | ||
393 | /* | 298 | /* |
394 | * The canonical list of T10 Additional Sense Codes is available at: | 299 | * The canonical list of T10 Additional Sense Codes is available at: |
395 | * http://www.t10.org/lists/asc-num.txt [most recent: 20130605] | 300 | * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] |
396 | */ | 301 | */ |
397 | 302 | ||
398 | static const struct error_info additional[] = | 303 | static const struct error_info additional[] = |
@@ -421,6 +326,7 @@ static const struct error_info additional[] = | |||
421 | {0x001E, "Conflicting SA creation request"}, | 326 | {0x001E, "Conflicting SA creation request"}, |
422 | {0x001F, "Logical unit transitioning to another power condition"}, | 327 | {0x001F, "Logical unit transitioning to another power condition"}, |
423 | {0x0020, "Extended copy information available"}, | 328 | {0x0020, "Extended copy information available"}, |
329 | {0x0021, "Atomic command aborted due to ACA"}, | ||
424 | 330 | ||
425 | {0x0100, "No index/sector signal"}, | 331 | {0x0100, "No index/sector signal"}, |
426 | 332 | ||
@@ -446,6 +352,7 @@ static const struct error_info additional[] = | |||
446 | {0x040C, "Logical unit not accessible, target port in unavailable " | 352 | {0x040C, "Logical unit not accessible, target port in unavailable " |
447 | "state"}, | 353 | "state"}, |
448 | {0x040D, "Logical unit not ready, structure check required"}, | 354 | {0x040D, "Logical unit not ready, structure check required"}, |
355 | {0x040E, "Logical unit not ready, security session in progress"}, | ||
449 | {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, | 356 | {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, |
450 | {0x0411, "Logical unit not ready, notify (enable spinup) required"}, | 357 | {0x0411, "Logical unit not ready, notify (enable spinup) required"}, |
451 | {0x0412, "Logical unit not ready, offline"}, | 358 | {0x0412, "Logical unit not ready, offline"}, |
@@ -462,6 +369,11 @@ static const struct error_info additional[] = | |||
462 | {0x041C, "Logical unit not ready, additional power use not yet " | 369 | {0x041C, "Logical unit not ready, additional power use not yet " |
463 | "granted"}, | 370 | "granted"}, |
464 | {0x041D, "Logical unit not ready, configuration in progress"}, | 371 | {0x041D, "Logical unit not ready, configuration in progress"}, |
372 | {0x041E, "Logical unit not ready, microcode activation required"}, | ||
373 | {0x041F, "Logical unit not ready, microcode download required"}, | ||
374 | {0x0420, "Logical unit not ready, logical unit reset required"}, | ||
375 | {0x0421, "Logical unit not ready, hard reset required"}, | ||
376 | {0x0422, "Logical unit not ready, power cycle required"}, | ||
465 | 377 | ||
466 | {0x0500, "Logical unit does not respond to selection"}, | 378 | {0x0500, "Logical unit does not respond to selection"}, |
467 | 379 | ||
@@ -480,6 +392,7 @@ static const struct error_info additional[] = | |||
480 | {0x0902, "Focus servo failure"}, | 392 | {0x0902, "Focus servo failure"}, |
481 | {0x0903, "Spindle servo failure"}, | 393 | {0x0903, "Spindle servo failure"}, |
482 | {0x0904, "Head select fault"}, | 394 | {0x0904, "Head select fault"}, |
395 | {0x0905, "Vibration induced tracking error"}, | ||
483 | 396 | ||
484 | {0x0A00, "Error log overflow"}, | 397 | {0x0A00, "Error log overflow"}, |
485 | 398 | ||
@@ -510,6 +423,7 @@ static const struct error_info additional[] = | |||
510 | {0x0C0D, "Write error - not enough unsolicited data"}, | 423 | {0x0C0D, "Write error - not enough unsolicited data"}, |
511 | {0x0C0E, "Multiple write errors"}, | 424 | {0x0C0E, "Multiple write errors"}, |
512 | {0x0C0F, "Defects in error window"}, | 425 | {0x0C0F, "Defects in error window"}, |
426 | {0x0C10, "Incomplete multiple atomic write operations"}, | ||
513 | 427 | ||
514 | {0x0D00, "Error detected by third party temporary initiator"}, | 428 | {0x0D00, "Error detected by third party temporary initiator"}, |
515 | {0x0D01, "Third party device failure"}, | 429 | {0x0D01, "Third party device failure"}, |
@@ -635,6 +549,10 @@ static const struct error_info additional[] = | |||
635 | {0x2101, "Invalid element address"}, | 549 | {0x2101, "Invalid element address"}, |
636 | {0x2102, "Invalid address for write"}, | 550 | {0x2102, "Invalid address for write"}, |
637 | {0x2103, "Invalid write crossing layer jump"}, | 551 | {0x2103, "Invalid write crossing layer jump"}, |
552 | {0x2104, "Unaligned write command"}, | ||
553 | {0x2105, "Write boundary violation"}, | ||
554 | {0x2106, "Attempt to read invalid data"}, | ||
555 | {0x2107, "Read boundary violation"}, | ||
638 | 556 | ||
639 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, | 557 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, |
640 | 558 | ||
@@ -691,6 +609,7 @@ static const struct error_info additional[] = | |||
691 | {0x2705, "Permanent write protect"}, | 609 | {0x2705, "Permanent write protect"}, |
692 | {0x2706, "Conditional write protect"}, | 610 | {0x2706, "Conditional write protect"}, |
693 | {0x2707, "Space allocation failed write protect"}, | 611 | {0x2707, "Space allocation failed write protect"}, |
612 | {0x2708, "Zone is read only"}, | ||
694 | 613 | ||
695 | {0x2800, "Not ready to ready change, medium may have changed"}, | 614 | {0x2800, "Not ready to ready change, medium may have changed"}, |
696 | {0x2801, "Import or export element accessed"}, | 615 | {0x2801, "Import or export element accessed"}, |
@@ -743,10 +662,15 @@ static const struct error_info additional[] = | |||
743 | {0x2C0A, "Partition or collection contains user objects"}, | 662 | {0x2C0A, "Partition or collection contains user objects"}, |
744 | {0x2C0B, "Not reserved"}, | 663 | {0x2C0B, "Not reserved"}, |
745 | {0x2C0C, "Orwrite generation does not match"}, | 664 | {0x2C0C, "Orwrite generation does not match"}, |
665 | {0x2C0D, "Reset write pointer not allowed"}, | ||
666 | {0x2C0E, "Zone is offline"}, | ||
746 | 667 | ||
747 | {0x2D00, "Overwrite error on update in place"}, | 668 | {0x2D00, "Overwrite error on update in place"}, |
748 | 669 | ||
749 | {0x2E00, "Insufficient time for operation"}, | 670 | {0x2E00, "Insufficient time for operation"}, |
671 | {0x2E01, "Command timeout before processing"}, | ||
672 | {0x2E02, "Command timeout during processing"}, | ||
673 | {0x2E03, "Command timeout during processing due to error recovery"}, | ||
750 | 674 | ||
751 | {0x2F00, "Commands cleared by another initiator"}, | 675 | {0x2F00, "Commands cleared by another initiator"}, |
752 | {0x2F01, "Commands cleared by power loss notification"}, | 676 | {0x2F01, "Commands cleared by power loss notification"}, |
@@ -868,6 +792,7 @@ static const struct error_info additional[] = | |||
868 | {0x3F13, "iSCSI IP address removed"}, | 792 | {0x3F13, "iSCSI IP address removed"}, |
869 | {0x3F14, "iSCSI IP address changed"}, | 793 | {0x3F14, "iSCSI IP address changed"}, |
870 | {0x3F15, "Inspect referrals sense descriptors"}, | 794 | {0x3F15, "Inspect referrals sense descriptors"}, |
795 | {0x3F16, "Microcode has been changed without reset"}, | ||
871 | /* | 796 | /* |
872 | * {0x40NN, "Ram failure"}, | 797 | * {0x40NN, "Ram failure"}, |
873 | * {0x40NN, "Diagnostic failure on component nn"}, | 798 | * {0x40NN, "Diagnostic failure on component nn"}, |
@@ -946,6 +871,11 @@ static const struct error_info additional[] = | |||
946 | {0x5306, "Volume identifier missing"}, | 871 | {0x5306, "Volume identifier missing"}, |
947 | {0x5307, "Duplicate volume identifier"}, | 872 | {0x5307, "Duplicate volume identifier"}, |
948 | {0x5308, "Element status unknown"}, | 873 | {0x5308, "Element status unknown"}, |
874 | {0x5309, "Data transfer device error - load failed"}, | ||
875 | {0x530a, "Data transfer device error - unload failed"}, | ||
876 | {0x530b, "Data transfer device error - unload missing"}, | ||
877 | {0x530c, "Data transfer device error - eject failed"}, | ||
878 | {0x530d, "Data transfer device error - library communication failed"}, | ||
949 | 879 | ||
950 | {0x5400, "Scsi to host system interface failure"}, | 880 | {0x5400, "Scsi to host system interface failure"}, |
951 | 881 | ||
@@ -963,6 +893,7 @@ static const struct error_info additional[] = | |||
963 | {0x550B, "Insufficient power for operation"}, | 893 | {0x550B, "Insufficient power for operation"}, |
964 | {0x550C, "Insufficient resources to create rod"}, | 894 | {0x550C, "Insufficient resources to create rod"}, |
965 | {0x550D, "Insufficient resources to create rod token"}, | 895 | {0x550D, "Insufficient resources to create rod token"}, |
896 | {0x550E, "Insufficient zone resources"}, | ||
966 | 897 | ||
967 | {0x5700, "Unable to recover table-of-contents"}, | 898 | {0x5700, "Unable to recover table-of-contents"}, |
968 | 899 | ||
@@ -1247,15 +1178,12 @@ static const char * const snstext[] = { | |||
1247 | "Completed", /* F: command completed sense data reported, | 1178 | "Completed", /* F: command completed sense data reported, |
1248 | may occur for successful command */ | 1179 | may occur for successful command */ |
1249 | }; | 1180 | }; |
1250 | #endif | ||
1251 | 1181 | ||
1252 | /* Get sense key string or NULL if not available */ | 1182 | /* Get sense key string or NULL if not available */ |
1253 | const char * | 1183 | const char * |
1254 | scsi_sense_key_string(unsigned char key) { | 1184 | scsi_sense_key_string(unsigned char key) { |
1255 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1256 | if (key <= 0xE) | 1185 | if (key <= 0xE) |
1257 | return snstext[key]; | 1186 | return snstext[key]; |
1258 | #endif | ||
1259 | return NULL; | 1187 | return NULL; |
1260 | } | 1188 | } |
1261 | EXPORT_SYMBOL(scsi_sense_key_string); | 1189 | EXPORT_SYMBOL(scsi_sense_key_string); |
@@ -1267,7 +1195,6 @@ EXPORT_SYMBOL(scsi_sense_key_string); | |||
1267 | const char * | 1195 | const char * |
1268 | scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) | 1196 | scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) |
1269 | { | 1197 | { |
1270 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1271 | int i; | 1198 | int i; |
1272 | unsigned short code = ((asc << 8) | ascq); | 1199 | unsigned short code = ((asc << 8) | ascq); |
1273 | 1200 | ||
@@ -1283,122 +1210,10 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) | |||
1283 | return additional2[i].str; | 1210 | return additional2[i].str; |
1284 | } | 1211 | } |
1285 | } | 1212 | } |
1286 | #else | ||
1287 | *fmt = NULL; | ||
1288 | #endif | ||
1289 | return NULL; | 1213 | return NULL; |
1290 | } | 1214 | } |
1291 | EXPORT_SYMBOL(scsi_extd_sense_format); | 1215 | EXPORT_SYMBOL(scsi_extd_sense_format); |
1292 | 1216 | ||
1293 | void | ||
1294 | scsi_show_extd_sense(const struct scsi_device *sdev, const char *name, | ||
1295 | unsigned char asc, unsigned char ascq) | ||
1296 | { | ||
1297 | const char *extd_sense_fmt = NULL; | ||
1298 | const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, | ||
1299 | &extd_sense_fmt); | ||
1300 | |||
1301 | if (extd_sense_str) { | ||
1302 | if (extd_sense_fmt) | ||
1303 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1304 | "Add. Sense: %s (%s%x)", | ||
1305 | extd_sense_str, extd_sense_fmt, | ||
1306 | ascq); | ||
1307 | else | ||
1308 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1309 | "Add. Sense: %s", extd_sense_str); | ||
1310 | |||
1311 | } else { | ||
1312 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1313 | "%sASC=0x%x %sASCQ=0x%x\n", | ||
1314 | asc >= 0x80 ? "<<vendor>> " : "", asc, | ||
1315 | ascq >= 0x80 ? "<<vendor>> " : "", ascq); | ||
1316 | } | ||
1317 | } | ||
1318 | EXPORT_SYMBOL(scsi_show_extd_sense); | ||
1319 | |||
1320 | void | ||
1321 | scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
1322 | const struct scsi_sense_hdr *sshdr) | ||
1323 | { | ||
1324 | const char *sense_txt; | ||
1325 | |||
1326 | sense_txt = scsi_sense_key_string(sshdr->sense_key); | ||
1327 | if (sense_txt) | ||
1328 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1329 | "Sense Key : %s [%s]%s\n", sense_txt, | ||
1330 | scsi_sense_is_deferred(sshdr) ? | ||
1331 | "deferred" : "current", | ||
1332 | sshdr->response_code >= 0x72 ? | ||
1333 | " [descriptor]" : ""); | ||
1334 | else | ||
1335 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1336 | "Sense Key : 0x%x [%s]%s", sshdr->sense_key, | ||
1337 | scsi_sense_is_deferred(sshdr) ? | ||
1338 | "deferred" : "current", | ||
1339 | sshdr->response_code >= 0x72 ? | ||
1340 | " [descriptor]" : ""); | ||
1341 | } | ||
1342 | EXPORT_SYMBOL(scsi_show_sense_hdr); | ||
1343 | |||
1344 | /* | ||
1345 | * Print normalized SCSI sense header with a prefix. | ||
1346 | */ | ||
1347 | void | ||
1348 | scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
1349 | const struct scsi_sense_hdr *sshdr) | ||
1350 | { | ||
1351 | scsi_show_sense_hdr(sdev, name, sshdr); | ||
1352 | scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq); | ||
1353 | } | ||
1354 | EXPORT_SYMBOL(scsi_print_sense_hdr); | ||
1355 | |||
1356 | static void | ||
1357 | scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len) | ||
1358 | { | ||
1359 | int k, num; | ||
1360 | |||
1361 | num = (sense_len < 32) ? sense_len : 32; | ||
1362 | printk("Unrecognized sense data (in hex):"); | ||
1363 | for (k = 0; k < num; ++k) { | ||
1364 | if (0 == (k % 16)) { | ||
1365 | printk("\n"); | ||
1366 | printk(KERN_INFO " "); | ||
1367 | } | ||
1368 | printk("%02x ", sense_buffer[k]); | ||
1369 | } | ||
1370 | printk("\n"); | ||
1371 | return; | ||
1372 | } | ||
1373 | |||
1374 | /* Normalize and print sense buffer with name prefix */ | ||
1375 | void __scsi_print_sense(const struct scsi_device *sdev, const char *name, | ||
1376 | const unsigned char *sense_buffer, int sense_len) | ||
1377 | { | ||
1378 | struct scsi_sense_hdr sshdr; | ||
1379 | |||
1380 | if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) { | ||
1381 | scsi_dump_sense_buffer(sense_buffer, sense_len); | ||
1382 | return; | ||
1383 | } | ||
1384 | scsi_show_sense_hdr(sdev, name, &sshdr); | ||
1385 | scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq); | ||
1386 | } | ||
1387 | EXPORT_SYMBOL(__scsi_print_sense); | ||
1388 | |||
1389 | /* Normalize and print sense buffer in SCSI command */ | ||
1390 | void scsi_print_sense(const struct scsi_cmnd *cmd) | ||
1391 | { | ||
1392 | struct gendisk *disk = cmd->request->rq_disk; | ||
1393 | const char *disk_name = disk ? disk->disk_name : NULL; | ||
1394 | |||
1395 | __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer, | ||
1396 | SCSI_SENSE_BUFFERSIZE); | ||
1397 | } | ||
1398 | EXPORT_SYMBOL(scsi_print_sense); | ||
1399 | |||
1400 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1401 | |||
1402 | static const char * const hostbyte_table[]={ | 1217 | static const char * const hostbyte_table[]={ |
1403 | "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", | 1218 | "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", |
1404 | "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", | 1219 | "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", |
@@ -1410,17 +1225,13 @@ static const char * const driverbyte_table[]={ | |||
1410 | "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", | 1225 | "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", |
1411 | "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; | 1226 | "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; |
1412 | 1227 | ||
1413 | #endif | ||
1414 | |||
1415 | const char *scsi_hostbyte_string(int result) | 1228 | const char *scsi_hostbyte_string(int result) |
1416 | { | 1229 | { |
1417 | const char *hb_string = NULL; | 1230 | const char *hb_string = NULL; |
1418 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1419 | int hb = host_byte(result); | 1231 | int hb = host_byte(result); |
1420 | 1232 | ||
1421 | if (hb < ARRAY_SIZE(hostbyte_table)) | 1233 | if (hb < ARRAY_SIZE(hostbyte_table)) |
1422 | hb_string = hostbyte_table[hb]; | 1234 | hb_string = hostbyte_table[hb]; |
1423 | #endif | ||
1424 | return hb_string; | 1235 | return hb_string; |
1425 | } | 1236 | } |
1426 | EXPORT_SYMBOL(scsi_hostbyte_string); | 1237 | EXPORT_SYMBOL(scsi_hostbyte_string); |
@@ -1428,17 +1239,14 @@ EXPORT_SYMBOL(scsi_hostbyte_string); | |||
1428 | const char *scsi_driverbyte_string(int result) | 1239 | const char *scsi_driverbyte_string(int result) |
1429 | { | 1240 | { |
1430 | const char *db_string = NULL; | 1241 | const char *db_string = NULL; |
1431 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1432 | int db = driver_byte(result); | 1242 | int db = driver_byte(result); |
1433 | 1243 | ||
1434 | if (db < ARRAY_SIZE(driverbyte_table)) | 1244 | if (db < ARRAY_SIZE(driverbyte_table)) |
1435 | db_string = driverbyte_table[db]; | 1245 | db_string = driverbyte_table[db]; |
1436 | #endif | ||
1437 | return db_string; | 1246 | return db_string; |
1438 | } | 1247 | } |
1439 | EXPORT_SYMBOL(scsi_driverbyte_string); | 1248 | EXPORT_SYMBOL(scsi_driverbyte_string); |
1440 | 1249 | ||
1441 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1442 | #define scsi_mlreturn_name(result) { result, #result } | 1250 | #define scsi_mlreturn_name(result) { result, #result } |
1443 | static const struct value_name_pair scsi_mlreturn_arr[] = { | 1251 | static const struct value_name_pair scsi_mlreturn_arr[] = { |
1444 | scsi_mlreturn_name(NEEDS_RETRY), | 1252 | scsi_mlreturn_name(NEEDS_RETRY), |
@@ -1451,11 +1259,9 @@ static const struct value_name_pair scsi_mlreturn_arr[] = { | |||
1451 | scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), | 1259 | scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), |
1452 | scsi_mlreturn_name(FAST_IO_FAIL) | 1260 | scsi_mlreturn_name(FAST_IO_FAIL) |
1453 | }; | 1261 | }; |
1454 | #endif | ||
1455 | 1262 | ||
1456 | const char *scsi_mlreturn_string(int result) | 1263 | const char *scsi_mlreturn_string(int result) |
1457 | { | 1264 | { |
1458 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1459 | const struct value_name_pair *arr = scsi_mlreturn_arr; | 1265 | const struct value_name_pair *arr = scsi_mlreturn_arr; |
1460 | int k; | 1266 | int k; |
1461 | 1267 | ||
@@ -1463,29 +1269,6 @@ const char *scsi_mlreturn_string(int result) | |||
1463 | if (result == arr->value) | 1269 | if (result == arr->value) |
1464 | return arr->name; | 1270 | return arr->name; |
1465 | } | 1271 | } |
1466 | #endif | ||
1467 | return NULL; | 1272 | return NULL; |
1468 | } | 1273 | } |
1469 | EXPORT_SYMBOL(scsi_mlreturn_string); | 1274 | EXPORT_SYMBOL(scsi_mlreturn_string); |
1470 | |||
1471 | void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition) | ||
1472 | { | ||
1473 | const char *mlret_string = scsi_mlreturn_string(disposition); | ||
1474 | const char *hb_string = scsi_hostbyte_string(cmd->result); | ||
1475 | const char *db_string = scsi_driverbyte_string(cmd->result); | ||
1476 | |||
1477 | if (hb_string || db_string) | ||
1478 | scmd_printk(KERN_INFO, cmd, | ||
1479 | "%s%s Result: hostbyte=%s driverbyte=%s", | ||
1480 | msg ? msg : "", | ||
1481 | mlret_string ? mlret_string : "UNKNOWN", | ||
1482 | hb_string ? hb_string : "invalid", | ||
1483 | db_string ? db_string : "invalid"); | ||
1484 | else | ||
1485 | scmd_printk(KERN_INFO, cmd, | ||
1486 | "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x", | ||
1487 | msg ? msg : "", | ||
1488 | mlret_string ? mlret_string : "UNKNOWN", | ||
1489 | host_byte(cmd->result), driver_byte(cmd->result)); | ||
1490 | } | ||
1491 | EXPORT_SYMBOL(scsi_print_result); | ||
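
Note: with the CONFIG_SCSI_CONSTANTS guards removed above, the string-lookup helpers in constants.c are always built, while the sense/result printing wrappers move out of this file. A hedged sketch of consuming the helpers that remain exported here; the pr_info() wrapper is purely illustrative:

	#include <linux/printk.h>
	#include <scsi/scsi_dbg.h>

	/* Illustrative sketch only: translate a sense key and ASC/ASCQ pair
	 * with the lookup helpers that constants.c still exports. */
	static void example_report_sense(unsigned char key,
					 unsigned char asc, unsigned char ascq)
	{
		const char *fmt = NULL;
		const char *key_str = scsi_sense_key_string(key);
		const char *asc_str = scsi_extd_sense_format(asc, ascq, &fmt);

		pr_info("sense key %s, asc/ascq %s\n",
			key_str ? key_str : "unknown",
			asc_str ? asc_str : "unknown");
	}
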
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile index 913b9a92fb06..3681a3fbd499 100644 --- a/drivers/scsi/csiostor/Makefile +++ b/drivers/scsi/csiostor/Makefile | |||
@@ -8,5 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 | |||
8 | obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o | 8 | obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o |
9 | 9 | ||
10 | csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ | 10 | csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ |
11 | csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \ | 11 | csio_hw.o csio_hw_t5.o csio_isr.o \ |
12 | csio_mb.o csio_rnode.o csio_wr.o | 12 | csio_mb.o csio_rnode.o csio_wr.o |
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 9ab997e18b20..2e66f34ebb79 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
@@ -60,37 +60,10 @@ int csio_msi = 2; | |||
60 | static int dev_num; | 60 | static int dev_num; |
61 | 61 | ||
62 | /* FCoE Adapter types & its description */ | 62 | /* FCoE Adapter types & its description */ |
63 | static const struct csio_adap_desc csio_t4_fcoe_adapters[] = { | ||
64 | {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, | ||
65 | {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, | ||
66 | {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, | ||
67 | {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"}, | ||
68 | {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"}, | ||
69 | {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"}, | ||
70 | {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"}, | ||
71 | {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"}, | ||
72 | {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"}, | ||
73 | {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"}, | ||
74 | {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"}, | ||
75 | {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"}, | ||
76 | {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, | ||
77 | {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, | ||
78 | {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, | ||
79 | {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, | ||
80 | {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"}, | ||
81 | {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"}, | ||
82 | {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"}, | ||
83 | {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"}, | ||
84 | {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"}, | ||
85 | {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"}, | ||
86 | {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"}, | ||
87 | {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"} | ||
88 | }; | ||
89 | |||
90 | static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { | 63 | static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { |
91 | {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, | 64 | {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, |
92 | {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, | 65 | {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, |
93 | {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"}, | 66 | {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"}, |
94 | {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, | 67 | {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, |
95 | {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, | 68 | {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, |
96 | {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, | 69 | {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, |
@@ -107,7 +80,9 @@ static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { | |||
107 | {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, | 80 | {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, |
108 | {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, | 81 | {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, |
109 | {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, | 82 | {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, |
110 | {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"} | 83 | {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}, |
84 | {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"}, | ||
85 | {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"} | ||
111 | }; | 86 | }; |
112 | 87 | ||
113 | static void csio_mgmtm_cleanup(struct csio_mgmtm *); | 88 | static void csio_mgmtm_cleanup(struct csio_mgmtm *); |
@@ -188,9 +163,9 @@ void | |||
188 | csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, | 163 | csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, |
189 | unsigned int mask, unsigned int val) | 164 | unsigned int mask, unsigned int val) |
190 | { | 165 | { |
191 | csio_wr_reg32(hw, addr, TP_PIO_ADDR); | 166 | csio_wr_reg32(hw, addr, TP_PIO_ADDR_A); |
192 | val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask; | 167 | val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask; |
193 | csio_wr_reg32(hw, val, TP_PIO_DATA); | 168 | csio_wr_reg32(hw, val, TP_PIO_DATA_A); |
194 | } | 169 | } |
195 | 170 | ||
196 | void | 171 | void |
@@ -256,7 +231,7 @@ csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) | |||
256 | } | 231 | } |
257 | 232 | ||
258 | pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); | 233 | pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); |
259 | *data = le32_to_cpu(*data); | 234 | *data = le32_to_cpu(*(__le32 *)data); |
260 | 235 | ||
261 | return 0; | 236 | return 0; |
262 | } | 237 | } |
@@ -421,17 +396,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, | |||
421 | 396 | ||
422 | if (!byte_cnt || byte_cnt > 4) | 397 | if (!byte_cnt || byte_cnt > 4) |
423 | return -EINVAL; | 398 | return -EINVAL; |
424 | if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) | 399 | if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) |
425 | return -EBUSY; | 400 | return -EBUSY; |
426 | 401 | ||
427 | cont = cont ? SF_CONT : 0; | 402 | csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) | |
428 | lock = lock ? SF_LOCK : 0; | 403 | BYTECNT_V(byte_cnt - 1), SF_OP_A); |
429 | 404 | ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, | |
430 | csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP); | 405 | 10, NULL); |
431 | ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, | ||
432 | 10, NULL); | ||
433 | if (!ret) | 406 | if (!ret) |
434 | *valp = csio_rd_reg32(hw, SF_DATA); | 407 | *valp = csio_rd_reg32(hw, SF_DATA_A); |
435 | return ret; | 408 | return ret; |
436 | } | 409 | } |
437 | 410 | ||
@@ -453,16 +426,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, | |||
453 | { | 426 | { |
454 | if (!byte_cnt || byte_cnt > 4) | 427 | if (!byte_cnt || byte_cnt > 4) |
455 | return -EINVAL; | 428 | return -EINVAL; |
456 | if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) | 429 | if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) |
457 | return -EBUSY; | 430 | return -EBUSY; |
458 | 431 | ||
459 | cont = cont ? SF_CONT : 0; | 432 | csio_wr_reg32(hw, val, SF_DATA_A); |
460 | lock = lock ? SF_LOCK : 0; | 433 | csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | |
461 | 434 | OP_V(1) | SF_LOCK_V(lock), SF_OP_A); | |
462 | csio_wr_reg32(hw, val, SF_DATA); | ||
463 | csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP); | ||
464 | 435 | ||
465 | return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, | 436 | return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, |
466 | 10, NULL); | 437 | 10, NULL); |
467 | } | 438 | } |
468 | 439 | ||
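
Note: the csio_hw hunks above move from hand-built bit constants (SF_LOCK, SF_CONT, BYTECNT(n), plain SF_OP) to the shared cxgb4-style register macros, where _A names a register address, _F a single-bit flag, and _V(x) a value shifted into its field. A sketch of that naming convention only; the bit position below is invented and does not describe the real chip layout:

	/* Illustrative sketch only: how an _S/_V/_F macro trio is laid out;
	 * the field offset here is made up. */
	#define EXAMPLE_LOCK_S		4
	#define EXAMPLE_LOCK_V(x)	((x) << EXAMPLE_LOCK_S)
	#define EXAMPLE_LOCK_F		EXAMPLE_LOCK_V(1U)

	/* e.g.: csio_wr_reg32(hw, EXAMPLE_LOCK_V(lock) | OTHER_V(n), SOME_REG_A); */
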
@@ -533,11 +504,11 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, | |||
533 | for ( ; nwords; nwords--, data++) { | 504 | for ( ; nwords; nwords--, data++) { |
534 | ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); | 505 | ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); |
535 | if (nwords == 1) | 506 | if (nwords == 1) |
536 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | 507 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
537 | if (ret) | 508 | if (ret) |
538 | return ret; | 509 | return ret; |
539 | if (byte_oriented) | 510 | if (byte_oriented) |
540 | *data = htonl(*data); | 511 | *data = (__force __u32) htonl(*data); |
541 | } | 512 | } |
542 | return 0; | 513 | return 0; |
543 | } | 514 | } |
@@ -586,7 +557,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, | |||
586 | if (ret) | 557 | if (ret) |
587 | goto unlock; | 558 | goto unlock; |
588 | 559 | ||
589 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | 560 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
590 | 561 | ||
591 | /* Read the page to verify the write succeeded */ | 562 | /* Read the page to verify the write succeeded */ |
592 | ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); | 563 | ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); |
@@ -603,7 +574,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, | |||
603 | return 0; | 574 | return 0; |
604 | 575 | ||
605 | unlock: | 576 | unlock: |
606 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | 577 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
607 | return ret; | 578 | return ret; |
608 | } | 579 | } |
609 | 580 | ||
@@ -641,7 +612,7 @@ out: | |||
641 | if (ret) | 612 | if (ret) |
642 | csio_err(hw, "erase of flash sector %d failed, error %d\n", | 613 | csio_err(hw, "erase of flash sector %d failed, error %d\n", |
643 | start, ret); | 614 | start, ret); |
644 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | 615 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
645 | return 0; | 616 | return 0; |
646 | } | 617 | } |
647 | 618 | ||
@@ -665,7 +636,7 @@ csio_hw_print_fw_version(struct csio_hw *hw, char *str) | |||
665 | static int | 636 | static int |
666 | csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) | 637 | csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) |
667 | { | 638 | { |
668 | return csio_hw_read_flash(hw, FW_IMG_START + | 639 | return csio_hw_read_flash(hw, FLASH_FW_START + |
669 | offsetof(struct fw_hdr, fw_ver), 1, | 640 | offsetof(struct fw_hdr, fw_ver), 1, |
670 | vers, 0); | 641 | vers, 0); |
671 | } | 642 | } |
@@ -686,43 +657,6 @@ csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) | |||
686 | } | 657 | } |
687 | 658 | ||
688 | /* | 659 | /* |
689 | * csio_hw_check_fw_version - check if the FW is compatible with | ||
690 | * this driver | ||
691 | * @hw: HW module | ||
692 | * | ||
693 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
694 | * if there's exact match, a negative error if the version could not be | ||
695 | * read or there's a major/minor version mismatch. | ||
696 | */ | ||
697 | static int | ||
698 | csio_hw_check_fw_version(struct csio_hw *hw) | ||
699 | { | ||
700 | int ret, major, minor, micro; | ||
701 | |||
702 | ret = csio_hw_get_fw_version(hw, &hw->fwrev); | ||
703 | if (!ret) | ||
704 | ret = csio_hw_get_tp_version(hw, &hw->tp_vers); | ||
705 | if (ret) | ||
706 | return ret; | ||
707 | |||
708 | major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev); | ||
709 | minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev); | ||
710 | micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev); | ||
711 | |||
712 | if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ | ||
713 | csio_err(hw, "card FW has major version %u, driver wants %u\n", | ||
714 | major, FW_VERSION_MAJOR(hw)); | ||
715 | return -EINVAL; | ||
716 | } | ||
717 | |||
718 | if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw)) | ||
719 | return 0; /* perfect match */ | ||
720 | |||
721 | /* Minor/micro version mismatch */ | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | |||
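Both the check removed here and its replacement (csio_hw_prep_fw(), later in this diff) work on the packed 32-bit firmware version from struct fw_hdr, unpacking it with the FW_HDR_FW_VER_*_G getters. A small decode sketch, assuming card_fw points at a flash-resident header as in csio_hw_prep_fw():

	u32 vers = be32_to_cpu(card_fw->fw_ver);	/* header stores it big-endian */

	csio_dbg(hw, "card firmware %u.%u.%u.%u\n",
		 FW_HDR_FW_VER_MAJOR_G(vers), FW_HDR_FW_VER_MINOR_G(vers),
		 FW_HDR_FW_VER_MICRO_G(vers), FW_HDR_FW_VER_BUILD_G(vers));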
725 | /* | ||
726 | * csio_hw_fw_dload - download firmware. | 660 | * csio_hw_fw_dload - download firmware. |
727 | * @hw: HW module | 661 | * @hw: HW module |
728 | * @fw_data: firmware image to write. | 662 | * @fw_data: firmware image to write. |
@@ -762,9 +696,9 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) | |||
762 | return -EINVAL; | 696 | return -EINVAL; |
763 | } | 697 | } |
764 | 698 | ||
765 | if (size > FW_MAX_SIZE) { | 699 | if (size > FLASH_FW_MAX_SIZE) { |
766 | csio_err(hw, "FW image too large, max is %u bytes\n", | 700 | csio_err(hw, "FW image too large, max is %u bytes\n", |
767 | FW_MAX_SIZE); | 701 | FLASH_FW_MAX_SIZE); |
768 | return -EINVAL; | 702 | return -EINVAL; |
769 | } | 703 | } |
770 | 704 | ||
@@ -780,10 +714,10 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) | |||
780 | i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ | 714 | i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ |
781 | 715 | ||
782 | csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", | 716 | csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", |
783 | FW_START_SEC, FW_START_SEC + i - 1); | 717 | FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1); |
784 | 718 | ||
785 | ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC, | 719 | ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC, |
786 | FW_START_SEC + i - 1); | 720 | FLASH_FW_START_SEC + i - 1); |
787 | if (ret) { | 721 | if (ret) { |
788 | csio_err(hw, "Flash Erase failed\n"); | 722 | csio_err(hw, "Flash Erase failed\n"); |
789 | goto out; | 723 | goto out; |
@@ -796,14 +730,14 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) | |||
796 | */ | 730 | */ |
797 | memcpy(first_page, fw_data, SF_PAGE_SIZE); | 731 | memcpy(first_page, fw_data, SF_PAGE_SIZE); |
798 | ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); | 732 | ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); |
799 | ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page); | 733 | ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page); |
800 | if (ret) | 734 | if (ret) |
801 | goto out; | 735 | goto out; |
802 | 736 | ||
803 | csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", | 737 | csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", |
804 | FW_IMG_START, FW_IMG_START + size); | 738 | FW_IMG_START, FW_IMG_START + size); |
805 | 739 | ||
806 | addr = FW_IMG_START; | 740 | addr = FLASH_FW_START; |
807 | for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { | 741 | for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { |
808 | addr += SF_PAGE_SIZE; | 742 | addr += SF_PAGE_SIZE; |
809 | fw_data += SF_PAGE_SIZE; | 743 | fw_data += SF_PAGE_SIZE; |
@@ -813,7 +747,7 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) | |||
813 | } | 747 | } |
814 | 748 | ||
815 | ret = csio_hw_write_flash(hw, | 749 | ret = csio_hw_write_flash(hw, |
816 | FW_IMG_START + | 750 | FLASH_FW_START + |
817 | offsetof(struct fw_hdr, fw_ver), | 751 | offsetof(struct fw_hdr, fw_ver), |
818 | sizeof(hdr->fw_ver), | 752 | sizeof(hdr->fw_ver), |
819 | (const uint8_t *)&hdr->fw_ver); | 753 | (const uint8_t *)&hdr->fw_ver); |
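The write ordering in csio_hw_fw_dload() above is deliberate: the header's version word is poisoned to 0xffffffff while the pages are being written, and the real value goes in only after the last page, so an interrupted download never leaves flash advertising a valid but truncated image. Schematically, with the same helpers and constants as the hunk and the per-page loop elided:

	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);	/* poison the version */
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);

	/* ... write the remaining SF_PAGE_SIZE-sized chunks of fw_data ... */

	ret = csio_hw_write_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);	/* real version last: commits the image */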
@@ -833,7 +767,7 @@ csio_hw_get_flash_params(struct csio_hw *hw) | |||
833 | ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); | 767 | ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); |
834 | if (!ret) | 768 | if (!ret) |
835 | ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); | 769 | ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); |
836 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | 770 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
837 | if (ret != 0) | 771 | if (ret != 0) |
838 | return ret; | 772 | return ret; |
839 | 773 | ||
@@ -861,17 +795,17 @@ csio_hw_dev_ready(struct csio_hw *hw) | |||
861 | uint32_t reg; | 795 | uint32_t reg; |
862 | int cnt = 6; | 796 | int cnt = 6; |
863 | 797 | ||
864 | while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) && | 798 | while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) && |
865 | (--cnt != 0)) | 799 | (--cnt != 0)) |
866 | mdelay(100); | 800 | mdelay(100); |
867 | 801 | ||
868 | if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) || | 802 | if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) || |
869 | (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) { | 803 | (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) { |
870 | csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); | 804 | csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); |
871 | return -EIO; | 805 | return -EIO; |
872 | } | 806 | } |
873 | 807 | ||
874 | hw->pfn = SOURCEPF_GET(reg); | 808 | hw->pfn = SOURCEPF_G(reg); |
875 | 809 | ||
876 | return 0; | 810 | return 0; |
877 | } | 811 | } |
@@ -959,8 +893,8 @@ retry: | |||
959 | * timeout ... and then retry if we haven't exhausted | 893 | * timeout ... and then retry if we haven't exhausted |
960 | * our retries ... | 894 | * our retries ... |
961 | */ | 895 | */ |
962 | pcie_fw = csio_rd_reg32(hw, PCIE_FW); | 896 | pcie_fw = csio_rd_reg32(hw, PCIE_FW_A); |
963 | if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { | 897 | if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { |
964 | if (waiting <= 0) { | 898 | if (waiting <= 0) { |
965 | if (retries-- > 0) | 899 | if (retries-- > 0) |
966 | goto retry; | 900 | goto retry; |
@@ -976,10 +910,10 @@ retry: | |||
976 | * report errors preferentially. | 910 | * report errors preferentially. |
977 | */ | 911 | */ |
978 | if (state) { | 912 | if (state) { |
979 | if (pcie_fw & PCIE_FW_ERR) { | 913 | if (pcie_fw & PCIE_FW_ERR_F) { |
980 | *state = CSIO_DEV_STATE_ERR; | 914 | *state = CSIO_DEV_STATE_ERR; |
981 | rv = -ETIMEDOUT; | 915 | rv = -ETIMEDOUT; |
982 | } else if (pcie_fw & PCIE_FW_INIT) | 916 | } else if (pcie_fw & PCIE_FW_INIT_F) |
983 | *state = CSIO_DEV_STATE_INIT; | 917 | *state = CSIO_DEV_STATE_INIT; |
984 | } | 918 | } |
985 | 919 | ||
@@ -988,9 +922,9 @@ retry: | |||
988 | * there's not a valid Master PF, grab its identity | 922 | * there's not a valid Master PF, grab its identity |
989 | * for our caller. | 923 | * for our caller. |
990 | */ | 924 | */ |
991 | if (mpfn == PCIE_FW_MASTER_MASK && | 925 | if (mpfn == PCIE_FW_MASTER_M && |
992 | (pcie_fw & PCIE_FW_MASTER_VLD)) | 926 | (pcie_fw & PCIE_FW_MASTER_VLD_F)) |
993 | mpfn = PCIE_FW_MASTER_GET(pcie_fw); | 927 | mpfn = PCIE_FW_MASTER_G(pcie_fw); |
994 | break; | 928 | break; |
995 | } | 929 | } |
996 | hw->flags &= ~CSIO_HWF_MASTER; | 930 | hw->flags &= ~CSIO_HWF_MASTER; |
@@ -1078,7 +1012,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst) | |||
1078 | 1012 | ||
1079 | if (!fw_rst) { | 1013 | if (!fw_rst) { |
1080 | /* PIO reset */ | 1014 | /* PIO reset */ |
1081 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | 1015 | csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); |
1082 | mdelay(2000); | 1016 | mdelay(2000); |
1083 | return 0; | 1017 | return 0; |
1084 | } | 1018 | } |
@@ -1090,7 +1024,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst) | |||
1090 | } | 1024 | } |
1091 | 1025 | ||
1092 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1026 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, |
1093 | PIORSTMODE | PIORST, 0, NULL); | 1027 | PIORSTMODE_F | PIORST_F, 0, NULL); |
1094 | 1028 | ||
1095 | if (csio_mb_issue(hw, mbp)) { | 1029 | if (csio_mb_issue(hw, mbp)) { |
1096 | csio_err(hw, "Issue of RESET command failed.\n"); | 1030 | csio_err(hw, "Issue of RESET command failed.\n"); |
@@ -1156,7 +1090,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) | |||
1156 | * If a legitimate mailbox is provided, issue a RESET command | 1090 | * If a legitimate mailbox is provided, issue a RESET command |
1157 | * with a HALT indication. | 1091 | * with a HALT indication. |
1158 | */ | 1092 | */ |
1159 | if (mbox <= PCIE_FW_MASTER_MASK) { | 1093 | if (mbox <= PCIE_FW_MASTER_M) { |
1160 | struct csio_mb *mbp; | 1094 | struct csio_mb *mbp; |
1161 | 1095 | ||
1162 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | 1096 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
@@ -1166,7 +1100,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) | |||
1166 | } | 1100 | } |
1167 | 1101 | ||
1168 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1102 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, |
1169 | PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F, | 1103 | PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F, |
1170 | NULL); | 1104 | NULL); |
1171 | 1105 | ||
1172 | if (csio_mb_issue(hw, mbp)) { | 1106 | if (csio_mb_issue(hw, mbp)) { |
@@ -1193,8 +1127,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) | |||
1193 | * rather than a RESET ... if it's new enough to understand that ... | 1127 | * rather than a RESET ... if it's new enough to understand that ... |
1194 | */ | 1128 | */ |
1195 | if (retval == 0 || force) { | 1129 | if (retval == 0 || force) { |
1196 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); | 1130 | csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); |
1197 | csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); | 1131 | csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, |
1132 | PCIE_FW_HALT_F); | ||
1198 | } | 1133 | } |
1199 | 1134 | ||
1200 | /* | 1135 | /* |
@@ -1234,7 +1169,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) | |||
1234 | * doing it automatically, we need to clear the PCIE_FW.HALT | 1169 | * doing it automatically, we need to clear the PCIE_FW.HALT |
1235 | * bit. | 1170 | * bit. |
1236 | */ | 1171 | */ |
1237 | csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); | 1172 | csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0); |
1238 | 1173 | ||
1239 | /* | 1174 | /* |
1240 | * If we've been given a valid mailbox, first try to get the | 1175 | * If we've been given a valid mailbox, first try to get the |
@@ -1243,21 +1178,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) | |||
1243 | * valid mailbox or the RESET command failed, fall back to | 1178 | * valid mailbox or the RESET command failed, fall back to |
1244 | * hitting the chip with a hammer. | 1179 | * hitting the chip with a hammer. |
1245 | */ | 1180 | */ |
1246 | if (mbox <= PCIE_FW_MASTER_MASK) { | 1181 | if (mbox <= PCIE_FW_MASTER_M) { |
1247 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); | 1182 | csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); |
1248 | msleep(100); | 1183 | msleep(100); |
1249 | if (csio_do_reset(hw, true) == 0) | 1184 | if (csio_do_reset(hw, true) == 0) |
1250 | return 0; | 1185 | return 0; |
1251 | } | 1186 | } |
1252 | 1187 | ||
1253 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | 1188 | csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); |
1254 | msleep(2000); | 1189 | msleep(2000); |
1255 | } else { | 1190 | } else { |
1256 | int ms; | 1191 | int ms; |
1257 | 1192 | ||
1258 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); | 1193 | csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); |
1259 | for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { | 1194 | for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { |
1260 | if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) | 1195 | if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F)) |
1261 | return 0; | 1196 | return 0; |
1262 | msleep(100); | 1197 | msleep(100); |
1263 | ms += 100; | 1198 | ms += 100; |
@@ -1315,116 +1250,6 @@ csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, | |||
1315 | return csio_hw_fw_restart(hw, mbox, reset); | 1250 | return csio_hw_fw_restart(hw, mbox, reset); |
1316 | } | 1251 | } |
1317 | 1252 | ||
1318 | |||
1319 | /* | ||
1320 | * csio_hw_fw_config_file - setup an adapter via a Configuration File | ||
1321 | * @hw: the HW module | ||
1322 | * @mbox: mailbox to use for the FW command | ||
1323 | * @mtype: the memory type where the Configuration File is located | ||
1324 | * @maddr: the memory address where the Configuration File is located | ||
1325 | * @finiver: return value for CF [fini] version | ||
1326 | * @finicsum: return value for CF [fini] checksum | ||
1327 | * @cfcsum: return value for CF computed checksum | ||
1328 | * | ||
1329 | * Issue a command to get the firmware to process the Configuration | ||
1330 | * File located at the specified mtype/maddress. If the Configuration | ||
1331 | * File is processed successfully and return value pointers are | ||
1332 | * provided, the Configuration File "[fini] section version and | ||
1333 | * checksum values will be returned along with the computed checksum. | ||
1334 | * It's up to the caller to decide how it wants to respond to the | ||
1335 | * checksums not matching, but it is recommended that a prominent warning | ||
1336 | * be emitted in order to help people rapidly identify changed or | ||
1337 | * corrupted Configuration Files. | ||
1338 | * | ||
1339 | * Also note that it's possible to modify things like "niccaps", | ||
1340 | * "toecaps",etc. between processing the Configuration File and telling | ||
1341 | * the firmware to use the new configuration. Callers which want to | ||
1342 | * do this will need to "hand-roll" their own CAPS_CONFIGS commands for | ||
1343 | * Configuration Files if they want to do this. | ||
1344 | */ | ||
1345 | static int | ||
1346 | csio_hw_fw_config_file(struct csio_hw *hw, | ||
1347 | unsigned int mtype, unsigned int maddr, | ||
1348 | uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) | ||
1349 | { | ||
1350 | struct csio_mb *mbp; | ||
1351 | struct fw_caps_config_cmd *caps_cmd; | ||
1352 | int rv = -EINVAL; | ||
1353 | enum fw_retval ret; | ||
1354 | |||
1355 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1356 | if (!mbp) { | ||
1357 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1358 | return -ENOMEM; | ||
1359 | } | ||
1360 | /* | ||
1361 | * Tell the firmware to process the indicated Configuration File. | ||
1362 | * If there are no errors and the caller has provided return value | ||
1363 | * pointers for the [fini] section version, checksum and computed | ||
1364 | * checksum, pass those back to the caller. | ||
1365 | */ | ||
1366 | caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); | ||
1367 | CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); | ||
1368 | caps_cmd->op_to_write = | ||
1369 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | | ||
1370 | FW_CMD_REQUEST_F | | ||
1371 | FW_CMD_READ_F); | ||
1372 | caps_cmd->cfvalid_to_len16 = | ||
1373 | htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | | ||
1374 | FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | | ||
1375 | FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | | ||
1376 | FW_LEN16(*caps_cmd)); | ||
1377 | |||
1378 | if (csio_mb_issue(hw, mbp)) { | ||
1379 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); | ||
1380 | goto out; | ||
1381 | } | ||
1382 | |||
1383 | ret = csio_mb_fw_retval(mbp); | ||
1384 | if (ret != FW_SUCCESS) { | ||
1385 | csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); | ||
1386 | goto out; | ||
1387 | } | ||
1388 | |||
1389 | if (finiver) | ||
1390 | *finiver = ntohl(caps_cmd->finiver); | ||
1391 | if (finicsum) | ||
1392 | *finicsum = ntohl(caps_cmd->finicsum); | ||
1393 | if (cfcsum) | ||
1394 | *cfcsum = ntohl(caps_cmd->cfcsum); | ||
1395 | |||
1396 | /* Validate device capabilities */ | ||
1397 | if (csio_hw_validate_caps(hw, mbp)) { | ||
1398 | rv = -ENOENT; | ||
1399 | goto out; | ||
1400 | } | ||
1401 | |||
1402 | /* | ||
1403 | * And now tell the firmware to use the configuration we just loaded. | ||
1404 | */ | ||
1405 | caps_cmd->op_to_write = | ||
1406 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | | ||
1407 | FW_CMD_REQUEST_F | | ||
1408 | FW_CMD_WRITE_F); | ||
1409 | caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); | ||
1410 | |||
1411 | if (csio_mb_issue(hw, mbp)) { | ||
1412 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); | ||
1413 | goto out; | ||
1414 | } | ||
1415 | |||
1416 | ret = csio_mb_fw_retval(mbp); | ||
1417 | if (ret != FW_SUCCESS) { | ||
1418 | csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); | ||
1419 | goto out; | ||
1420 | } | ||
1421 | |||
1422 | rv = 0; | ||
1423 | out: | ||
1424 | mempool_free(mbp, hw->mb_mempool); | ||
1425 | return rv; | ||
1426 | } | ||
1427 | |||
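The helper removed above and the version inlined into csio_hw_use_fwconfig() later in this diff follow the same two-phase FW_CAPS_CONFIG_CMD mailbox protocol: a READ asks the firmware to parse the Configuration File at (mtype, maddr) and report the [fini] version and checksums, then a WRITE tells it to switch to what it just parsed. Reduced to the two command setups (field macros as named in this diff; mailbox issue and retval handling omitted):

	/* phase 1: READ - firmware parses the Configuration File */
	caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				      FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	/* phase 2: WRITE - firmware applies the configuration it just parsed */
	caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));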
1428 | /* | 1253 | /* |
1429 | * csio_get_device_params - Get device parameters. | 1254 | * csio_get_device_params - Get device parameters. |
1430 | * @hw: HW module | 1255 | * @hw: HW module |
@@ -1547,7 +1372,8 @@ csio_config_device_caps(struct csio_hw *hw) | |||
1547 | } | 1372 | } |
1548 | 1373 | ||
1549 | /* Validate device capabilities */ | 1374 | /* Validate device capabilities */ |
1550 | if (csio_hw_validate_caps(hw, mbp)) | 1375 | rv = csio_hw_validate_caps(hw, mbp); |
1376 | if (rv != 0) | ||
1551 | goto out; | 1377 | goto out; |
1552 | 1378 | ||
1553 | /* Don't config device capabilities if already configured */ | 1379 | /* Don't config device capabilities if already configured */ |
@@ -1756,9 +1582,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) | |||
1756 | uint32_t *cfg_data; | 1582 | uint32_t *cfg_data; |
1757 | int value_to_add = 0; | 1583 | int value_to_add = 0; |
1758 | 1584 | ||
1759 | if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) { | 1585 | if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) { |
1760 | csio_err(hw, "could not find config file %s, err: %d\n", | 1586 | csio_err(hw, "could not find config file %s, err: %d\n", |
1761 | CSIO_CF_FNAME(hw), ret); | 1587 | FW_CFG_NAME_T5, ret); |
1762 | return -ENOENT; | 1588 | return -ENOENT; |
1763 | } | 1589 | } |
1764 | 1590 | ||
@@ -1798,8 +1624,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) | |||
1798 | } | 1624 | } |
1799 | if (ret == 0) { | 1625 | if (ret == 0) { |
1800 | csio_info(hw, "config file upgraded to %s\n", | 1626 | csio_info(hw, "config file upgraded to %s\n", |
1801 | CSIO_CF_FNAME(hw)); | 1627 | FW_CFG_NAME_T5); |
1802 | snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw)); | 1628 | snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5); |
1803 | } | 1629 | } |
1804 | 1630 | ||
1805 | leave: | 1631 | leave: |
@@ -1827,11 +1653,13 @@ leave: | |||
1827 | static int | 1653 | static int |
1828 | csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | 1654 | csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) |
1829 | { | 1655 | { |
1656 | struct csio_mb *mbp = NULL; | ||
1657 | struct fw_caps_config_cmd *caps_cmd; | ||
1830 | unsigned int mtype, maddr; | 1658 | unsigned int mtype, maddr; |
1831 | int rv; | 1659 | int rv = -EINVAL; |
1832 | uint32_t finiver = 0, finicsum = 0, cfcsum = 0; | 1660 | uint32_t finiver = 0, finicsum = 0, cfcsum = 0; |
1833 | int using_flash; | ||
1834 | char path[64]; | 1661 | char path[64]; |
1662 | char *config_name = NULL; | ||
1835 | 1663 | ||
1836 | /* | 1664 | /* |
1837 | * Reset device if necessary | 1665 | * Reset device if necessary |
@@ -1851,51 +1679,107 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | |||
1851 | rv = csio_hw_flash_config(hw, fw_cfg_param, path); | 1679 | rv = csio_hw_flash_config(hw, fw_cfg_param, path); |
1852 | spin_lock_irq(&hw->lock); | 1680 | spin_lock_irq(&hw->lock); |
1853 | if (rv != 0) { | 1681 | if (rv != 0) { |
1854 | if (rv == -ENOENT) { | 1682 | /* |
1855 | /* | 1683 | * config file was not found. Use default |
1856 | * config file was not found. Use default | 1684 | * config file from flash. |
1857 | * config file from flash. | 1685 | */ |
1858 | */ | 1686 | config_name = "On FLASH"; |
1859 | mtype = FW_MEMTYPE_CF_FLASH; | 1687 | mtype = FW_MEMTYPE_CF_FLASH; |
1860 | maddr = hw->chip_ops->chip_flash_cfg_addr(hw); | 1688 | maddr = hw->chip_ops->chip_flash_cfg_addr(hw); |
1861 | using_flash = 1; | ||
1862 | } else { | ||
1863 | /* | ||
1864 | * we revert back to the hardwired config if | ||
1865 | * flashing failed. | ||
1866 | */ | ||
1867 | goto bye; | ||
1868 | } | ||
1869 | } else { | 1689 | } else { |
1690 | config_name = path; | ||
1870 | mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); | 1691 | mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); |
1871 | maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; | 1692 | maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; |
1872 | using_flash = 0; | ||
1873 | } | 1693 | } |
1874 | 1694 | ||
1875 | hw->cfg_store = (uint8_t)mtype; | 1695 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
1696 | if (!mbp) { | ||
1697 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1698 | return -ENOMEM; | ||
1699 | } | ||
1700 | /* | ||
1701 | * Tell the firmware to process the indicated Configuration File. | ||
1702 | * If there are no errors and the caller has provided return value | ||
1703 | * pointers for the [fini] section version, checksum and computed | ||
1704 | * checksum, pass those back to the caller. | ||
1705 | */ | ||
1706 | caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); | ||
1707 | CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); | ||
1708 | caps_cmd->op_to_write = | ||
1709 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | | ||
1710 | FW_CMD_REQUEST_F | | ||
1711 | FW_CMD_READ_F); | ||
1712 | caps_cmd->cfvalid_to_len16 = | ||
1713 | htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | | ||
1714 | FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | | ||
1715 | FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | | ||
1716 | FW_LEN16(*caps_cmd)); | ||
1717 | |||
1718 | if (csio_mb_issue(hw, mbp)) { | ||
1719 | rv = -EINVAL; | ||
1720 | goto bye; | ||
1721 | } | ||
1722 | |||
1723 | rv = csio_mb_fw_retval(mbp); | ||
1724 | /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware | ||
1725 | * Configuration File in FLASH), our last gasp effort is to use the | ||
1726 | * Firmware Configuration File which is embedded in the | ||
1727 | * firmware. A very few early versions of the firmware didn't | ||
1728 | * have one embedded but we can ignore those. | ||
1729 | */ | ||
1730 | if (rv == ENOENT) { | ||
1731 | CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); | ||
1732 | caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | | ||
1733 | FW_CMD_REQUEST_F | | ||
1734 | FW_CMD_READ_F); | ||
1735 | caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); | ||
1736 | |||
1737 | if (csio_mb_issue(hw, mbp)) { | ||
1738 | rv = -EINVAL; | ||
1739 | goto bye; | ||
1740 | } | ||
1741 | |||
1742 | rv = csio_mb_fw_retval(mbp); | ||
1743 | config_name = "Firmware Default"; | ||
1744 | } | ||
1745 | if (rv != FW_SUCCESS) | ||
1746 | goto bye; | ||
1747 | |||
1748 | finiver = ntohl(caps_cmd->finiver); | ||
1749 | finicsum = ntohl(caps_cmd->finicsum); | ||
1750 | cfcsum = ntohl(caps_cmd->cfcsum); | ||
1876 | 1751 | ||
1877 | /* | 1752 | /* |
1878 | * Issue a Capability Configuration command to the firmware to get it | 1753 | * And now tell the firmware to use the configuration we just loaded. |
1879 | * to parse the Configuration File. | ||
1880 | */ | 1754 | */ |
1881 | rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, | 1755 | caps_cmd->op_to_write = |
1882 | &finicsum, &cfcsum); | 1756 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
1883 | if (rv != 0) | 1757 | FW_CMD_REQUEST_F | |
1758 | FW_CMD_WRITE_F); | ||
1759 | caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); | ||
1760 | |||
1761 | if (csio_mb_issue(hw, mbp)) { | ||
1762 | rv = -EINVAL; | ||
1884 | goto bye; | 1763 | goto bye; |
1764 | } | ||
1885 | 1765 | ||
1886 | hw->cfg_finiver = finiver; | 1766 | rv = csio_mb_fw_retval(mbp); |
1887 | hw->cfg_finicsum = finicsum; | 1767 | if (rv != FW_SUCCESS) { |
1888 | hw->cfg_cfcsum = cfcsum; | 1768 | csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); |
1889 | hw->cfg_csum_status = true; | 1769 | goto bye; |
1770 | } | ||
1890 | 1771 | ||
1772 | mempool_free(mbp, hw->mb_mempool); | ||
1891 | if (finicsum != cfcsum) { | 1773 | if (finicsum != cfcsum) { |
1892 | csio_warn(hw, | 1774 | csio_warn(hw, |
1893 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", | 1775 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", |
1894 | finicsum, cfcsum); | 1776 | finicsum, cfcsum); |
1895 | |||
1896 | hw->cfg_csum_status = false; | ||
1897 | } | 1777 | } |
1898 | 1778 | ||
1779 | /* Validate device capabilities */ | ||
1780 | rv = csio_hw_validate_caps(hw, mbp); | ||
1781 | if (rv != 0) | ||
1782 | goto bye; | ||
1899 | /* | 1783 | /* |
1900 | * Note that we're operating with parameters | 1784 | * Note that we're operating with parameters |
1901 | * not supplied by the driver, rather than from hard-wired | 1785 | * not supplied by the driver, rather than from hard-wired |
@@ -1918,56 +1802,184 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | |||
1918 | /* Post event to notify completion of configuration */ | 1802 | /* Post event to notify completion of configuration */ |
1919 | csio_post_event(&hw->sm, CSIO_HWE_INIT); | 1803 | csio_post_event(&hw->sm, CSIO_HWE_INIT); |
1920 | 1804 | ||
1921 | csio_info(hw, | 1805 | csio_info(hw, "Successfully configured using Firmware " |
1922 | "Firmware Configuration File %s, version %#x, computed checksum %#x\n", | 1806 | "Configuration File %s, version %#x, computed checksum %#x\n", |
1923 | (using_flash ? "in device FLASH" : path), finiver, cfcsum); | 1807 | config_name, finiver, cfcsum); |
1924 | |||
1925 | return 0; | 1808 | return 0; |
1926 | 1809 | ||
1927 | /* | 1810 | /* |
1928 | * Something bad happened. Return the error ... | 1811 | * Something bad happened. Return the error ... |
1929 | */ | 1812 | */ |
1930 | bye: | 1813 | bye: |
1814 | if (mbp) | ||
1815 | mempool_free(mbp, hw->mb_mempool); | ||
1931 | hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; | 1816 | hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; |
1932 | csio_dbg(hw, "Configuration file error %d\n", rv); | 1817 | csio_warn(hw, "Configuration file error %d\n", rv); |
1933 | return rv; | 1818 | return rv; |
1934 | } | 1819 | } |
1935 | 1820 | ||
1936 | /* | 1821 | /* Is the given firmware API compatible with the one the driver was compiled |
1937 | * Attempt to initialize the adapter via hard-coded, driver supplied | 1822 | * with? |
1938 | * parameters ... | ||
1939 | */ | 1823 | */ |
1940 | static int | 1824 | static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) |
1941 | csio_hw_no_fwconfig(struct csio_hw *hw, int reset) | ||
1942 | { | 1825 | { |
1943 | int rv; | 1826 | |
1944 | /* | 1827 | /* short circuit if it's the exact same firmware version */ |
1945 | * Reset device if necessary | 1828 | if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) |
1946 | */ | 1829 | return 1; |
1947 | if (reset) { | 1830 | |
1948 | rv = csio_do_reset(hw, true); | 1831 | #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) |
1949 | if (rv != 0) | 1832 | if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && |
1950 | goto out; | 1833 | SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) |
1834 | return 1; | ||
1835 | #undef SAME_INTF | ||
1836 | |||
1837 | return 0; | ||
1838 | } | ||
1839 | |||
1840 | /* The firmware in the filesystem is usable, but should it be installed? | ||
1841 | * This routine explains itself in detail if it indicates the filesystem | ||
1842 | * firmware should be installed. | ||
1843 | */ | ||
1844 | static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable, | ||
1845 | int k, int c) | ||
1846 | { | ||
1847 | const char *reason; | ||
1848 | |||
1849 | if (!card_fw_usable) { | ||
1850 | reason = "incompatible or unusable"; | ||
1851 | goto install; | ||
1951 | } | 1852 | } |
1952 | 1853 | ||
1953 | /* Get and set device capabilities */ | 1854 | if (k > c) { |
1954 | rv = csio_config_device_caps(hw); | 1855 | reason = "older than the version supported with this driver"; |
1955 | if (rv != 0) | 1856 | goto install; |
1956 | goto out; | 1857 | } |
1957 | 1858 | ||
1958 | /* device parameters */ | 1859 | return 0; |
1959 | rv = csio_get_device_params(hw); | ||
1960 | if (rv != 0) | ||
1961 | goto out; | ||
1962 | 1860 | ||
1963 | /* Configure SGE */ | 1861 | install: |
1964 | csio_wr_sge_init(hw); | 1862 | csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, " |
1863 | "installing firmware %u.%u.%u.%u on card.\n", | ||
1864 | FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), | ||
1865 | FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, | ||
1866 | FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), | ||
1867 | FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); | ||
1965 | 1868 | ||
1966 | /* Post event to notify completion of configuration */ | 1869 | return 1; |
1967 | csio_post_event(&hw->sm, CSIO_HWE_INIT); | 1870 | } |
1968 | 1871 | ||
1969 | out: | 1872 | static struct fw_info fw_info_array[] = { |
1970 | return rv; | 1873 | { |
1874 | .chip = CHELSIO_T5, | ||
1875 | .fs_name = FW_CFG_NAME_T5, | ||
1876 | .fw_mod_name = FW_FNAME_T5, | ||
1877 | .fw_hdr = { | ||
1878 | .chip = FW_HDR_CHIP_T5, | ||
1879 | .fw_ver = __cpu_to_be32(FW_VERSION(T5)), | ||
1880 | .intfver_nic = FW_INTFVER(T5, NIC), | ||
1881 | .intfver_vnic = FW_INTFVER(T5, VNIC), | ||
1882 | .intfver_ri = FW_INTFVER(T5, RI), | ||
1883 | .intfver_iscsi = FW_INTFVER(T5, ISCSI), | ||
1884 | .intfver_fcoe = FW_INTFVER(T5, FCOE), | ||
1885 | }, | ||
1886 | } | ||
1887 | }; | ||
1888 | |||
1889 | static struct fw_info *find_fw_info(int chip) | ||
1890 | { | ||
1891 | int i; | ||
1892 | |||
1893 | for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { | ||
1894 | if (fw_info_array[i].chip == chip) | ||
1895 | return &fw_info_array[i]; | ||
1896 | } | ||
1897 | return NULL; | ||
1898 | } | ||
1899 | |||
1900 | static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, | ||
1901 | const u8 *fw_data, unsigned int fw_size, | ||
1902 | struct fw_hdr *card_fw, enum csio_dev_state state, | ||
1903 | int *reset) | ||
1904 | { | ||
1905 | int ret, card_fw_usable, fs_fw_usable; | ||
1906 | const struct fw_hdr *fs_fw; | ||
1907 | const struct fw_hdr *drv_fw; | ||
1908 | |||
1909 | drv_fw = &fw_info->fw_hdr; | ||
1910 | |||
1911 | /* Read the header of the firmware on the card */ | ||
1912 | ret = csio_hw_read_flash(hw, FLASH_FW_START, | ||
1913 | sizeof(*card_fw) / sizeof(uint32_t), | ||
1914 | (uint32_t *)card_fw, 1); | ||
1915 | if (ret == 0) { | ||
1916 | card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); | ||
1917 | } else { | ||
1918 | csio_err(hw, | ||
1919 | "Unable to read card's firmware header: %d\n", ret); | ||
1920 | card_fw_usable = 0; | ||
1921 | } | ||
1922 | |||
1923 | if (fw_data != NULL) { | ||
1924 | fs_fw = (const void *)fw_data; | ||
1925 | fs_fw_usable = fw_compatible(drv_fw, fs_fw); | ||
1926 | } else { | ||
1927 | fs_fw = NULL; | ||
1928 | fs_fw_usable = 0; | ||
1929 | } | ||
1930 | |||
1931 | if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && | ||
1932 | (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { | ||
1933 | /* Common case: the firmware on the card is an exact match and | ||
1934 | * the filesystem one is an exact match too, or the filesystem | ||
1935 | * one is absent/incompatible. | ||
1936 | */ | ||
1937 | } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT && | ||
1938 | csio_should_install_fs_fw(hw, card_fw_usable, | ||
1939 | be32_to_cpu(fs_fw->fw_ver), | ||
1940 | be32_to_cpu(card_fw->fw_ver))) { | ||
1941 | ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, | ||
1942 | fw_size, 0); | ||
1943 | if (ret != 0) { | ||
1944 | csio_err(hw, | ||
1945 | "failed to install firmware: %d\n", ret); | ||
1946 | goto bye; | ||
1947 | } | ||
1948 | |||
1949 | /* Installed successfully, update the cached header too. */ | ||
1950 | memcpy(card_fw, fs_fw, sizeof(*card_fw)); | ||
1951 | card_fw_usable = 1; | ||
1952 | *reset = 0; /* already reset as part of load_fw */ | ||
1953 | } | ||
1954 | |||
1955 | if (!card_fw_usable) { | ||
1956 | uint32_t d, c, k; | ||
1957 | |||
1958 | d = be32_to_cpu(drv_fw->fw_ver); | ||
1959 | c = be32_to_cpu(card_fw->fw_ver); | ||
1960 | k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; | ||
1961 | |||
1962 | csio_err(hw, "Cannot find a usable firmware: " | ||
1963 | "chip state %d, " | ||
1964 | "driver compiled with %d.%d.%d.%d, " | ||
1965 | "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", | ||
1966 | state, | ||
1967 | FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), | ||
1968 | FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), | ||
1969 | FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), | ||
1970 | FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), | ||
1971 | FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), | ||
1972 | FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); | ||
1973 | ret = EINVAL; | ||
1974 | goto bye; | ||
1975 | } | ||
1976 | |||
1977 | /* We're using whatever's on the card and it's known to be good. */ | ||
1978 | hw->fwrev = be32_to_cpu(card_fw->fw_ver); | ||
1979 | hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); | ||
1980 | |||
1981 | bye: | ||
1982 | return ret; | ||
1971 | } | 1983 | } |
1972 | 1984 | ||
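The key relaxation over the removed major/minor check is fw_compatible(): two images count as compatible if they are the identical version, or if every per-protocol interface version (nic, vnic, ri, iscsi, fcoe) matches, so the driver can keep running a firmware whose overall version differs as long as the interfaces it exposes are the ones the driver was built against. A usage sketch against the compiled-in header:

	const struct fw_hdr *drv_fw = &fw_info->fw_hdr;

	if (fw_compatible(drv_fw, (const struct fw_hdr *)card_fw))
		csio_dbg(hw, "on-card firmware is usable with this driver\n");
	else
		csio_dbg(hw, "on-card firmware incompatible, consider flashing %s\n",
			 FW_FNAME_T5);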
1973 | /* | 1985 | /* |
@@ -1977,48 +1989,52 @@ out: | |||
1977 | * latest firmware ECANCELED is returned | 1989 | * latest firmware ECANCELED is returned |
1978 | */ | 1990 | */ |
1979 | static int | 1991 | static int |
1980 | csio_hw_flash_fw(struct csio_hw *hw) | 1992 | csio_hw_flash_fw(struct csio_hw *hw, int *reset) |
1981 | { | 1993 | { |
1982 | int ret = -ECANCELED; | 1994 | int ret = -ECANCELED; |
1983 | const struct firmware *fw; | 1995 | const struct firmware *fw; |
1984 | const struct fw_hdr *hdr; | 1996 | struct fw_info *fw_info; |
1985 | u32 fw_ver; | 1997 | struct fw_hdr *card_fw; |
1986 | struct pci_dev *pci_dev = hw->pdev; | 1998 | struct pci_dev *pci_dev = hw->pdev; |
1987 | struct device *dev = &pci_dev->dev ; | 1999 | struct device *dev = &pci_dev->dev ; |
2000 | const u8 *fw_data = NULL; | ||
2001 | unsigned int fw_size = 0; | ||
1988 | 2002 | ||
1989 | if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) { | 2003 | /* This is the firmware whose headers the driver was compiled |
1990 | csio_err(hw, "could not find firmware image %s, err: %d\n", | 2004 | * against |
1991 | CSIO_FW_FNAME(hw), ret); | 2005 | */ |
2006 | fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id)); | ||
2007 | if (fw_info == NULL) { | ||
2008 | csio_err(hw, | ||
2009 | "unable to get firmware info for chip %d.\n", | ||
2010 | CHELSIO_CHIP_VERSION(hw->chip_id)); | ||
1992 | return -EINVAL; | 2011 | return -EINVAL; |
1993 | } | 2012 | } |
1994 | 2013 | ||
1995 | hdr = (const struct fw_hdr *)fw->data; | 2014 | if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) { |
1996 | fw_ver = ntohl(hdr->fw_ver); | 2015 | csio_err(hw, "could not find firmware image %s, err: %d\n", |
1997 | if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw)) | 2016 | FW_FNAME_T5, ret); |
1998 | return -EINVAL; /* wrong major version, won't do */ | 2017 | } else { |
2018 | fw_data = fw->data; | ||
2019 | fw_size = fw->size; | ||
2020 | } | ||
1999 | 2021 | ||
2000 | /* | 2022 | /* allocate memory to read the header of the firmware on the |
2001 | * If the flash FW is unusable or we found something newer, load it. | 2023 | * card |
2002 | */ | 2024 | */ |
2003 | if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) || | 2025 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); |
2004 | fw_ver > hw->fwrev) { | ||
2005 | ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, | ||
2006 | /*force=*/false); | ||
2007 | if (!ret) | ||
2008 | csio_info(hw, | ||
2009 | "firmware upgraded to version %pI4 from %s\n", | ||
2010 | &hdr->fw_ver, CSIO_FW_FNAME(hw)); | ||
2011 | else | ||
2012 | csio_err(hw, "firmware upgrade failed! err=%d\n", ret); | ||
2013 | } else | ||
2014 | ret = -EINVAL; | ||
2015 | 2026 | ||
2016 | release_firmware(fw); | 2027 | /* upgrade FW logic */ |
2028 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, | ||
2029 | hw->fw_state, reset); | ||
2017 | 2030 | ||
2031 | /* Cleaning up */ | ||
2032 | if (fw != NULL) | ||
2033 | release_firmware(fw); | ||
2034 | kfree(card_fw); | ||
2018 | return ret; | 2035 | return ret; |
2019 | } | 2036 | } |
2020 | 2037 | ||
2021 | |||
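One behavioural change in the rewritten csio_hw_flash_fw(): a missing /lib/firmware image is no longer fatal. A failed request_firmware() simply leaves fw_data NULL, and csio_hw_prep_fw() then evaluates only the firmware already on the card. The shape of that fallback, as in the hunk above:

	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;

	if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 FW_FNAME_T5, ret);	/* warn, but keep going */
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}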
2022 | /* | 2038 | /* |
2023 | * csio_hw_configure - Configure HW | 2039 | * csio_hw_configure - Configure HW |
2024 | * @hw - HW module | 2040 | * @hw - HW module |
@@ -2039,7 +2055,7 @@ csio_hw_configure(struct csio_hw *hw) | |||
2039 | } | 2055 | } |
2040 | 2056 | ||
2041 | /* HW version */ | 2057 | /* HW version */ |
2042 | hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); | 2058 | hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); |
2043 | 2059 | ||
2044 | /* Needed for FW download */ | 2060 | /* Needed for FW download */ |
2045 | rv = csio_hw_get_flash_params(hw); | 2061 | rv = csio_hw_get_flash_params(hw); |
@@ -2074,50 +2090,43 @@ csio_hw_configure(struct csio_hw *hw) | |||
2074 | if (rv != 0) | 2090 | if (rv != 0) |
2075 | goto out; | 2091 | goto out; |
2076 | 2092 | ||
2093 | csio_hw_get_fw_version(hw, &hw->fwrev); | ||
2094 | csio_hw_get_tp_version(hw, &hw->tp_vers); | ||
2077 | if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { | 2095 | if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { |
2078 | rv = csio_hw_check_fw_version(hw); | ||
2079 | if (rv == -EINVAL) { | ||
2080 | 2096 | ||
2081 | /* Do firmware update */ | 2097 | /* Do firmware update */ |
2082 | spin_unlock_irq(&hw->lock); | 2098 | spin_unlock_irq(&hw->lock); |
2083 | rv = csio_hw_flash_fw(hw); | 2099 | rv = csio_hw_flash_fw(hw, &reset); |
2084 | spin_lock_irq(&hw->lock); | 2100 | spin_lock_irq(&hw->lock); |
2085 | 2101 | ||
2086 | if (rv == 0) { | 2102 | if (rv != 0) |
2087 | reset = 0; | 2103 | goto out; |
2088 | /* | 2104 | |
2089 | * Note that the chip was reset as part of the | 2105 | /* If the firmware doesn't support Configuration Files, |
2090 | * firmware upgrade so we don't reset it again | 2106 | * return an error. |
2091 | * below and grab the new firmware version. | ||
2092 | */ | ||
2093 | rv = csio_hw_check_fw_version(hw); | ||
2094 | } | ||
2095 | } | ||
2096 | /* | ||
2097 | * If the firmware doesn't support Configuration | ||
2098 | * Files, use the old Driver-based, hard-wired | ||
2099 | * initialization. Otherwise, try using the | ||
2100 | * Configuration File support and fall back to the | ||
2101 | * Driver-based initialization if there's no | ||
2102 | * Configuration File found. | ||
2103 | */ | 2107 | */ |
2104 | if (csio_hw_check_fwconfig(hw, param) == 0) { | 2108 | rv = csio_hw_check_fwconfig(hw, param); |
2105 | rv = csio_hw_use_fwconfig(hw, reset, param); | 2109 | if (rv != 0) { |
2106 | if (rv == -ENOENT) | 2110 | csio_info(hw, "Firmware doesn't support " |
2107 | goto out; | 2111 | "Firmware Configuration files\n"); |
2108 | if (rv != 0) { | 2112 | goto out; |
2109 | csio_info(hw, | ||
2110 | "No Configuration File present " | ||
2111 | "on adapter. Using hard-wired " | ||
2112 | "configuration parameters.\n"); | ||
2113 | rv = csio_hw_no_fwconfig(hw, reset); | ||
2114 | } | ||
2115 | } else { | ||
2116 | rv = csio_hw_no_fwconfig(hw, reset); | ||
2117 | } | 2113 | } |
2118 | 2114 | ||
2119 | if (rv != 0) | 2115 | /* The firmware provides us with a memory buffer where we can |
2116 | * load a Configuration File from the host if we want to | ||
2117 | * override the Configuration File in flash. | ||
2118 | */ | ||
2119 | rv = csio_hw_use_fwconfig(hw, reset, param); | ||
2120 | if (rv == -ENOENT) { | ||
2121 | csio_info(hw, "Could not initialize " | ||
2122 | "adapter, error%d\n", rv); | ||
2123 | goto out; | ||
2124 | } | ||
2125 | if (rv != 0) { | ||
2126 | csio_info(hw, "Could not initialize " | ||
2127 | "adapter, error%d\n", rv); | ||
2120 | goto out; | 2128 | goto out; |
2129 | } | ||
2121 | 2130 | ||
2122 | } else { | 2131 | } else { |
2123 | if (hw->fw_state == CSIO_DEV_STATE_INIT) { | 2132 | if (hw->fw_state == CSIO_DEV_STATE_INIT) { |
@@ -2217,7 +2226,7 @@ out: | |||
2217 | return; | 2226 | return; |
2218 | } | 2227 | } |
2219 | 2228 | ||
2220 | #define PF_INTR_MASK (PFSW | PFCIM) | 2229 | #define PF_INTR_MASK (PFSW_F | PFCIM_F) |
2221 | 2230 | ||
2222 | /* | 2231 | /* |
2223 | * csio_hw_intr_enable - Enable HW interrupts | 2232 | * csio_hw_intr_enable - Enable HW interrupts |
@@ -2229,21 +2238,21 @@ static void | |||
2229 | csio_hw_intr_enable(struct csio_hw *hw) | 2238 | csio_hw_intr_enable(struct csio_hw *hw) |
2230 | { | 2239 | { |
2231 | uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); | 2240 | uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); |
2232 | uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); | 2241 | uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); |
2233 | uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); | 2242 | uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); |
2234 | 2243 | ||
2235 | /* | 2244 | /* |
2236 | * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up | 2245 | * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up |
2237 | * by FW, so do nothing for INTX. | 2246 | * by FW, so do nothing for INTX. |
2238 | */ | 2247 | */ |
2239 | if (hw->intr_mode == CSIO_IM_MSIX) | 2248 | if (hw->intr_mode == CSIO_IM_MSIX) |
2240 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), | 2249 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), |
2241 | AIVEC(AIVEC_MASK), vec); | 2250 | AIVEC_V(AIVEC_M), vec); |
2242 | else if (hw->intr_mode == CSIO_IM_MSI) | 2251 | else if (hw->intr_mode == CSIO_IM_MSI) |
2243 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), | 2252 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), |
2244 | AIVEC(AIVEC_MASK), 0); | 2253 | AIVEC_V(AIVEC_M), 0); |
2245 | 2254 | ||
2246 | csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); | 2255 | csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); |
2247 | 2256 | ||
2248 | /* Turn on MB interrupts - this will internally flush PIO as well */ | 2257 | /* Turn on MB interrupts - this will internally flush PIO as well */ |
2249 | csio_mb_intr_enable(hw); | 2258 | csio_mb_intr_enable(hw); |
@@ -2253,19 +2262,19 @@ csio_hw_intr_enable(struct csio_hw *hw) | |||
2253 | /* | 2262 | /* |
2254 | * Disable the Serial FLASH interrupt, if enabled! | 2263 | * Disable the Serial FLASH interrupt, if enabled! |
2255 | */ | 2264 | */ |
2256 | pl &= (~SF); | 2265 | pl &= (~SF_F); |
2257 | csio_wr_reg32(hw, pl, PL_INT_ENABLE); | 2266 | csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); |
2258 | 2267 | ||
2259 | csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | | 2268 | csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | |
2260 | EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | | 2269 | EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | |
2261 | ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | | 2270 | ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | |
2262 | ERR_DATA_CPL_ON_HIGH_QID1 | | 2271 | ERR_DATA_CPL_ON_HIGH_QID1_F | |
2263 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | | 2272 | ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | |
2264 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | | 2273 | ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | |
2265 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | | 2274 | ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | |
2266 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, | 2275 | ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, |
2267 | SGE_INT_ENABLE3); | 2276 | SGE_INT_ENABLE3_A); |
2268 | csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); | 2277 | csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); |
2269 | } | 2278 | } |
2270 | 2279 | ||
2271 | hw->flags |= CSIO_HWF_HW_INTR_ENABLED; | 2280 | hw->flags |= CSIO_HWF_HW_INTR_ENABLED; |
@@ -2281,16 +2290,16 @@ csio_hw_intr_enable(struct csio_hw *hw) | |||
2281 | void | 2290 | void |
2282 | csio_hw_intr_disable(struct csio_hw *hw) | 2291 | csio_hw_intr_disable(struct csio_hw *hw) |
2283 | { | 2292 | { |
2284 | uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); | 2293 | uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); |
2285 | 2294 | ||
2286 | if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) | 2295 | if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) |
2287 | return; | 2296 | return; |
2288 | 2297 | ||
2289 | hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; | 2298 | hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; |
2290 | 2299 | ||
2291 | csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); | 2300 | csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); |
2292 | if (csio_is_hw_master(hw)) | 2301 | if (csio_is_hw_master(hw)) |
2293 | csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); | 2302 | csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); |
2294 | 2303 | ||
2295 | /* Turn off MB interrupts */ | 2304 | /* Turn off MB interrupts */ |
2296 | csio_mb_intr_disable(hw); | 2305 | csio_mb_intr_disable(hw); |
@@ -2300,7 +2309,7 @@ csio_hw_intr_disable(struct csio_hw *hw) | |||
2300 | void | 2309 | void |
2301 | csio_hw_fatal_err(struct csio_hw *hw) | 2310 | csio_hw_fatal_err(struct csio_hw *hw) |
2302 | { | 2311 | { |
2303 | csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); | 2312 | csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); |
2304 | csio_hw_intr_disable(hw); | 2313 | csio_hw_intr_disable(hw); |
2305 | 2314 | ||
2306 | /* Do not reset HW, we may need FW state for debugging */ | 2315 | /* Do not reset HW, we may need FW state for debugging */ |
@@ -2594,7 +2603,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) | |||
2594 | * register directly. | 2603 | * register directly. |
2595 | */ | 2604 | */ |
2596 | csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); | 2605 | csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); |
2597 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | 2606 | csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); |
2598 | mdelay(2000); | 2607 | mdelay(2000); |
2599 | break; | 2608 | break; |
2600 | 2609 | ||
@@ -2682,11 +2691,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw) | |||
2682 | { | 2691 | { |
2683 | static struct intr_info tp_intr_info[] = { | 2692 | static struct intr_info tp_intr_info[] = { |
2684 | { 0x3fffffff, "TP parity error", -1, 1 }, | 2693 | { 0x3fffffff, "TP parity error", -1, 1 }, |
2685 | { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, | 2694 | { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, |
2686 | { 0, NULL, 0, 0 } | 2695 | { 0, NULL, 0, 0 } |
2687 | }; | 2696 | }; |
2688 | 2697 | ||
2689 | if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) | 2698 | if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info)) |
2690 | csio_hw_fatal_err(hw); | 2699 | csio_hw_fatal_err(hw); |
2691 | } | 2700 | } |
2692 | 2701 | ||
@@ -2698,52 +2707,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw) | |||
2698 | uint64_t v; | 2707 | uint64_t v; |
2699 | 2708 | ||
2700 | static struct intr_info sge_intr_info[] = { | 2709 | static struct intr_info sge_intr_info[] = { |
2701 | { ERR_CPL_EXCEED_IQE_SIZE, | 2710 | { ERR_CPL_EXCEED_IQE_SIZE_F, |
2702 | "SGE received CPL exceeding IQE size", -1, 1 }, | 2711 | "SGE received CPL exceeding IQE size", -1, 1 }, |
2703 | { ERR_INVALID_CIDX_INC, | 2712 | { ERR_INVALID_CIDX_INC_F, |
2704 | "SGE GTS CIDX increment too large", -1, 0 }, | 2713 | "SGE GTS CIDX increment too large", -1, 0 }, |
2705 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, | 2714 | { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, |
2706 | { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, | 2715 | { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 }, |
2707 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, | 2716 | { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, |
2708 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, | 2717 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, |
2709 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, | 2718 | { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, |
2710 | 0 }, | 2719 | 0 }, |
2711 | { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, | 2720 | { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1, |
2712 | 0 }, | 2721 | 0 }, |
2713 | { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, | 2722 | { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1, |
2714 | 0 }, | 2723 | 0 }, |
2715 | { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, | 2724 | { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1, |
2716 | 0 }, | 2725 | 0 }, |
2717 | { ERR_ING_CTXT_PRIO, | 2726 | { ERR_ING_CTXT_PRIO_F, |
2718 | "SGE too many priority ingress contexts", -1, 0 }, | 2727 | "SGE too many priority ingress contexts", -1, 0 }, |
2719 | { ERR_EGR_CTXT_PRIO, | 2728 | { ERR_EGR_CTXT_PRIO_F, |
2720 | "SGE too many priority egress contexts", -1, 0 }, | 2729 | "SGE too many priority egress contexts", -1, 0 }, |
2721 | { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, | 2730 | { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, |
2722 | { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, | 2731 | { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, |
2723 | { 0, NULL, 0, 0 } | 2732 | { 0, NULL, 0, 0 } |
2724 | }; | 2733 | }; |
2725 | 2734 | ||
2726 | v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | | 2735 | v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) | |
2727 | ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); | 2736 | ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32); |
2728 | if (v) { | 2737 | if (v) { |
2729 | csio_fatal(hw, "SGE parity error (%#llx)\n", | 2738 | csio_fatal(hw, "SGE parity error (%#llx)\n", |
2730 | (unsigned long long)v); | 2739 | (unsigned long long)v); |
2731 | csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), | 2740 | csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), |
2732 | SGE_INT_CAUSE1); | 2741 | SGE_INT_CAUSE1_A); |
2733 | csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); | 2742 | csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A); |
2734 | } | 2743 | } |
2735 | 2744 | ||
2736 | v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); | 2745 | v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info); |
2737 | 2746 | ||
2738 | if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) || | 2747 | if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) || |
2739 | v != 0) | 2748 | v != 0) |
2740 | csio_hw_fatal_err(hw); | 2749 | csio_hw_fatal_err(hw); |
2741 | } | 2750 | } |
2742 | 2751 | ||
2743 | #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ | 2752 | #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\ |
2744 | OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) | 2753 | OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F) |
2745 | #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ | 2754 | #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\ |
2746 | IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) | 2755 | IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F) |
2747 | 2756 | ||
2748 | /* | 2757 | /* |
2749 | * CIM interrupt handler. | 2758 | * CIM interrupt handler. |
@@ -2751,53 +2760,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw) | |||
2751 | static void csio_cim_intr_handler(struct csio_hw *hw) | 2760 | static void csio_cim_intr_handler(struct csio_hw *hw) |
2752 | { | 2761 | { |
2753 | static struct intr_info cim_intr_info[] = { | 2762 | static struct intr_info cim_intr_info[] = { |
2754 | { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, | 2763 | { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, |
2755 | { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, | 2764 | { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, |
2756 | { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, | 2765 | { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, |
2757 | { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, | 2766 | { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, |
2758 | { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, | 2767 | { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, |
2759 | { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, | 2768 | { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, |
2760 | { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, | 2769 | { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, |
2761 | { 0, NULL, 0, 0 } | 2770 | { 0, NULL, 0, 0 } |
2762 | }; | 2771 | }; |
2763 | static struct intr_info cim_upintr_info[] = { | 2772 | static struct intr_info cim_upintr_info[] = { |
2764 | { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, | 2773 | { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, |
2765 | { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, | 2774 | { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, |
2766 | { ILLWRINT, "CIM illegal write", -1, 1 }, | 2775 | { ILLWRINT_F, "CIM illegal write", -1, 1 }, |
2767 | { ILLRDINT, "CIM illegal read", -1, 1 }, | 2776 | { ILLRDINT_F, "CIM illegal read", -1, 1 }, |
2768 | { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, | 2777 | { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, |
2769 | { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, | 2778 | { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, |
2770 | { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, | 2779 | { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, |
2771 | { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, | 2780 | { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, |
2772 | { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, | 2781 | { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, |
2773 | { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, | 2782 | { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, |
2774 | { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, | 2783 | { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, |
2775 | { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, | 2784 | { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, |
2776 | { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, | 2785 | { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, |
2777 | { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, | 2786 | { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, |
2778 | { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, | 2787 | { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, |
2779 | { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, | 2788 | { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, |
2780 | { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, | 2789 | { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, |
2781 | { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, | 2790 | { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, |
2782 | { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, | 2791 | { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, |
2783 | { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, | 2792 | { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, |
2784 | { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, | 2793 | { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, |
2785 | { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, | 2794 | { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, |
2786 | { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, | 2795 | { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, |
2787 | { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, | 2796 | { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, |
2788 | { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, | 2797 | { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, |
2789 | { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, | 2798 | { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, |
2790 | { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, | 2799 | { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, |
2791 | { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, | 2800 | { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, |
2792 | { 0, NULL, 0, 0 } | 2801 | { 0, NULL, 0, 0 } |
2793 | }; | 2802 | }; |
2794 | 2803 | ||
2795 | int fat; | 2804 | int fat; |
2796 | 2805 | ||
2797 | fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, | 2806 | fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, |
2798 | cim_intr_info) + | 2807 | cim_intr_info) + |
2799 | csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, | 2808 | csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, |
2800 | cim_upintr_info); | 2809 | cim_upintr_info); |
2801 | if (fat) | 2810 | if (fat) |
2802 | csio_hw_fatal_err(hw); | 2811 | csio_hw_fatal_err(hw); |
2803 | } | 2812 | } |
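The bulk of the changes in these csio_hw.c hunks is a mechanical rename of register and bitfield macros to the suffixed convention of the shared cxgb4 headers: _A for a register address, _F for a single-bit field flag, _M for an unshifted field mask, _V() to place a value into a field and _G() to extract one. A minimal sketch of that convention, with made-up names and offsets purely for illustration (only the suffix scheme itself is taken from this diff):

/* Illustrative only -- the real definitions live in the shared t4 register headers. */
#define EXAMPLE_INT_CAUSE_A   0x7e40                        /* _A: register address      */
#define EXAMPLE_PARERR_F      (1U << 3)                     /* _F: single-bit field flag */
#define EXAMPLE_CNT_M         0xffU                         /* _M: field mask, unshifted */
#define EXAMPLE_CNT_V(x)      ((x) << 8)                    /* _V: place value in field  */
#define EXAMPLE_CNT_G(x)      (((x) >> 8) & EXAMPLE_CNT_M)  /* _G: extract field value   */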
@@ -2813,7 +2822,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw) | |||
2813 | { 0, NULL, 0, 0 } | 2822 | { 0, NULL, 0, 0 } |
2814 | }; | 2823 | }; |
2815 | 2824 | ||
2816 | if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) | 2825 | if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) |
2817 | csio_hw_fatal_err(hw); | 2826 | csio_hw_fatal_err(hw); |
2818 | } | 2827 | } |
2819 | 2828 | ||
@@ -2823,19 +2832,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw) | |||
2823 | static void csio_ulptx_intr_handler(struct csio_hw *hw) | 2832 | static void csio_ulptx_intr_handler(struct csio_hw *hw) |
2824 | { | 2833 | { |
2825 | static struct intr_info ulptx_intr_info[] = { | 2834 | static struct intr_info ulptx_intr_info[] = { |
2826 | { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, | 2835 | { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, |
2827 | 0 }, | 2836 | 0 }, |
2828 | { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, | 2837 | { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, |
2829 | 0 }, | 2838 | 0 }, |
2830 | { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, | 2839 | { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, |
2831 | 0 }, | 2840 | 0 }, |
2832 | { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, | 2841 | { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, |
2833 | 0 }, | 2842 | 0 }, |
2834 | { 0xfffffff, "ULPTX parity error", -1, 1 }, | 2843 | { 0xfffffff, "ULPTX parity error", -1, 1 }, |
2835 | { 0, NULL, 0, 0 } | 2844 | { 0, NULL, 0, 0 } |
2836 | }; | 2845 | }; |
2837 | 2846 | ||
2838 | if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) | 2847 | if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) |
2839 | csio_hw_fatal_err(hw); | 2848 | csio_hw_fatal_err(hw); |
2840 | } | 2849 | } |
2841 | 2850 | ||
@@ -2845,20 +2854,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw) | |||
2845 | static void csio_pmtx_intr_handler(struct csio_hw *hw) | 2854 | static void csio_pmtx_intr_handler(struct csio_hw *hw) |
2846 | { | 2855 | { |
2847 | static struct intr_info pmtx_intr_info[] = { | 2856 | static struct intr_info pmtx_intr_info[] = { |
2848 | { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, | 2857 | { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, |
2849 | { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, | 2858 | { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, |
2850 | { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, | 2859 | { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, |
2851 | { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, | 2860 | { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, |
2852 | { 0xffffff0, "PMTX framing error", -1, 1 }, | 2861 | { 0xffffff0, "PMTX framing error", -1, 1 }, |
2853 | { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, | 2862 | { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, |
2854 | { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, | 2863 | { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, |
2855 | 1 }, | 2864 | 1 }, |
2856 | { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, | 2865 | { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, |
2857 | { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, | 2866 | { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, |
2858 | { 0, NULL, 0, 0 } | 2867 | { 0, NULL, 0, 0 } |
2859 | }; | 2868 | }; |
2860 | 2869 | ||
2861 | if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) | 2870 | if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) |
2862 | csio_hw_fatal_err(hw); | 2871 | csio_hw_fatal_err(hw); |
2863 | } | 2872 | } |
2864 | 2873 | ||
@@ -2868,17 +2877,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw) | |||
2868 | static void csio_pmrx_intr_handler(struct csio_hw *hw) | 2877 | static void csio_pmrx_intr_handler(struct csio_hw *hw) |
2869 | { | 2878 | { |
2870 | static struct intr_info pmrx_intr_info[] = { | 2879 | static struct intr_info pmrx_intr_info[] = { |
2871 | { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, | 2880 | { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, |
2872 | { 0x3ffff0, "PMRX framing error", -1, 1 }, | 2881 | { 0x3ffff0, "PMRX framing error", -1, 1 }, |
2873 | { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, | 2882 | { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, |
2874 | { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, | 2883 | { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, |
2875 | 1 }, | 2884 | 1 }, |
2876 | { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, | 2885 | { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, |
2877 | { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, | 2886 | { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, |
2878 | { 0, NULL, 0, 0 } | 2887 | { 0, NULL, 0, 0 } |
2879 | }; | 2888 | }; |
2880 | 2889 | ||
2881 | if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) | 2890 | if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) |
2882 | csio_hw_fatal_err(hw); | 2891 | csio_hw_fatal_err(hw); |
2883 | } | 2892 | } |
2884 | 2893 | ||
@@ -2888,16 +2897,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw) | |||
2888 | static void csio_cplsw_intr_handler(struct csio_hw *hw) | 2897 | static void csio_cplsw_intr_handler(struct csio_hw *hw) |
2889 | { | 2898 | { |
2890 | static struct intr_info cplsw_intr_info[] = { | 2899 | static struct intr_info cplsw_intr_info[] = { |
2891 | { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, | 2900 | { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, |
2892 | { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, | 2901 | { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, |
2893 | { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, | 2902 | { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, |
2894 | { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, | 2903 | { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, |
2895 | { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, | 2904 | { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, |
2896 | { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, | 2905 | { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, |
2897 | { 0, NULL, 0, 0 } | 2906 | { 0, NULL, 0, 0 } |
2898 | }; | 2907 | }; |
2899 | 2908 | ||
2900 | if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) | 2909 | if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) |
2901 | csio_hw_fatal_err(hw); | 2910 | csio_hw_fatal_err(hw); |
2902 | } | 2911 | } |
2903 | 2912 | ||
@@ -2907,15 +2916,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw) | |||
2907 | static void csio_le_intr_handler(struct csio_hw *hw) | 2916 | static void csio_le_intr_handler(struct csio_hw *hw) |
2908 | { | 2917 | { |
2909 | static struct intr_info le_intr_info[] = { | 2918 | static struct intr_info le_intr_info[] = { |
2910 | { LIPMISS, "LE LIP miss", -1, 0 }, | 2919 | { LIPMISS_F, "LE LIP miss", -1, 0 }, |
2911 | { LIP0, "LE 0 LIP error", -1, 0 }, | 2920 | { LIP0_F, "LE 0 LIP error", -1, 0 }, |
2912 | { PARITYERR, "LE parity error", -1, 1 }, | 2921 | { PARITYERR_F, "LE parity error", -1, 1 }, |
2913 | { UNKNOWNCMD, "LE unknown command", -1, 1 }, | 2922 | { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, |
2914 | { REQQPARERR, "LE request queue parity error", -1, 1 }, | 2923 | { REQQPARERR_F, "LE request queue parity error", -1, 1 }, |
2915 | { 0, NULL, 0, 0 } | 2924 | { 0, NULL, 0, 0 } |
2916 | }; | 2925 | }; |
2917 | 2926 | ||
2918 | if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) | 2927 | if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info)) |
2919 | csio_hw_fatal_err(hw); | 2928 | csio_hw_fatal_err(hw); |
2920 | } | 2929 | } |
2921 | 2930 | ||
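Every handler in this file follows the same table-driven pattern: an intr_info array pairs a cause-register bit (or mask) with a log message, a statistics index (-1 throughout this file) and a fatal flag, and csio_handle_intr_status() walks that table against the freshly read cause register. The helper's definition is not part of this diff, so the sketch below only shows the shape the tables imply; the field names and exact logging calls are assumptions, not the driver's code:

/* Assumed layout of the table entries used by the handlers above and below. */
struct example_intr_info {
	unsigned int mask;        /* cause-register bits covered by this entry */
	const char *msg;          /* message to log; NULL terminates the table */
	short stat_idx;           /* statistics counter to bump, or -1         */
	unsigned short fatal;     /* non-zero if the condition is fatal        */
};

/* Sketch of the table walk (assumed, not the driver's exact code). */
static int example_handle_intr_status(struct csio_hw *hw, unsigned int reg,
				      const struct example_intr_info *acts)
{
	unsigned int status = csio_rd_reg32(hw, reg);
	int fatal = 0;

	for (; acts->msg; ++acts) {
		if (!(status & acts->mask))
			continue;
		fatal |= acts->fatal;
		if (acts->fatal)
			csio_fatal(hw, "%s (0x%x)\n", acts->msg, status & acts->mask);
		else
			csio_warn(hw, "%s (0x%x)\n", acts->msg, status & acts->mask);
	}
	if (status)
		csio_wr_reg32(hw, status, reg);   /* acknowledge what was reported */
	return fatal;
}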
@@ -2929,19 +2938,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw) | |||
2929 | { 0, NULL, 0, 0 } | 2938 | { 0, NULL, 0, 0 } |
2930 | }; | 2939 | }; |
2931 | static struct intr_info mps_tx_intr_info[] = { | 2940 | static struct intr_info mps_tx_intr_info[] = { |
2932 | { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, | 2941 | { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, |
2933 | { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, | 2942 | { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, |
2934 | { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, | 2943 | { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", |
2935 | { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, | 2944 | -1, 1 }, |
2936 | { BUBBLE, "MPS Tx underflow", -1, 1 }, | 2945 | { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", |
2937 | { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, | 2946 | -1, 1 }, |
2938 | { FRMERR, "MPS Tx framing error", -1, 1 }, | 2947 | { BUBBLE_F, "MPS Tx underflow", -1, 1 }, |
2948 | { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, | ||
2949 | { FRMERR_F, "MPS Tx framing error", -1, 1 }, | ||
2939 | { 0, NULL, 0, 0 } | 2950 | { 0, NULL, 0, 0 } |
2940 | }; | 2951 | }; |
2941 | static struct intr_info mps_trc_intr_info[] = { | 2952 | static struct intr_info mps_trc_intr_info[] = { |
2942 | { FILTMEM, "MPS TRC filter parity error", -1, 1 }, | 2953 | { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, |
2943 | { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, | 2954 | { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", |
2944 | { MISCPERR, "MPS TRC misc parity error", -1, 1 }, | 2955 | -1, 1 }, |
2956 | { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, | ||
2945 | { 0, NULL, 0, 0 } | 2957 | { 0, NULL, 0, 0 } |
2946 | }; | 2958 | }; |
2947 | static struct intr_info mps_stat_sram_intr_info[] = { | 2959 | static struct intr_info mps_stat_sram_intr_info[] = { |
@@ -2957,36 +2969,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw) | |||
2957 | { 0, NULL, 0, 0 } | 2969 | { 0, NULL, 0, 0 } |
2958 | }; | 2970 | }; |
2959 | static struct intr_info mps_cls_intr_info[] = { | 2971 | static struct intr_info mps_cls_intr_info[] = { |
2960 | { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, | 2972 | { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, |
2961 | { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, | 2973 | { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, |
2962 | { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, | 2974 | { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, |
2963 | { 0, NULL, 0, 0 } | 2975 | { 0, NULL, 0, 0 } |
2964 | }; | 2976 | }; |
2965 | 2977 | ||
2966 | int fat; | 2978 | int fat; |
2967 | 2979 | ||
2968 | fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, | 2980 | fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, |
2969 | mps_rx_intr_info) + | 2981 | mps_rx_intr_info) + |
2970 | csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, | 2982 | csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, |
2971 | mps_tx_intr_info) + | 2983 | mps_tx_intr_info) + |
2972 | csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, | 2984 | csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, |
2973 | mps_trc_intr_info) + | 2985 | mps_trc_intr_info) + |
2974 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, | 2986 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, |
2975 | mps_stat_sram_intr_info) + | 2987 | mps_stat_sram_intr_info) + |
2976 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, | 2988 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, |
2977 | mps_stat_tx_intr_info) + | 2989 | mps_stat_tx_intr_info) + |
2978 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, | 2990 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, |
2979 | mps_stat_rx_intr_info) + | 2991 | mps_stat_rx_intr_info) + |
2980 | csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, | 2992 | csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, |
2981 | mps_cls_intr_info); | 2993 | mps_cls_intr_info); |
2982 | 2994 | ||
2983 | csio_wr_reg32(hw, 0, MPS_INT_CAUSE); | 2995 | csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); |
2984 | csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ | 2996 | csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ |
2985 | if (fat) | 2997 | if (fat) |
2986 | csio_hw_fatal_err(hw); | 2998 | csio_hw_fatal_err(hw); |
2987 | } | 2999 | } |
2988 | 3000 | ||
2989 | #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) | 3001 | #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ |
3002 | ECC_UE_INT_CAUSE_F) | ||
2990 | 3003 | ||
2991 | /* | 3004 | /* |
2992 | * EDC/MC interrupt handler. | 3005 | * EDC/MC interrupt handler. |
@@ -2998,28 +3011,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx) | |||
2998 | unsigned int addr, cnt_addr, v; | 3011 | unsigned int addr, cnt_addr, v; |
2999 | 3012 | ||
3000 | if (idx <= MEM_EDC1) { | 3013 | if (idx <= MEM_EDC1) { |
3001 | addr = EDC_REG(EDC_INT_CAUSE, idx); | 3014 | addr = EDC_REG(EDC_INT_CAUSE_A, idx); |
3002 | cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); | 3015 | cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); |
3003 | } else { | 3016 | } else { |
3004 | addr = MC_INT_CAUSE; | 3017 | addr = MC_INT_CAUSE_A; |
3005 | cnt_addr = MC_ECC_STATUS; | 3018 | cnt_addr = MC_ECC_STATUS_A; |
3006 | } | 3019 | } |
3007 | 3020 | ||
3008 | v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; | 3021 | v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; |
3009 | if (v & PERR_INT_CAUSE) | 3022 | if (v & PERR_INT_CAUSE_F) |
3010 | csio_fatal(hw, "%s FIFO parity error\n", name[idx]); | 3023 | csio_fatal(hw, "%s FIFO parity error\n", name[idx]); |
3011 | if (v & ECC_CE_INT_CAUSE) { | 3024 | if (v & ECC_CE_INT_CAUSE_F) { |
3012 | uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); | 3025 | uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr)); |
3013 | 3026 | ||
3014 | csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); | 3027 | csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr); |
3015 | csio_warn(hw, "%u %s correctable ECC data error%s\n", | 3028 | csio_warn(hw, "%u %s correctable ECC data error%s\n", |
3016 | cnt, name[idx], cnt > 1 ? "s" : ""); | 3029 | cnt, name[idx], cnt > 1 ? "s" : ""); |
3017 | } | 3030 | } |
3018 | if (v & ECC_UE_INT_CAUSE) | 3031 | if (v & ECC_UE_INT_CAUSE_F) |
3019 | csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); | 3032 | csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); |
3020 | 3033 | ||
3021 | csio_wr_reg32(hw, v, addr); | 3034 | csio_wr_reg32(hw, v, addr); |
3022 | if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) | 3035 | if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) |
3023 | csio_hw_fatal_err(hw); | 3036 | csio_hw_fatal_err(hw); |
3024 | } | 3037 | } |
3025 | 3038 | ||
@@ -3028,18 +3041,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx) | |||
3028 | */ | 3041 | */ |
3029 | static void csio_ma_intr_handler(struct csio_hw *hw) | 3042 | static void csio_ma_intr_handler(struct csio_hw *hw) |
3030 | { | 3043 | { |
3031 | uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); | 3044 | uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A); |
3032 | 3045 | ||
3033 | if (status & MEM_PERR_INT_CAUSE) | 3046 | if (status & MEM_PERR_INT_CAUSE_F) |
3034 | csio_fatal(hw, "MA parity error, parity status %#x\n", | 3047 | csio_fatal(hw, "MA parity error, parity status %#x\n", |
3035 | csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); | 3048 | csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A)); |
3036 | if (status & MEM_WRAP_INT_CAUSE) { | 3049 | if (status & MEM_WRAP_INT_CAUSE_F) { |
3037 | v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); | 3050 | v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A); |
3038 | csio_fatal(hw, | 3051 | csio_fatal(hw, |
3039 | "MA address wrap-around error by client %u to address %#x\n", | 3052 | "MA address wrap-around error by client %u to address %#x\n", |
3040 | MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); | 3053 | MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4); |
3041 | } | 3054 | } |
3042 | csio_wr_reg32(hw, status, MA_INT_CAUSE); | 3055 | csio_wr_reg32(hw, status, MA_INT_CAUSE_A); |
3043 | csio_hw_fatal_err(hw); | 3056 | csio_hw_fatal_err(hw); |
3044 | } | 3057 | } |
3045 | 3058 | ||
@@ -3049,13 +3062,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw) | |||
3049 | static void csio_smb_intr_handler(struct csio_hw *hw) | 3062 | static void csio_smb_intr_handler(struct csio_hw *hw) |
3050 | { | 3063 | { |
3051 | static struct intr_info smb_intr_info[] = { | 3064 | static struct intr_info smb_intr_info[] = { |
3052 | { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, | 3065 | { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, |
3053 | { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, | 3066 | { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, |
3054 | { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, | 3067 | { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, |
3055 | { 0, NULL, 0, 0 } | 3068 | { 0, NULL, 0, 0 } |
3056 | }; | 3069 | }; |
3057 | 3070 | ||
3058 | if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) | 3071 | if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info)) |
3059 | csio_hw_fatal_err(hw); | 3072 | csio_hw_fatal_err(hw); |
3060 | } | 3073 | } |
3061 | 3074 | ||
@@ -3065,14 +3078,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw) | |||
3065 | static void csio_ncsi_intr_handler(struct csio_hw *hw) | 3078 | static void csio_ncsi_intr_handler(struct csio_hw *hw) |
3066 | { | 3079 | { |
3067 | static struct intr_info ncsi_intr_info[] = { | 3080 | static struct intr_info ncsi_intr_info[] = { |
3068 | { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, | 3081 | { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, |
3069 | { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, | 3082 | { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, |
3070 | { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, | 3083 | { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, |
3071 | { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, | 3084 | { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, |
3072 | { 0, NULL, 0, 0 } | 3085 | { 0, NULL, 0, 0 } |
3073 | }; | 3086 | }; |
3074 | 3087 | ||
3075 | if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) | 3088 | if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info)) |
3076 | csio_hw_fatal_err(hw); | 3089 | csio_hw_fatal_err(hw); |
3077 | } | 3090 | } |
3078 | 3091 | ||
@@ -3081,17 +3094,17 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw) | |||
3081 | */ | 3094 | */ |
3082 | static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) | 3095 | static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) |
3083 | { | 3096 | { |
3084 | uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port)); | 3097 | uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); |
3085 | 3098 | ||
3086 | v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; | 3099 | v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; |
3087 | if (!v) | 3100 | if (!v) |
3088 | return; | 3101 | return; |
3089 | 3102 | ||
3090 | if (v & TXFIFO_PRTY_ERR) | 3103 | if (v & TXFIFO_PRTY_ERR_F) |
3091 | csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); | 3104 | csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); |
3092 | if (v & RXFIFO_PRTY_ERR) | 3105 | if (v & RXFIFO_PRTY_ERR_F) |
3093 | csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); | 3106 | csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); |
3094 | csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); | 3107 | csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); |
3095 | csio_hw_fatal_err(hw); | 3108 | csio_hw_fatal_err(hw); |
3096 | } | 3109 | } |
3097 | 3110 | ||
@@ -3101,12 +3114,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) | |||
3101 | static void csio_pl_intr_handler(struct csio_hw *hw) | 3114 | static void csio_pl_intr_handler(struct csio_hw *hw) |
3102 | { | 3115 | { |
3103 | static struct intr_info pl_intr_info[] = { | 3116 | static struct intr_info pl_intr_info[] = { |
3104 | { FATALPERR, "T4 fatal parity error", -1, 1 }, | 3117 | { FATALPERR_F, "T4 fatal parity error", -1, 1 }, |
3105 | { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, | 3118 | { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, |
3106 | { 0, NULL, 0, 0 } | 3119 | { 0, NULL, 0, 0 } |
3107 | }; | 3120 | }; |
3108 | 3121 | ||
3109 | if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) | 3122 | if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) |
3110 | csio_hw_fatal_err(hw); | 3123 | csio_hw_fatal_err(hw); |
3111 | } | 3124 | } |
3112 | 3125 | ||
@@ -3121,7 +3134,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw) | |||
3121 | int | 3134 | int |
3122 | csio_hw_slow_intr_handler(struct csio_hw *hw) | 3135 | csio_hw_slow_intr_handler(struct csio_hw *hw) |
3123 | { | 3136 | { |
3124 | uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); | 3137 | uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); |
3125 | 3138 | ||
3126 | if (!(cause & CSIO_GLBL_INTR_MASK)) { | 3139 | if (!(cause & CSIO_GLBL_INTR_MASK)) { |
3127 | CSIO_INC_STATS(hw, n_plint_unexp); | 3140 | CSIO_INC_STATS(hw, n_plint_unexp); |
@@ -3132,75 +3145,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw) | |||
3132 | 3145 | ||
3133 | CSIO_INC_STATS(hw, n_plint_cnt); | 3146 | CSIO_INC_STATS(hw, n_plint_cnt); |
3134 | 3147 | ||
3135 | if (cause & CIM) | 3148 | if (cause & CIM_F) |
3136 | csio_cim_intr_handler(hw); | 3149 | csio_cim_intr_handler(hw); |
3137 | 3150 | ||
3138 | if (cause & MPS) | 3151 | if (cause & MPS_F) |
3139 | csio_mps_intr_handler(hw); | 3152 | csio_mps_intr_handler(hw); |
3140 | 3153 | ||
3141 | if (cause & NCSI) | 3154 | if (cause & NCSI_F) |
3142 | csio_ncsi_intr_handler(hw); | 3155 | csio_ncsi_intr_handler(hw); |
3143 | 3156 | ||
3144 | if (cause & PL) | 3157 | if (cause & PL_F) |
3145 | csio_pl_intr_handler(hw); | 3158 | csio_pl_intr_handler(hw); |
3146 | 3159 | ||
3147 | if (cause & SMB) | 3160 | if (cause & SMB_F) |
3148 | csio_smb_intr_handler(hw); | 3161 | csio_smb_intr_handler(hw); |
3149 | 3162 | ||
3150 | if (cause & XGMAC0) | 3163 | if (cause & XGMAC0_F) |
3151 | csio_xgmac_intr_handler(hw, 0); | 3164 | csio_xgmac_intr_handler(hw, 0); |
3152 | 3165 | ||
3153 | if (cause & XGMAC1) | 3166 | if (cause & XGMAC1_F) |
3154 | csio_xgmac_intr_handler(hw, 1); | 3167 | csio_xgmac_intr_handler(hw, 1); |
3155 | 3168 | ||
3156 | if (cause & XGMAC_KR0) | 3169 | if (cause & XGMAC_KR0_F) |
3157 | csio_xgmac_intr_handler(hw, 2); | 3170 | csio_xgmac_intr_handler(hw, 2); |
3158 | 3171 | ||
3159 | if (cause & XGMAC_KR1) | 3172 | if (cause & XGMAC_KR1_F) |
3160 | csio_xgmac_intr_handler(hw, 3); | 3173 | csio_xgmac_intr_handler(hw, 3); |
3161 | 3174 | ||
3162 | if (cause & PCIE) | 3175 | if (cause & PCIE_F) |
3163 | hw->chip_ops->chip_pcie_intr_handler(hw); | 3176 | hw->chip_ops->chip_pcie_intr_handler(hw); |
3164 | 3177 | ||
3165 | if (cause & MC) | 3178 | if (cause & MC_F) |
3166 | csio_mem_intr_handler(hw, MEM_MC); | 3179 | csio_mem_intr_handler(hw, MEM_MC); |
3167 | 3180 | ||
3168 | if (cause & EDC0) | 3181 | if (cause & EDC0_F) |
3169 | csio_mem_intr_handler(hw, MEM_EDC0); | 3182 | csio_mem_intr_handler(hw, MEM_EDC0); |
3170 | 3183 | ||
3171 | if (cause & EDC1) | 3184 | if (cause & EDC1_F) |
3172 | csio_mem_intr_handler(hw, MEM_EDC1); | 3185 | csio_mem_intr_handler(hw, MEM_EDC1); |
3173 | 3186 | ||
3174 | if (cause & LE) | 3187 | if (cause & LE_F) |
3175 | csio_le_intr_handler(hw); | 3188 | csio_le_intr_handler(hw); |
3176 | 3189 | ||
3177 | if (cause & TP) | 3190 | if (cause & TP_F) |
3178 | csio_tp_intr_handler(hw); | 3191 | csio_tp_intr_handler(hw); |
3179 | 3192 | ||
3180 | if (cause & MA) | 3193 | if (cause & MA_F) |
3181 | csio_ma_intr_handler(hw); | 3194 | csio_ma_intr_handler(hw); |
3182 | 3195 | ||
3183 | if (cause & PM_TX) | 3196 | if (cause & PM_TX_F) |
3184 | csio_pmtx_intr_handler(hw); | 3197 | csio_pmtx_intr_handler(hw); |
3185 | 3198 | ||
3186 | if (cause & PM_RX) | 3199 | if (cause & PM_RX_F) |
3187 | csio_pmrx_intr_handler(hw); | 3200 | csio_pmrx_intr_handler(hw); |
3188 | 3201 | ||
3189 | if (cause & ULP_RX) | 3202 | if (cause & ULP_RX_F) |
3190 | csio_ulprx_intr_handler(hw); | 3203 | csio_ulprx_intr_handler(hw); |
3191 | 3204 | ||
3192 | if (cause & CPL_SWITCH) | 3205 | if (cause & CPL_SWITCH_F) |
3193 | csio_cplsw_intr_handler(hw); | 3206 | csio_cplsw_intr_handler(hw); |
3194 | 3207 | ||
3195 | if (cause & SGE) | 3208 | if (cause & SGE_F) |
3196 | csio_sge_intr_handler(hw); | 3209 | csio_sge_intr_handler(hw); |
3197 | 3210 | ||
3198 | if (cause & ULP_TX) | 3211 | if (cause & ULP_TX_F) |
3199 | csio_ulptx_intr_handler(hw); | 3212 | csio_ulptx_intr_handler(hw); |
3200 | 3213 | ||
3201 | /* Clear the interrupts just processed for which we are the master. */ | 3214 | /* Clear the interrupts just processed for which we are the master. */ |
3202 | csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); | 3215 | csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); |
3203 | csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ | 3216 | csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ |
3204 | 3217 | ||
3205 | return 1; | 3218 | return 1; |
3206 | } | 3219 | } |
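The write-then-read pair that closes this handler (and csio_mps_intr_handler above) is the usual posted-write flush: reading the register back forces the preceding clear of PL_INT_CAUSE_A out to the device before the handler returns. A one-line sketch of the idiom, using only the accessors already present in this diff:

/* Flush a posted register write by reading the same register back. */
static inline void example_wr_reg32_flush(struct csio_hw *hw, u32 val, u32 reg)
{
	csio_wr_reg32(hw, val, reg);       /* write may be posted               */
	(void)csio_rd_reg32(hw, reg);      /* read-back forces it to the device */
}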
@@ -3840,13 +3853,7 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) | |||
3840 | prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); | 3853 | prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); |
3841 | adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); | 3854 | adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); |
3842 | 3855 | ||
3843 | if (prot_type == CSIO_T4_FCOE_ASIC) { | 3856 | if (prot_type == CSIO_T5_FCOE_ASIC) { |
3844 | memcpy(hw->hw_ver, | ||
3845 | csio_t4_fcoe_adapters[adap_type].model_no, 16); | ||
3846 | memcpy(hw->model_desc, | ||
3847 | csio_t4_fcoe_adapters[adap_type].description, | ||
3848 | 32); | ||
3849 | } else if (prot_type == CSIO_T5_FCOE_ASIC) { | ||
3850 | memcpy(hw->hw_ver, | 3857 | memcpy(hw->hw_ver, |
3851 | csio_t5_fcoe_adapters[adap_type].model_no, 16); | 3858 | csio_t5_fcoe_adapters[adap_type].model_no, 16); |
3852 | memcpy(hw->model_desc, | 3859 | memcpy(hw->model_desc, |
@@ -3883,8 +3890,8 @@ csio_hw_init(struct csio_hw *hw) | |||
3883 | 3890 | ||
3884 | strcpy(hw->name, CSIO_HW_NAME); | 3891 | strcpy(hw->name, CSIO_HW_NAME); |
3885 | 3892 | ||
3886 | /* Initialize the HW chip ops with T4/T5 specific ops */ | 3893 | /* Initialize the HW chip ops with T5 specific ops */ |
3887 | hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops; | 3894 | hw->chip_ops = &t5_ops; |
3888 | 3895 | ||
3889 | /* Set the model & its description */ | 3896 | /* Set the model & its description */ |
3890 | 3897 | ||
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 68248da1b9af..029bef82c057 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
49 | #include <scsi/scsi_transport_fc.h> | 49 | #include <scsi/scsi_transport_fc.h> |
50 | 50 | ||
51 | #include "t4_hw.h" | ||
51 | #include "csio_hw_chip.h" | 52 | #include "csio_hw_chip.h" |
52 | #include "csio_wr.h" | 53 | #include "csio_wr.h" |
53 | #include "csio_mb.h" | 54 | #include "csio_mb.h" |
@@ -117,10 +118,10 @@ extern int csio_msi; | |||
117 | #define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 | 118 | #define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 |
118 | #define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF | 119 | #define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF |
119 | 120 | ||
120 | #define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ | 121 | #define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \ |
121 | EDC1 | LE | TP | MA | PM_TX | PM_RX | \ | 122 | EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \ |
122 | ULP_RX | CPL_SWITCH | SGE | \ | 123 | PM_TX_F | PM_RX_F | ULP_RX_F | \ |
123 | ULP_TX | SF) | 124 | CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F) |
124 | 125 | ||
125 | /* | 126 | /* |
126 | * Hard parameters used to initialize the card in the absence of a | 127 | * Hard parameters used to initialize the card in the absence of a |
@@ -174,16 +175,12 @@ struct csio_evt_msg { | |||
174 | }; | 175 | }; |
175 | 176 | ||
176 | enum { | 177 | enum { |
177 | EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ | ||
178 | SERNUM_LEN = 16, /* Serial # length */ | 178 | SERNUM_LEN = 16, /* Serial # length */ |
179 | EC_LEN = 16, /* E/C length */ | 179 | EC_LEN = 16, /* E/C length */ |
180 | ID_LEN = 16, /* ID length */ | 180 | ID_LEN = 16, /* ID length */ |
181 | TRACE_LEN = 112, /* length of trace data and mask */ | ||
182 | }; | 181 | }; |
183 | 182 | ||
184 | enum { | 183 | enum { |
185 | SF_PAGE_SIZE = 256, /* serial flash page size */ | ||
186 | SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ | ||
187 | SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ | 184 | SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ |
188 | }; | 185 | }; |
189 | 186 | ||
@@ -199,39 +196,8 @@ enum { | |||
199 | SF_RD_DATA_FAST = 0xb, /* read flash */ | 196 | SF_RD_DATA_FAST = 0xb, /* read flash */ |
200 | SF_RD_ID = 0x9f, /* read ID */ | 197 | SF_RD_ID = 0x9f, /* read ID */ |
201 | SF_ERASE_SECTOR = 0xd8, /* erase sector */ | 198 | SF_ERASE_SECTOR = 0xd8, /* erase sector */ |
202 | |||
203 | FW_START_SEC = 8, /* first flash sector for FW */ | ||
204 | FW_END_SEC = 15, /* last flash sector for FW */ | ||
205 | FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, | ||
206 | FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, | ||
207 | |||
208 | FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/ | ||
209 | FLASH_CFG_OFFSET = 0x1f0000, | ||
210 | FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE, | ||
211 | }; | 199 | }; |
212 | 200 | ||
213 | /* | ||
214 | * Flash layout. | ||
215 | */ | ||
216 | #define FLASH_START(start) ((start) * SF_SEC_SIZE) | ||
217 | #define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) | ||
218 | |||
219 | enum { | ||
220 | /* | ||
221 | * Location of firmware image in FLASH. | ||
222 | */ | ||
223 | FLASH_FW_START_SEC = 8, | ||
224 | FLASH_FW_NSECS = 8, | ||
225 | FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), | ||
226 | FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), | ||
227 | |||
228 | /* Location of Firmware Configuration File in FLASH. */ | ||
229 | FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), | ||
230 | }; | ||
231 | |||
232 | #undef FLASH_START | ||
233 | #undef FLASH_MAX_SIZE | ||
234 | |||
235 | /* Management module */ | 201 | /* Management module */ |
236 | enum { | 202 | enum { |
237 | CSIO_MGMT_EQ_WRSIZE = 512, | 203 | CSIO_MGMT_EQ_WRSIZE = 512, |
@@ -482,11 +448,6 @@ struct csio_hw { | |||
482 | uint32_t tp_vers; | 448 | uint32_t tp_vers; |
483 | char chip_ver; | 449 | char chip_ver; |
484 | uint16_t chip_id; /* Tells T4/T5 chip */ | 450 | uint16_t chip_id; /* Tells T4/T5 chip */ |
485 | uint32_t cfg_finiver; | ||
486 | uint32_t cfg_finicsum; | ||
487 | uint32_t cfg_cfcsum; | ||
488 | uint8_t cfg_csum_status; | ||
489 | uint8_t cfg_store; | ||
490 | enum csio_dev_state fw_state; | 451 | enum csio_dev_state fw_state; |
491 | struct csio_vpd vpd; | 452 | struct csio_vpd vpd; |
492 | 453 | ||
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h index 4752fed476df..b56a11d817be 100644 --- a/drivers/scsi/csiostor/csio_hw_chip.h +++ b/drivers/scsi/csiostor/csio_hw_chip.h | |||
@@ -37,24 +37,27 @@ | |||
37 | #include "csio_defs.h" | 37 | #include "csio_defs.h" |
38 | 38 | ||
39 | /* Define MACRO values */ | 39 | /* Define MACRO values */ |
40 | #define CSIO_HW_T4 0x4000 | ||
41 | #define CSIO_T4_FCOE_ASIC 0x4600 | ||
42 | #define CSIO_HW_T5 0x5000 | 40 | #define CSIO_HW_T5 0x5000 |
43 | #define CSIO_T5_FCOE_ASIC 0x5600 | 41 | #define CSIO_T5_FCOE_ASIC 0x5600 |
44 | #define CSIO_HW_CHIP_MASK 0xF000 | 42 | #define CSIO_HW_CHIP_MASK 0xF000 |
45 | 43 | ||
46 | #define T4_REGMAP_SIZE (160 * 1024) | ||
47 | #define T5_REGMAP_SIZE (332 * 1024) | 44 | #define T5_REGMAP_SIZE (332 * 1024) |
48 | #define FW_FNAME_T4 "cxgb4/t4fw.bin" | ||
49 | #define FW_FNAME_T5 "cxgb4/t5fw.bin" | 45 | #define FW_FNAME_T5 "cxgb4/t5fw.bin" |
50 | #define FW_CFG_NAME_T4 "cxgb4/t4-config.txt" | ||
51 | #define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" | 46 | #define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" |
52 | 47 | ||
53 | /* Define static functions */ | 48 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) |
54 | static inline int csio_is_t4(uint16_t chip) | 49 | #define CHELSIO_CHIP_FPGA 0x100 |
55 | { | 50 | #define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf) |
56 | return (chip == CSIO_HW_T4); | 51 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) |
57 | } | 52 | |
53 | #define CHELSIO_T5 0x5 | ||
54 | |||
55 | enum chip_type { | ||
56 | T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | ||
57 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), | ||
58 | T5_FIRST_REV = T5_A0, | ||
59 | T5_LAST_REV = T5_A1, | ||
60 | }; | ||
58 | 61 | ||
59 | static inline int csio_is_t5(uint16_t chip) | 62 | static inline int csio_is_t5(uint16_t chip) |
60 | { | 63 | { |
@@ -65,30 +68,22 @@ static inline int csio_is_t5(uint16_t chip) | |||
65 | #define CSIO_DEVICE(devid, idx) \ | 68 | #define CSIO_DEVICE(devid, idx) \ |
66 | { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } | 69 | { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } |
67 | 70 | ||
68 | #define CSIO_HW_PIDX(hw, index) \ | 71 | #include "t4fw_api.h" |
69 | (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \ | 72 | #include "t4fw_version.h" |
70 | (PIDX_T5(index) | DBTYPE(1U))) | 73 | |
71 | 74 | #define FW_VERSION(chip) ( \ | |
72 | #define CSIO_HW_LP_INT_THRESH(hw, val) \ | 75 | FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ |
73 | (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \ | 76 | FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ |
74 | (V_LP_INT_THRESH_T5(val))) | 77 | FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ |
75 | 78 | FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) | |
76 | #define CSIO_HW_M_LP_INT_THRESH(hw) \ | 79 | #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) |
77 | (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5)) | 80 | |
78 | 81 | struct fw_info { | |
79 | #define CSIO_MAC_INT_CAUSE_REG(hw, port) \ | 82 | u8 chip; |
80 | (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \ | 83 | char *fs_name; |
81 | (T5_PORT_REG(port, MAC_PORT_INT_CAUSE))) | 84 | char *fw_mod_name; |
82 | 85 | struct fw_hdr fw_hdr; | |
83 | #define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0) | 86 | }; |
84 | #define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0) | ||
85 | #define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0) | ||
86 | |||
87 | #define CSIO_FW_FNAME(hw) \ | ||
88 | (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5) | ||
89 | |||
90 | #define CSIO_CF_FNAME(hw) \ | ||
91 | (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5) | ||
92 | 87 | ||
93 | /* Declare ENUMS */ | 88 | /* Declare ENUMS */ |
94 | enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; | 89 | enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; |
@@ -96,7 +91,6 @@ enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; | |||
96 | enum { | 91 | enum { |
97 | MEMWIN_APERTURE = 2048, | 92 | MEMWIN_APERTURE = 2048, |
98 | MEMWIN_BASE = 0x1b800, | 93 | MEMWIN_BASE = 0x1b800, |
99 | MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */ | ||
100 | }; | 94 | }; |
101 | 95 | ||
102 | /* Slow path handlers */ | 96 | /* Slow path handlers */ |
@@ -122,7 +116,6 @@ struct csio_hw_chip_ops { | |||
122 | void (*chip_dfs_create_ext_mem)(struct csio_hw *); | 116 | void (*chip_dfs_create_ext_mem)(struct csio_hw *); |
123 | }; | 117 | }; |
124 | 118 | ||
125 | extern struct csio_hw_chip_ops t4_ops; | ||
126 | extern struct csio_hw_chip_ops t5_ops; | 119 | extern struct csio_hw_chip_ops t5_ops; |
127 | 120 | ||
128 | #endif /* #ifndef __CSIO_HW_CHIP_H__ */ | 121 | #endif /* #ifndef __CSIO_HW_CHIP_H__ */ |
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c deleted file mode 100644 index 95d831857640..000000000000 --- a/drivers/scsi/csiostor/csio_hw_t4.c +++ /dev/null | |||
@@ -1,404 +0,0 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * - Redistributions in binary form must reproduce the above | ||
18 | * copyright notice, this list of conditions and the following | ||
19 | * disclaimer in the documentation and/or other materials | ||
20 | * provided with the distribution. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
23 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
24 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
25 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
26 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
27 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
28 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
29 | * SOFTWARE. | ||
30 | */ | ||
31 | |||
32 | #include "csio_hw.h" | ||
33 | #include "csio_init.h" | ||
34 | |||
35 | /* | ||
36 | * Return the specified PCI-E Configuration Space register from our Physical | ||
37 | * Function. We try first via a Firmware LDST Command since we prefer to let | ||
38 | * the firmware own all of these registers, but if that fails we go for it | ||
39 | * directly ourselves. | ||
40 | */ | ||
41 | static uint32_t | ||
42 | csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg) | ||
43 | { | ||
44 | u32 val = 0; | ||
45 | struct csio_mb *mbp; | ||
46 | int rv; | ||
47 | struct fw_ldst_cmd *ldst_cmd; | ||
48 | |||
49 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
50 | if (!mbp) { | ||
51 | CSIO_INC_STATS(hw, n_err_nomem); | ||
52 | pci_read_config_dword(hw->pdev, reg, &val); | ||
53 | return val; | ||
54 | } | ||
55 | |||
56 | csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg); | ||
57 | rv = csio_mb_issue(hw, mbp); | ||
58 | |||
59 | /* | ||
60 | * If the LDST Command succeeded, extract the returned register | ||
61 | * value. Otherwise read it directly ourselves. | ||
62 | */ | ||
63 | if (rv == 0) { | ||
64 | ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); | ||
65 | val = ntohl(ldst_cmd->u.pcie.data[0]); | ||
66 | } else | ||
67 | pci_read_config_dword(hw->pdev, reg, &val); | ||
68 | |||
69 | mempool_free(mbp, hw->mb_mempool); | ||
70 | |||
71 | return val; | ||
72 | } | ||
73 | |||
74 | static int | ||
75 | csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win) | ||
76 | { | ||
77 | u32 bar0; | ||
78 | u32 mem_win_base; | ||
79 | |||
80 | /* | ||
81 | * Truncation intentional: we only read the bottom 32-bits of the | ||
82 | * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to | ||
83 | * read BAR0 instead of using pci_resource_start() because we could be | ||
84 | * operating from within a Virtual Machine which is trapping our | ||
85 | * accesses to our Configuration Space and we need to set up the PCI-E | ||
86 | * Memory Window decoders with the actual addresses which will be | ||
87 | * coming across the PCI-E link. | ||
88 | */ | ||
89 | bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); | ||
90 | bar0 &= PCI_BASE_ADDRESS_MEM_MASK; | ||
91 | |||
92 | mem_win_base = bar0 + MEMWIN_BASE; | ||
93 | |||
94 | /* | ||
95 | * Set up memory window for accessing adapter memory ranges. (Read | ||
96 | * back MA register to ensure that changes propagate before we attempt | ||
97 | * to use the new values.) | ||
98 | */ | ||
99 | csio_wr_reg32(hw, mem_win_base | BIR(0) | | ||
100 | WINDOW(ilog2(MEMWIN_APERTURE) - 10), | ||
101 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | ||
102 | csio_rd_reg32(hw, | ||
103 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Interrupt handler for the PCIE module. | ||
109 | */ | ||
110 | static void | ||
111 | csio_t4_pcie_intr_handler(struct csio_hw *hw) | ||
112 | { | ||
113 | static struct intr_info sysbus_intr_info[] = { | ||
114 | { RNPP, "RXNP array parity error", -1, 1 }, | ||
115 | { RPCP, "RXPC array parity error", -1, 1 }, | ||
116 | { RCIP, "RXCIF array parity error", -1, 1 }, | ||
117 | { RCCP, "Rx completions control array parity error", -1, 1 }, | ||
118 | { RFTP, "RXFT array parity error", -1, 1 }, | ||
119 | { 0, NULL, 0, 0 } | ||
120 | }; | ||
121 | static struct intr_info pcie_port_intr_info[] = { | ||
122 | { TPCP, "TXPC array parity error", -1, 1 }, | ||
123 | { TNPP, "TXNP array parity error", -1, 1 }, | ||
124 | { TFTP, "TXFT array parity error", -1, 1 }, | ||
125 | { TCAP, "TXCA array parity error", -1, 1 }, | ||
126 | { TCIP, "TXCIF array parity error", -1, 1 }, | ||
127 | { RCAP, "RXCA array parity error", -1, 1 }, | ||
128 | { OTDD, "outbound request TLP discarded", -1, 1 }, | ||
129 | { RDPE, "Rx data parity error", -1, 1 }, | ||
130 | { TDUE, "Tx uncorrectable data error", -1, 1 }, | ||
131 | { 0, NULL, 0, 0 } | ||
132 | }; | ||
133 | |||
134 | static struct intr_info pcie_intr_info[] = { | ||
135 | { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, | ||
136 | { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, | ||
137 | { MSIDATAPERR, "MSI data parity error", -1, 1 }, | ||
138 | { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, | ||
139 | { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, | ||
140 | { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, | ||
141 | { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, | ||
142 | { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, | ||
143 | { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, | ||
144 | { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, | ||
145 | { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, | ||
146 | { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, | ||
147 | { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, | ||
148 | { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, | ||
149 | { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, | ||
150 | { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, | ||
151 | { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, | ||
152 | { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, | ||
153 | { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, | ||
154 | { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, | ||
155 | { FIDPERR, "PCI FID parity error", -1, 1 }, | ||
156 | { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, | ||
157 | { MATAGPERR, "PCI MA tag parity error", -1, 1 }, | ||
158 | { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, | ||
159 | { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, | ||
160 | { RXWRPERR, "PCI Rx write parity error", -1, 1 }, | ||
161 | { RPLPERR, "PCI replay buffer parity error", -1, 1 }, | ||
162 | { PCIESINT, "PCI core secondary fault", -1, 1 }, | ||
163 | { PCIEPINT, "PCI core primary fault", -1, 1 }, | ||
164 | { UNXSPLCPLERR, "PCI unexpected split completion error", -1, | ||
165 | 0 }, | ||
166 | { 0, NULL, 0, 0 } | ||
167 | }; | ||
168 | |||
169 | int fat; | ||
170 | fat = csio_handle_intr_status(hw, | ||
171 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
172 | sysbus_intr_info) + | ||
173 | csio_handle_intr_status(hw, | ||
174 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
175 | pcie_port_intr_info) + | ||
176 | csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); | ||
177 | if (fat) | ||
178 | csio_hw_fatal_err(hw); | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * csio_t4_flash_cfg_addr - return the address of the flash configuration file | ||
183 | * @hw: the HW module | ||
184 | * | ||
185 | * Return the address within the flash where the Firmware Configuration | ||
186 | * File is stored. | ||
187 | */ | ||
188 | static unsigned int | ||
189 | csio_t4_flash_cfg_addr(struct csio_hw *hw) | ||
190 | { | ||
191 | return FLASH_CFG_OFFSET; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * csio_t4_mc_read - read from MC through backdoor accesses | ||
196 | * @hw: the hw module | ||
197 | * @idx: not used for T4 adapter | ||
198 | * @addr: address of first byte requested | ||
199 | * @data: 64 bytes of data containing the requested address | ||
200 | * @ecc: where to store the corresponding 64-bit ECC word | ||
201 | * | ||
202 | * Read 64 bytes of data from MC starting at a 64-byte-aligned address | ||
203 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
204 | * is assigned the 64-bit ECC word for the read data. | ||
205 | */ | ||
206 | static int | ||
207 | csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, | ||
208 | uint64_t *ecc) | ||
209 | { | ||
210 | int i; | ||
211 | |||
212 | if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST) | ||
213 | return -EBUSY; | ||
214 | csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR); | ||
215 | csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN); | ||
216 | csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN); | ||
217 | csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), | ||
218 | MC_BIST_CMD); | ||
219 | i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST, | ||
220 | 0, 10, 1, NULL); | ||
221 | if (i) | ||
222 | return i; | ||
223 | |||
224 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) | ||
225 | |||
226 | for (i = 15; i >= 0; i--) | ||
227 | *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); | ||
228 | if (ecc) | ||
229 | *ecc = csio_rd_reg64(hw, MC_DATA(16)); | ||
230 | #undef MC_DATA | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * csio_t4_edc_read - read from EDC through backdoor accesses | ||
236 | * @hw: the hw module | ||
237 | * @idx: which EDC to access | ||
238 | * @addr: address of first byte requested | ||
239 | * @data: 64 bytes of data containing the requested address | ||
240 | * @ecc: where to store the corresponding 64-bit ECC word | ||
241 | * | ||
242 | * Read 64 bytes of data from EDC starting at a 64-byte-aligned address | ||
243 | * that covers the requested address @addr. If @parity is not %NULL it | ||
244 | * is assigned the 64-bit ECC word for the read data. | ||
245 | */ | ||
246 | static int | ||
247 | csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, | ||
248 | uint64_t *ecc) | ||
249 | { | ||
250 | int i; | ||
251 | |||
252 | idx *= EDC_STRIDE; | ||
253 | if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST) | ||
254 | return -EBUSY; | ||
255 | csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx); | ||
256 | csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx); | ||
257 | csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx); | ||
258 | csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST, | ||
259 | EDC_BIST_CMD + idx); | ||
260 | i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST, | ||
261 | 0, 10, 1, NULL); | ||
262 | if (i) | ||
263 | return i; | ||
264 | |||
265 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) | ||
266 | |||
267 | for (i = 15; i >= 0; i--) | ||
268 | *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); | ||
269 | if (ecc) | ||
270 | *ecc = csio_rd_reg64(hw, EDC_DATA(16)); | ||
271 | #undef EDC_DATA | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window | ||
277 | * @hw: the csio_hw | ||
278 | * @win: PCI-E memory Window to use | ||
279 | * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1 | ||
280 | * @addr: address within indicated memory type | ||
281 | * @len: amount of memory to transfer | ||
282 | * @buf: host memory buffer | ||
283 | * @dir: direction of transfer 1 => read, 0 => write | ||
284 | * | ||
285 | * Reads/writes an [almost] arbitrary memory region in the firmware: the | ||
286 | * firmware memory address, length and host buffer must be aligned on | ||
287 | * 32-bit boundaries. The memory is transferred as a raw byte sequence | ||
288 | * from/to the firmware's memory. If this memory contains data | ||
289 | * structures which contain multi-byte integers, it's the caller's | ||
290 | * responsibility to perform appropriate byte order conversions. | ||
291 | */ | ||
292 | static int | ||
293 | csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, | ||
294 | u32 len, uint32_t *buf, int dir) | ||
295 | { | ||
296 | u32 pos, start, offset, memoffset, bar0; | ||
297 | u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base; | ||
298 | |||
299 | /* | ||
300 | * Argument sanity checks ... | ||
301 | */ | ||
302 | if ((addr & 0x3) || (len & 0x3)) | ||
303 | return -EINVAL; | ||
304 | |||
305 | /* Offset into the region of memory which is being accessed | ||
306 | * MEM_EDC0 = 0 | ||
307 | * MEM_EDC1 = 1 | ||
308 | * MEM_MC = 2 -- T4 | ||
309 | */ | ||
310 | edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A)); | ||
311 | if (mtype != MEM_MC1) | ||
312 | memoffset = (mtype * (edc_size * 1024 * 1024)); | ||
313 | else { | ||
314 | mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw, | ||
315 | MA_EXT_MEMORY_BAR_A)); | ||
316 | memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; | ||
317 | } | ||
318 | |||
319 | /* Determine the PCIE_MEM_ACCESS_OFFSET */ | ||
320 | addr = addr + memoffset; | ||
321 | |||
322 | /* | ||
323 | * Each PCI-E Memory Window is programmed with a window size -- or | ||
324 | * "aperture" -- which controls the granularity of its mapping onto | ||
325 | * adapter memory. We need to grab that aperture in order to know | ||
326 | * how to use the specified window. The window is also programmed | ||
327 | * with the base address of the Memory Window in BAR0's address | ||
328 | * space. For T4 this is an absolute PCI-E Bus Address. For T5 | ||
329 | * the address is relative to BAR0. | ||
330 | */ | ||
331 | mem_reg = csio_rd_reg32(hw, | ||
332 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | ||
333 | mem_aperture = 1 << (WINDOW(mem_reg) + 10); | ||
334 | mem_base = GET_PCIEOFST(mem_reg) << 10; | ||
335 | |||
336 | bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); | ||
337 | bar0 &= PCI_BASE_ADDRESS_MEM_MASK; | ||
338 | mem_base -= bar0; | ||
339 | |||
340 | start = addr & ~(mem_aperture-1); | ||
341 | offset = addr - start; | ||
342 | |||
343 | csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", | ||
344 | mem_reg, mem_aperture); | ||
345 | csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n", | ||
346 | mem_base, memoffset); | ||
347 | csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n", | ||
348 | bar0, start, offset); | ||
349 | csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n", | ||
350 | mtype, addr, len); | ||
351 | |||
352 | for (pos = start; len > 0; pos += mem_aperture, offset = 0) { | ||
353 | /* | ||
354 | * Move PCI-E Memory Window to our current transfer | ||
355 | * position. Read it back to ensure that changes propagate | ||
356 | * before we attempt to use the new value. | ||
357 | */ | ||
358 | csio_wr_reg32(hw, pos, | ||
359 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); | ||
360 | csio_rd_reg32(hw, | ||
361 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); | ||
362 | |||
363 | while (offset < mem_aperture && len > 0) { | ||
364 | if (dir) | ||
365 | *buf++ = csio_rd_reg32(hw, mem_base + offset); | ||
366 | else | ||
367 | csio_wr_reg32(hw, *buf++, mem_base + offset); | ||
368 | |||
369 | offset += sizeof(__be32); | ||
370 | len -= sizeof(__be32); | ||
371 | } | ||
372 | } | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | /* | ||
377 | * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values | ||
378 | * @hw: the csio_hw | ||
379 | * | ||
380 | * This function creates files in the debugfs with external memory region MC. | ||
381 | */ | ||
382 | static void | ||
383 | csio_t4_dfs_create_ext_mem(struct csio_hw *hw) | ||
384 | { | ||
385 | u32 size; | ||
386 | int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); | ||
387 | |||
388 | if (i & EXT_MEM_ENABLE_F) { | ||
389 | size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A); | ||
390 | csio_add_debugfs_mem(hw, "mc", MEM_MC, | ||
391 | EXT_MEM_SIZE_G(size)); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | /* T4 adapter specific function */ | ||
396 | struct csio_hw_chip_ops t4_ops = { | ||
397 | .chip_set_mem_win = csio_t4_set_mem_win, | ||
398 | .chip_pcie_intr_handler = csio_t4_pcie_intr_handler, | ||
399 | .chip_flash_cfg_addr = csio_t4_flash_cfg_addr, | ||
400 | .chip_mc_read = csio_t4_mc_read, | ||
401 | .chip_edc_read = csio_t4_edc_read, | ||
402 | .chip_memory_rw = csio_t4_memory_rw, | ||
403 | .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem, | ||
404 | }; | ||
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c index 66e180a58718..3267f4f627c9 100644 --- a/drivers/scsi/csiostor/csio_hw_t5.c +++ b/drivers/scsi/csiostor/csio_hw_t5.c | |||
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win) | |||
56 | * back MA register to ensure that changes propagate before we attempt | 56 | * back MA register to ensure that changes propagate before we attempt |
57 | * to use the new values.) | 57 | * to use the new values.) |
58 | */ | 58 | */ |
59 | csio_wr_reg32(hw, mem_win_base | BIR(0) | | 59 | csio_wr_reg32(hw, mem_win_base | BIR_V(0) | |
60 | WINDOW(ilog2(MEMWIN_APERTURE) - 10), | 60 | WINDOW_V(ilog2(MEMWIN_APERTURE) - 10), |
61 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | 61 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); |
62 | csio_rd_reg32(hw, | 62 | csio_rd_reg32(hw, |
63 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | 63 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); |
64 | 64 | ||
65 | return 0; | 65 | return 0; |
66 | } | 66 | } |
@@ -72,74 +72,74 @@ static void | |||
72 | csio_t5_pcie_intr_handler(struct csio_hw *hw) | 72 | csio_t5_pcie_intr_handler(struct csio_hw *hw) |
73 | { | 73 | { |
74 | static struct intr_info sysbus_intr_info[] = { | 74 | static struct intr_info sysbus_intr_info[] = { |
75 | { RNPP, "RXNP array parity error", -1, 1 }, | 75 | { RNPP_F, "RXNP array parity error", -1, 1 }, |
76 | { RPCP, "RXPC array parity error", -1, 1 }, | 76 | { RPCP_F, "RXPC array parity error", -1, 1 }, |
77 | { RCIP, "RXCIF array parity error", -1, 1 }, | 77 | { RCIP_F, "RXCIF array parity error", -1, 1 }, |
78 | { RCCP, "Rx completions control array parity error", -1, 1 }, | 78 | { RCCP_F, "Rx completions control array parity error", -1, 1 }, |
79 | { RFTP, "RXFT array parity error", -1, 1 }, | 79 | { RFTP_F, "RXFT array parity error", -1, 1 }, |
80 | { 0, NULL, 0, 0 } | 80 | { 0, NULL, 0, 0 } |
81 | }; | 81 | }; |
82 | static struct intr_info pcie_port_intr_info[] = { | 82 | static struct intr_info pcie_port_intr_info[] = { |
83 | { TPCP, "TXPC array parity error", -1, 1 }, | 83 | { TPCP_F, "TXPC array parity error", -1, 1 }, |
84 | { TNPP, "TXNP array parity error", -1, 1 }, | 84 | { TNPP_F, "TXNP array parity error", -1, 1 }, |
85 | { TFTP, "TXFT array parity error", -1, 1 }, | 85 | { TFTP_F, "TXFT array parity error", -1, 1 }, |
86 | { TCAP, "TXCA array parity error", -1, 1 }, | 86 | { TCAP_F, "TXCA array parity error", -1, 1 }, |
87 | { TCIP, "TXCIF array parity error", -1, 1 }, | 87 | { TCIP_F, "TXCIF array parity error", -1, 1 }, |
88 | { RCAP, "RXCA array parity error", -1, 1 }, | 88 | { RCAP_F, "RXCA array parity error", -1, 1 }, |
89 | { OTDD, "outbound request TLP discarded", -1, 1 }, | 89 | { OTDD_F, "outbound request TLP discarded", -1, 1 }, |
90 | { RDPE, "Rx data parity error", -1, 1 }, | 90 | { RDPE_F, "Rx data parity error", -1, 1 }, |
91 | { TDUE, "Tx uncorrectable data error", -1, 1 }, | 91 | { TDUE_F, "Tx uncorrectable data error", -1, 1 }, |
92 | { 0, NULL, 0, 0 } | 92 | { 0, NULL, 0, 0 } |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static struct intr_info pcie_intr_info[] = { | 95 | static struct intr_info pcie_intr_info[] = { |
96 | { MSTGRPPERR, "Master Response Read Queue parity error", | 96 | { MSTGRPPERR_F, "Master Response Read Queue parity error", |
97 | -1, 1 }, | 97 | -1, 1 }, |
98 | { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, | 98 | { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 }, |
99 | { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, | 99 | { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 }, |
100 | { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, | 100 | { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 }, |
101 | { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, | 101 | { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 }, |
102 | { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, | 102 | { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 }, |
103 | { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, | 103 | { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 }, |
104 | { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", | 104 | { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error", |
105 | -1, 1 }, | 105 | -1, 1 }, |
106 | { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", | 106 | { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error", |
107 | -1, 1 }, | 107 | -1, 1 }, |
108 | { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, | 108 | { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 }, |
109 | { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, | 109 | { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 }, |
110 | { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, | 110 | { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 }, |
111 | { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, | 111 | { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 }, |
112 | { DREQWRPERR, "PCI DMA channel write request parity error", | 112 | { DREQWRPERR_F, "PCI DMA channel write request parity error", |
113 | -1, 1 }, | 113 | -1, 1 }, |
114 | { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, | 114 | { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 }, |
115 | { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, | 115 | { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 }, |
116 | { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, | 116 | { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 }, |
117 | { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, | 117 | { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 }, |
118 | { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, | 118 | { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 }, |
119 | { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, | 119 | { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 }, |
120 | { FIDPERR, "PCI FID parity error", -1, 1 }, | 120 | { FIDPERR_F, "PCI FID parity error", -1, 1 }, |
121 | { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, | 121 | { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 }, |
122 | { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, | 122 | { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 }, |
123 | { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, | 123 | { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 }, |
124 | { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", | 124 | { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error", |
125 | -1, 1 }, | 125 | -1, 1 }, |
126 | { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", | 126 | { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error", |
127 | -1, 1 }, | 127 | -1, 1 }, |
128 | { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, | 128 | { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 }, |
129 | { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, | 129 | { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 }, |
130 | { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, | 130 | { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 }, |
131 | { READRSPERR, "Outbound read error", -1, 0 }, | 131 | { READRSPERR_F, "Outbound read error", -1, 0 }, |
132 | { 0, NULL, 0, 0 } | 132 | { 0, NULL, 0, 0 } |
133 | }; | 133 | }; |
134 | 134 | ||
135 | int fat; | 135 | int fat; |
136 | fat = csio_handle_intr_status(hw, | 136 | fat = csio_handle_intr_status(hw, |
137 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | 137 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A, |
138 | sysbus_intr_info) + | 138 | sysbus_intr_info) + |
139 | csio_handle_intr_status(hw, | 139 | csio_handle_intr_status(hw, |
140 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | 140 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A, |
141 | pcie_port_intr_info) + | 141 | pcie_port_intr_info) + |
142 | csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); | 142 | csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info); |
143 | if (fat) | 143 | if (fat) |
144 | csio_hw_fatal_err(hw); | 144 | csio_hw_fatal_err(hw); |
145 | } | 145 | } |
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, | |||
177 | uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; | 177 | uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; |
178 | uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; | 178 | uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; |
179 | 179 | ||
180 | mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx); | 180 | mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx); |
181 | mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx); | 181 | mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx); |
182 | mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx); | 182 | mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx); |
183 | mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx); | 183 | mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx); |
184 | mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx); | 184 | mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx); |
185 | 185 | ||
186 | if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST) | 186 | if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F) |
187 | return -EBUSY; | 187 | return -EBUSY; |
188 | csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg); | 188 | csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg); |
189 | csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg); | 189 | csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg); |
190 | csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg); | 190 | csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg); |
191 | csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), | 191 | csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1), |
192 | mc_bist_cmd_reg); | 192 | mc_bist_cmd_reg); |
193 | i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST, | 193 | i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F, |
194 | 0, 10, 1, NULL); | 194 | 0, 10, 1, NULL); |
195 | if (i) | 195 | if (i) |
196 | return i; | 196 | return i; |
197 | 197 | ||
198 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) | 198 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i) |
199 | 199 | ||
200 | for (i = 15; i >= 0; i--) | 200 | for (i = 15; i >= 0; i--) |
201 | *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); | 201 | *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); |
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, | |||
231 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) | 231 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) |
232 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) | 232 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) |
233 | 233 | ||
234 | edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx); | 234 | edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx); |
235 | edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx); | 235 | edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx); |
236 | edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx); | 236 | edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx); |
237 | edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx); | 237 | edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx); |
238 | edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx); | 238 | edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx); |
239 | #undef EDC_REG_T5 | 239 | #undef EDC_REG_T5 |
240 | #undef EDC_STRIDE_T5 | 240 | #undef EDC_STRIDE_T5 |
241 | 241 | ||
242 | if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST) | 242 | if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F) |
243 | return -EBUSY; | 243 | return -EBUSY; |
244 | csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg); | 244 | csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg); |
245 | csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg); | 245 | csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg); |
246 | csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern); | 246 | csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern); |
247 | csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), | 247 | csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1), |
248 | edc_bist_cmd_reg); | 248 | edc_bist_cmd_reg); |
249 | i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST, | 249 | i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F, |
250 | 0, 10, 1, NULL); | 250 | 0, 10, 1, NULL); |
251 | if (i) | 251 | if (i) |
252 | return i; | 252 | return i; |
253 | 253 | ||
254 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) | 254 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx) |
255 | 255 | ||
256 | for (i = 15; i >= 0; i--) | 256 | for (i = 15; i >= 0; i--) |
257 | *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); | 257 | *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); |
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, | |||
320 | * the address is relative to BAR0. | 320 | * the address is relative to BAR0. |
321 | */ | 321 | */ |
322 | mem_reg = csio_rd_reg32(hw, | 322 | mem_reg = csio_rd_reg32(hw, |
323 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); | 323 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); |
324 | mem_aperture = 1 << (WINDOW(mem_reg) + 10); | 324 | mem_aperture = 1 << (WINDOW_V(mem_reg) + 10); |
325 | mem_base = GET_PCIEOFST(mem_reg) << 10; | 325 | mem_base = PCIEOFST_G(mem_reg) << 10; |
326 | 326 | ||
327 | start = addr & ~(mem_aperture-1); | 327 | start = addr & ~(mem_aperture-1); |
328 | offset = addr - start; | 328 | offset = addr - start; |
329 | win_pf = V_PFNUM(hw->pfn); | 329 | win_pf = PFNUM_V(hw->pfn); |
330 | 330 | ||
331 | csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", | 331 | csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", |
332 | mem_reg, mem_aperture); | 332 | mem_reg, mem_aperture); |
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, | |||
344 | * before we attempt to use the new value. | 344 | * before we attempt to use the new value. |
345 | */ | 345 | */ |
346 | csio_wr_reg32(hw, pos | win_pf, | 346 | csio_wr_reg32(hw, pos | win_pf, |
347 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); | 347 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); |
348 | csio_rd_reg32(hw, | 348 | csio_rd_reg32(hw, |
349 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); | 349 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); |
350 | 350 | ||
351 | while (offset < mem_aperture && len > 0) { | 351 | while (offset < mem_aperture && len > 0) { |
352 | if (dir) | 352 | if (dir) |
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 34d20cc3e110..d9631e15f7b5 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c | |||
@@ -113,12 +113,9 @@ static const struct file_operations csio_mem_debugfs_fops = { | |||
113 | void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, | 113 | void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, |
114 | unsigned int idx, unsigned int size_mb) | 114 | unsigned int idx, unsigned int size_mb) |
115 | { | 115 | { |
116 | struct dentry *de; | 116 | debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root, |
117 | 117 | (void *)hw + idx, &csio_mem_debugfs_fops, | |
118 | de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root, | 118 | size_mb << 20); |
119 | (void *)hw + idx, &csio_mem_debugfs_fops); | ||
120 | if (de && de->d_inode) | ||
121 | de->d_inode->i_size = size_mb << 20; | ||
122 | } | 119 | } |
123 | 120 | ||
124 | static int csio_setup_debugfs(struct csio_hw *hw) | 121 | static int csio_setup_debugfs(struct csio_hw *hw) |
@@ -1176,9 +1173,8 @@ static struct pci_error_handlers csio_err_handler = { | |||
1176 | */ | 1173 | */ |
1177 | #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ | 1174 | #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ |
1178 | static struct pci_device_id csio_pci_tbl[] = { | 1175 | static struct pci_device_id csio_pci_tbl[] = { |
1179 | /* Define for iSCSI uses PF5, FCoE uses PF6 */ | 1176 | /* Define for FCoE uses PF6 */ |
1180 | #define CH_PCI_DEVICE_ID_FUNCTION 0x5 | 1177 | #define CH_PCI_DEVICE_ID_FUNCTION 0x6 |
1181 | #define CH_PCI_DEVICE_ID_FUNCTION2 0x6 | ||
1182 | 1178 | ||
1183 | #define CH_PCI_ID_TABLE_ENTRY(devid) \ | 1179 | #define CH_PCI_ID_TABLE_ENTRY(devid) \ |
1184 | { PCI_VDEVICE(CHELSIO, (devid)), 0 } | 1180 | { PCI_VDEVICE(CHELSIO, (devid)), 0 } |
@@ -1256,5 +1252,4 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC); | |||
1256 | MODULE_LICENSE(CSIO_DRV_LICENSE); | 1252 | MODULE_LICENSE(CSIO_DRV_LICENSE); |
1257 | MODULE_DEVICE_TABLE(pci, csio_pci_tbl); | 1253 | MODULE_DEVICE_TABLE(pci, csio_pci_tbl); |
1258 | MODULE_VERSION(CSIO_DRV_VERSION); | 1254 | MODULE_VERSION(CSIO_DRV_VERSION); |
1259 | MODULE_FIRMWARE(FW_FNAME_T4); | ||
1260 | MODULE_FIRMWARE(FW_FNAME_T5); | 1255 | MODULE_FIRMWARE(FW_FNAME_T5); |
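In csio_init.c, csio_add_debugfs_mem() now hands the region size straight to debugfs_create_file_size() instead of creating the file and then patching the dentry's inode size by hand. A hedged sketch of that helper in use (the function name, data pointer and fops below are placeholders, not the csiostor ones):

#include <linux/debugfs.h>
#include <linux/fs.h>

/*
 * Hypothetical helper: expose a memory region of size_mb megabytes as a
 * read-only debugfs file whose reported file size matches the region,
 * with no de->d_inode->i_size fixup needed.
 */
static void example_add_mem_file(struct dentry *root, const char *name,
				 void *region, unsigned int size_mb,
				 const struct file_operations *fops)
{
	debugfs_create_file_size(name, S_IRUSR, root, region, fops,
				 (loff_t)size_mb << 20);
}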
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c index a8c748a35f9c..2fb71c6c3b37 100644 --- a/drivers/scsi/csiostor/csio_isr.c +++ b/drivers/scsi/csiostor/csio_isr.c | |||
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id) | |||
317 | 317 | ||
318 | /* Disable the interrupt for this PCI function. */ | 318 | /* Disable the interrupt for this PCI function. */ |
319 | if (hw->intr_mode == CSIO_IM_INTX) | 319 | if (hw->intr_mode == CSIO_IM_INTX) |
320 | csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI)); | 320 | csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A)); |
321 | 321 | ||
322 | /* | 322 | /* |
323 | * The read in the following function will flush the | 323 | * The read in the following function will flush the |
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 87f9280d9b43..c00b2ff72b55 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c | |||
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, | |||
1758 | else { | 1758 | else { |
1759 | /* Program DSGL to dma payload */ | 1759 | /* Program DSGL to dma payload */ |
1760 | dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | | 1760 | dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
1761 | ULPTX_MORE | ULPTX_NSGE(1)); | 1761 | ULPTX_MORE_F | ULPTX_NSGE_V(1)); |
1762 | dsgl.len0 = cpu_to_be32(pld_len); | 1762 | dsgl.len0 = cpu_to_be32(pld_len); |
1763 | dsgl.addr0 = cpu_to_be64(pld->paddr); | 1763 | dsgl.addr0 = cpu_to_be64(pld->paddr); |
1764 | csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), | 1764 | csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), |
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 08c265c0f353..9451787ca7f2 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c | |||
@@ -327,7 +327,8 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | |||
327 | } | 327 | } |
328 | 328 | ||
329 | #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | 329 | #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ |
330 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) | 330 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\ |
331 | FW_PORT_CAP_ANEG) | ||
331 | 332 | ||
332 | /* | 333 | /* |
333 | * csio_mb_port- FW PORT command helper | 334 | * csio_mb_port- FW PORT command helper |
@@ -1104,8 +1105,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw, | |||
1104 | void | 1105 | void |
1105 | csio_mb_intr_enable(struct csio_hw *hw) | 1106 | csio_mb_intr_enable(struct csio_hw *hw) |
1106 | { | 1107 | { |
1107 | csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | 1108 | csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); |
1108 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | 1109 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); |
1109 | } | 1110 | } |
1110 | 1111 | ||
1111 | /* | 1112 | /* |
@@ -1117,8 +1118,9 @@ csio_mb_intr_enable(struct csio_hw *hw) | |||
1117 | void | 1118 | void |
1118 | csio_mb_intr_disable(struct csio_hw *hw) | 1119 | csio_mb_intr_disable(struct csio_hw *hw) |
1119 | { | 1120 | { |
1120 | csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | 1121 | csio_wr_reg32(hw, MBMSGRDYINTEN_V(0), |
1121 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | 1122 | MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); |
1123 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); | ||
1122 | } | 1124 | } |
1123 | 1125 | ||
1124 | static void | 1126 | static void |
@@ -1153,8 +1155,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw) | |||
1153 | { | 1155 | { |
1154 | int i; | 1156 | int i; |
1155 | __be64 cmd[CSIO_MB_MAX_REGS]; | 1157 | __be64 cmd[CSIO_MB_MAX_REGS]; |
1156 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | 1158 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); |
1157 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | 1159 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); |
1158 | int size = sizeof(struct fw_debug_cmd); | 1160 | int size = sizeof(struct fw_debug_cmd); |
1159 | 1161 | ||
1160 | /* Copy mailbox data */ | 1162 | /* Copy mailbox data */ |
@@ -1164,8 +1166,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw) | |||
1164 | csio_mb_dump_fw_dbg(hw, cmd); | 1166 | csio_mb_dump_fw_dbg(hw, cmd); |
1165 | 1167 | ||
1166 | /* Notify FW of mailbox by setting owner as UP */ | 1168 | /* Notify FW of mailbox by setting owner as UP */ |
1167 | csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW), | 1169 | csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | |
1168 | ctl_reg); | 1170 | MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); |
1169 | 1171 | ||
1170 | csio_rd_reg32(hw, ctl_reg); | 1172 | csio_rd_reg32(hw, ctl_reg); |
1171 | wmb(); | 1173 | wmb(); |
@@ -1187,8 +1189,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) | |||
1187 | __be64 *cmd = mbp->mb; | 1189 | __be64 *cmd = mbp->mb; |
1188 | __be64 hdr; | 1190 | __be64 hdr; |
1189 | struct csio_mbm *mbm = &hw->mbm; | 1191 | struct csio_mbm *mbm = &hw->mbm; |
1190 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | 1192 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); |
1191 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | 1193 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); |
1192 | int size = mbp->mb_size; | 1194 | int size = mbp->mb_size; |
1193 | int rv = -EINVAL; | 1195 | int rv = -EINVAL; |
1194 | struct fw_cmd_hdr *fw_hdr; | 1196 | struct fw_cmd_hdr *fw_hdr; |
@@ -1224,12 +1226,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) | |||
1224 | } | 1226 | } |
1225 | 1227 | ||
1226 | /* Now get ownership of mailbox */ | 1228 | /* Now get ownership of mailbox */ |
1227 | owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); | 1229 | owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); |
1228 | 1230 | ||
1229 | if (!csio_mb_is_host_owner(owner)) { | 1231 | if (!csio_mb_is_host_owner(owner)) { |
1230 | 1232 | ||
1231 | for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) | 1233 | for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) |
1232 | owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); | 1234 | owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); |
1233 | /* | 1235 | /* |
1234 | * Mailbox unavailable. In immediate mode, fail the command. | 1236 | * Mailbox unavailable. In immediate mode, fail the command. |
1235 | * In other modes, enqueue the request. | 1237 | * In other modes, enqueue the request. |
@@ -1271,10 +1273,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) | |||
1271 | if (mbp->mb_cbfn != NULL) { | 1273 | if (mbp->mb_cbfn != NULL) { |
1272 | mbm->mcurrent = mbp; | 1274 | mbm->mcurrent = mbp; |
1273 | mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); | 1275 | mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); |
1274 | csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | | 1276 | csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | |
1275 | MBOWNER(CSIO_MBOWNER_FW), ctl_reg); | 1277 | MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); |
1276 | } else | 1278 | } else |
1277 | csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW), | 1279 | csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW), |
1278 | ctl_reg); | 1280 | ctl_reg); |
1279 | 1281 | ||
1280 | /* Flush posted writes */ | 1282 | /* Flush posted writes */ |
@@ -1294,9 +1296,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) | |||
1294 | 1296 | ||
1295 | /* Check for response */ | 1297 | /* Check for response */ |
1296 | ctl = csio_rd_reg32(hw, ctl_reg); | 1298 | ctl = csio_rd_reg32(hw, ctl_reg); |
1297 | if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { | 1299 | if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { |
1298 | 1300 | ||
1299 | if (!(ctl & MBMSGVALID)) { | 1301 | if (!(ctl & MBMSGVALID_F)) { |
1300 | csio_wr_reg32(hw, 0, ctl_reg); | 1302 | csio_wr_reg32(hw, 0, ctl_reg); |
1301 | continue; | 1303 | continue; |
1302 | } | 1304 | } |
@@ -1457,16 +1459,16 @@ csio_mb_isr_handler(struct csio_hw *hw) | |||
1457 | __be64 *cmd; | 1459 | __be64 *cmd; |
1458 | uint32_t ctl, cim_cause, pl_cause; | 1460 | uint32_t ctl, cim_cause, pl_cause; |
1459 | int i; | 1461 | int i; |
1460 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | 1462 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); |
1461 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | 1463 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); |
1462 | int size; | 1464 | int size; |
1463 | __be64 hdr; | 1465 | __be64 hdr; |
1464 | struct fw_cmd_hdr *fw_hdr; | 1466 | struct fw_cmd_hdr *fw_hdr; |
1465 | 1467 | ||
1466 | pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE)); | 1468 | pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A)); |
1467 | cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); | 1469 | cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); |
1468 | 1470 | ||
1469 | if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) { | 1471 | if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) { |
1470 | CSIO_INC_STATS(hw, n_mbint_unexp); | 1472 | CSIO_INC_STATS(hw, n_mbint_unexp); |
1471 | return -EINVAL; | 1473 | return -EINVAL; |
1472 | } | 1474 | } |
@@ -1477,16 +1479,16 @@ csio_mb_isr_handler(struct csio_hw *hw) | |||
1477 | * the upper level cause register. In other words, CIM-cause | 1479 | * the upper level cause register. In other words, CIM-cause |
1478 | * first followed by PL-Cause next. | 1480 | * first followed by PL-Cause next. |
1479 | */ | 1481 | */ |
1480 | csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); | 1482 | csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); |
1481 | csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE)); | 1483 | csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A)); |
1482 | 1484 | ||
1483 | ctl = csio_rd_reg32(hw, ctl_reg); | 1485 | ctl = csio_rd_reg32(hw, ctl_reg); |
1484 | 1486 | ||
1485 | if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { | 1487 | if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { |
1486 | 1488 | ||
1487 | CSIO_DUMP_MB(hw, hw->pfn, data_reg); | 1489 | CSIO_DUMP_MB(hw, hw->pfn, data_reg); |
1488 | 1490 | ||
1489 | if (!(ctl & MBMSGVALID)) { | 1491 | if (!(ctl & MBMSGVALID_F)) { |
1490 | csio_warn(hw, | 1492 | csio_warn(hw, |
1491 | "Stray mailbox interrupt recvd," | 1493 | "Stray mailbox interrupt recvd," |
1492 | " mailbox data not valid\n"); | 1494 | " mailbox data not valid\n"); |
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 3987284e0d2a..2c4562d82dc0 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c | |||
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, | |||
298 | struct csio_dma_buf *dma_buf; | 298 | struct csio_dma_buf *dma_buf; |
299 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | 299 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); |
300 | 300 | ||
301 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE | | 301 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F | |
302 | ULPTX_NSGE(req->nsge)); | 302 | ULPTX_NSGE_V(req->nsge)); |
303 | /* Now add the data SGLs */ | 303 | /* Now add the data SGLs */ |
304 | if (likely(!req->dcopy)) { | 304 | if (likely(!req->dcopy)) { |
305 | scsi_for_each_sg(scmnd, sgel, req->nsge, i) { | 305 | scsi_for_each_sg(scmnd, sgel, req->nsge, i) { |
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 773da14cfa14..e8f18174f2e9 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c | |||
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */ | |||
51 | static int csio_sge_timer_reg = 1; | 51 | static int csio_sge_timer_reg = 1; |
52 | 52 | ||
53 | #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \ | 53 | #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \ |
54 | csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg) | 54 | csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A) |
55 | 55 | ||
56 | static void | 56 | static void |
57 | csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg) | 57 | csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg) |
58 | { | 58 | { |
59 | sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 + | 59 | sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A + |
60 | reg * sizeof(uint32_t)); | 60 | reg * sizeof(uint32_t)); |
61 | } | 61 | } |
62 | 62 | ||
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf) | |||
71 | static inline uint32_t | 71 | static inline uint32_t |
72 | csio_wr_qstat_pgsz(struct csio_hw *hw) | 72 | csio_wr_qstat_pgsz(struct csio_hw *hw) |
73 | { | 73 | { |
74 | return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64; | 74 | return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; |
75 | } | 75 | } |
76 | 76 | ||
77 | /* Ring freelist doorbell */ | 77 | /* Ring freelist doorbell */ |
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) | |||
84 | * 8 freelist buffer pointers (since each pointer is 8 bytes). | 84 | * 8 freelist buffer pointers (since each pointer is 8 bytes). |
85 | */ | 85 | */ |
86 | if (flq->inc_idx >= 8) { | 86 | if (flq->inc_idx >= 8) { |
87 | csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | | 87 | csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) | |
88 | CSIO_HW_PIDX(hw, flq->inc_idx / 8), | 88 | PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F, |
89 | MYPF_REG(SGE_PF_KDOORBELL)); | 89 | MYPF_REG(SGE_PF_KDOORBELL_A)); |
90 | flq->inc_idx &= 7; | 90 | flq->inc_idx &= 7; |
91 | } | 91 | } |
92 | } | 92 | } |
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) | |||
95 | static void | 95 | static void |
96 | csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid) | 96 | csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid) |
97 | { | 97 | { |
98 | csio_wr_reg32(hw, CIDXINC(0) | | 98 | csio_wr_reg32(hw, CIDXINC_V(0) | |
99 | INGRESSQID(iqid) | | 99 | INGRESSQID_V(iqid) | |
100 | TIMERREG(X_TIMERREG_RESTART_COUNTER), | 100 | TIMERREG_V(X_TIMERREG_RESTART_COUNTER), |
101 | MYPF_REG(SGE_PF_GTS)); | 101 | MYPF_REG(SGE_PF_GTS_A)); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) | |||
982 | 982 | ||
983 | wmb(); | 983 | wmb(); |
984 | /* Ring SGE Doorbell writing q->pidx into it */ | 984 | /* Ring SGE Doorbell writing q->pidx into it */ |
985 | csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | | 985 | csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) | |
986 | CSIO_HW_PIDX(hw, q->inc_idx), | 986 | PIDX_T5_V(q->inc_idx) | DBTYPE_F, |
987 | MYPF_REG(SGE_PF_KDOORBELL)); | 987 | MYPF_REG(SGE_PF_KDOORBELL_A)); |
988 | q->inc_idx = 0; | 988 | q->inc_idx = 0; |
989 | 989 | ||
990 | return 0; | 990 | return 0; |
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q, | |||
1242 | 1242 | ||
1243 | restart: | 1243 | restart: |
1244 | /* Now inform SGE about our incremental index value */ | 1244 | /* Now inform SGE about our incremental index value */ |
1245 | csio_wr_reg32(hw, CIDXINC(q->inc_idx) | | 1245 | csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) | |
1246 | INGRESSQID(q->un.iq.physiqid) | | 1246 | INGRESSQID_V(q->un.iq.physiqid) | |
1247 | TIMERREG(csio_sge_timer_reg), | 1247 | TIMERREG_V(csio_sge_timer_reg), |
1248 | MYPF_REG(SGE_PF_GTS)); | 1248 | MYPF_REG(SGE_PF_GTS_A)); |
1249 | q->stats.n_tot_rsps += q->inc_idx; | 1249 | q->stats.n_tot_rsps += q->inc_idx; |
1250 | 1250 | ||
1251 | q->inc_idx = 0; | 1251 | q->inc_idx = 0; |
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw) | |||
1310 | uint32_t ingpad = 0; | 1310 | uint32_t ingpad = 0; |
1311 | uint32_t stat_len = clsz > 64 ? 128 : 64; | 1311 | uint32_t stat_len = clsz > 64 ? 128 : 64; |
1312 | 1312 | ||
1313 | csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) | | 1313 | csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | |
1314 | HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) | | 1314 | HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | |
1315 | HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) | | 1315 | HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) | |
1316 | HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps), | 1316 | HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps), |
1317 | SGE_HOST_PAGE_SIZE); | 1317 | SGE_HOST_PAGE_SIZE_A); |
1318 | 1318 | ||
1319 | sge->csio_fl_align = clsz < 32 ? 32 : clsz; | 1319 | sge->csio_fl_align = clsz < 32 ? 32 : clsz; |
1320 | ingpad = ilog2(sge->csio_fl_align) - 5; | 1320 | ingpad = ilog2(sge->csio_fl_align) - 5; |
1321 | 1321 | ||
1322 | csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK | | 1322 | csio_set_reg_field(hw, SGE_CONTROL_A, |
1323 | EGRSTATUSPAGESIZE(1), | 1323 | INGPADBOUNDARY_V(INGPADBOUNDARY_M) | |
1324 | INGPADBOUNDARY(ingpad) | | 1324 | EGRSTATUSPAGESIZE_F, |
1325 | EGRSTATUSPAGESIZE(stat_len != 64)); | 1325 | INGPADBOUNDARY_V(ingpad) | |
1326 | EGRSTATUSPAGESIZE_V(stat_len != 64)); | ||
1326 | 1327 | ||
1327 | /* FL BUFFER SIZE#0 is the page size, i.e. already aligned to the cache line */ | 1328 |
1328 | csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); | 1329 | csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A); |
1329 | 1330 | ||
1330 | /* | 1331 | /* |
1331 | * If using hard params, the following will get set correctly | 1332 | * If using hard params, the following will get set correctly |
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw) | |||
1333 | */ | 1334 | */ |
1334 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { | 1335 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { |
1335 | csio_wr_reg32(hw, | 1336 | csio_wr_reg32(hw, |
1336 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + | 1337 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) + |
1337 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | 1338 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), |
1338 | SGE_FL_BUFFER_SIZE2); | 1339 | SGE_FL_BUFFER_SIZE2_A); |
1339 | csio_wr_reg32(hw, | 1340 | csio_wr_reg32(hw, |
1340 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + | 1341 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) + |
1341 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | 1342 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), |
1342 | SGE_FL_BUFFER_SIZE3); | 1343 | SGE_FL_BUFFER_SIZE3_A); |
1343 | } | 1344 | } |
1344 | 1345 | ||
1345 | csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); | 1346 | csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A); |
1346 | 1347 | ||
1347 | /* default value of rx_dma_offset of the NIC driver */ | 1348 | /* default value of rx_dma_offset of the NIC driver */ |
1348 | csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, | 1349 | csio_set_reg_field(hw, SGE_CONTROL_A, |
1349 | PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); | 1350 | PKTSHIFT_V(PKTSHIFT_M), |
1351 | PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET)); | ||
1350 | 1352 | ||
1351 | csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG, | 1353 | csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A, |
1352 | CSUM_HAS_PSEUDO_HDR, 0); | 1354 | CSUM_HAS_PSEUDO_HDR_F, 0); |
1353 | } | 1355 | } |
1354 | 1356 | ||
1355 | static void | 1357 | static void |
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw) | |||
1384 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; | 1386 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; |
1385 | u32 ingress_rx_threshold; | 1387 | u32 ingress_rx_threshold; |
1386 | 1388 | ||
1387 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); | 1389 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); |
1388 | 1390 | ||
1389 | ingpad = INGPADBOUNDARY_GET(sge->sge_control); | 1391 | ingpad = INGPADBOUNDARY_G(sge->sge_control); |
1390 | 1392 | ||
1391 | switch (ingpad) { | 1393 | switch (ingpad) { |
1392 | case X_INGPCIEBOUNDARY_32B: | 1394 | case X_INGPCIEBOUNDARY_32B: |
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw) | |||
1410 | for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) | 1412 | for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) |
1411 | csio_get_flbuf_size(hw, sge, i); | 1413 | csio_get_flbuf_size(hw, sge, i); |
1412 | 1414 | ||
1413 | timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1); | 1415 | timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A); |
1414 | timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3); | 1416 | timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A); |
1415 | timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5); | 1417 | timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A); |
1416 | 1418 | ||
1417 | sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw, | 1419 | sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw, |
1418 | TIMERVALUE0_GET(timer_value_0_and_1)); | 1420 | TIMERVALUE0_G(timer_value_0_and_1)); |
1419 | sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw, | 1421 | sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw, |
1420 | TIMERVALUE1_GET(timer_value_0_and_1)); | 1422 | TIMERVALUE1_G(timer_value_0_and_1)); |
1421 | sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw, | 1423 | sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw, |
1422 | TIMERVALUE2_GET(timer_value_2_and_3)); | 1424 | TIMERVALUE2_G(timer_value_2_and_3)); |
1423 | sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw, | 1425 | sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw, |
1424 | TIMERVALUE3_GET(timer_value_2_and_3)); | 1426 | TIMERVALUE3_G(timer_value_2_and_3)); |
1425 | sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw, | 1427 | sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw, |
1426 | TIMERVALUE4_GET(timer_value_4_and_5)); | 1428 | TIMERVALUE4_G(timer_value_4_and_5)); |
1427 | sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, | 1429 | sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, |
1428 | TIMERVALUE5_GET(timer_value_4_and_5)); | 1430 | TIMERVALUE5_G(timer_value_4_and_5)); |
1429 | 1431 | ||
1430 | ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD); | 1432 | ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A); |
1431 | sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); | 1433 | sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); |
1432 | sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); | 1434 | sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); |
1433 | sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); | 1435 | sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); |
1434 | sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); | 1436 | sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); |
1435 | 1437 | ||
1436 | csio_init_intr_coalesce_parms(hw); | 1438 | csio_init_intr_coalesce_parms(hw); |
1437 | } | 1439 | } |
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw) | |||
1454 | * Set up our basic SGE mode to deliver CPL messages to our Ingress | 1456 | * Set up our basic SGE mode to deliver CPL messages to our Ingress |
1455 | * Queue and Packet Data to the Free List. | 1457 |
1456 | */ | 1458 | */ |
1457 | csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1)); | 1459 | csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F); |
1458 | 1460 | ||
1459 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); | 1461 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); |
1460 | 1462 | ||
1461 | /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ | 1463 | /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ |
1462 | 1464 | ||
@@ -1464,22 +1466,23 @@ csio_wr_set_sge(struct csio_hw *hw) | |||
1464 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | 1466 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows |
1465 | * and generate an interrupt when this occurs so we can recover. | 1467 | * and generate an interrupt when this occurs so we can recover. |
1466 | */ | 1468 | */ |
1467 | csio_set_reg_field(hw, SGE_DBFIFO_STATUS, | 1469 | csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A, |
1468 | HP_INT_THRESH(HP_INT_THRESH_MASK) | | 1470 | LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), |
1469 | CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)), | 1471 | LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); |
1470 | HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | | 1472 | csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A, |
1471 | CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH)); | 1473 | HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), |
1474 | HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); | ||
1472 | 1475 | ||
1473 | csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, | 1476 | csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F, |
1474 | ENABLE_DROP); | 1477 | ENABLE_DROP_F); |
1475 | 1478 | ||
1476 | /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ | 1479 | /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ |
1477 | 1480 | ||
1478 | CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); | 1481 | CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); |
1479 | csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1) | 1482 | csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1) |
1480 | & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2); | 1483 | & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A); |
1481 | csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1) | 1484 | csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1) |
1482 | & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3); | 1485 | & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A); |
1483 | CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); | 1486 | CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); |
1484 | CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); | 1487 | CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); |
1485 | CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); | 1488 | CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); |
@@ -1502,26 +1505,26 @@ csio_wr_set_sge(struct csio_hw *hw) | |||
1502 | sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2; | 1505 | sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2; |
1503 | sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3; | 1506 | sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3; |
1504 | 1507 | ||
1505 | csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) | | 1508 | csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) | |
1506 | THRESHOLD_1(sge->counter_val[1]) | | 1509 | THRESHOLD_1_V(sge->counter_val[1]) | |
1507 | THRESHOLD_2(sge->counter_val[2]) | | 1510 | THRESHOLD_2_V(sge->counter_val[2]) | |
1508 | THRESHOLD_3(sge->counter_val[3]), | 1511 | THRESHOLD_3_V(sge->counter_val[3]), |
1509 | SGE_INGRESS_RX_THRESHOLD); | 1512 | SGE_INGRESS_RX_THRESHOLD_A); |
1510 | 1513 | ||
1511 | csio_wr_reg32(hw, | 1514 | csio_wr_reg32(hw, |
1512 | TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) | | 1515 | TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) | |
1513 | TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])), | 1516 | TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])), |
1514 | SGE_TIMER_VALUE_0_AND_1); | 1517 | SGE_TIMER_VALUE_0_AND_1_A); |
1515 | 1518 | ||
1516 | csio_wr_reg32(hw, | 1519 | csio_wr_reg32(hw, |
1517 | TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) | | 1520 | TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) | |
1518 | TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])), | 1521 | TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])), |
1519 | SGE_TIMER_VALUE_2_AND_3); | 1522 | SGE_TIMER_VALUE_2_AND_3_A); |
1520 | 1523 | ||
1521 | csio_wr_reg32(hw, | 1524 | csio_wr_reg32(hw, |
1522 | TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) | | 1525 | TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) | |
1523 | TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])), | 1526 | TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])), |
1524 | SGE_TIMER_VALUE_4_AND_5); | 1527 | SGE_TIMER_VALUE_4_AND_5_A); |
1525 | 1528 | ||
1526 | csio_init_intr_coalesce_parms(hw); | 1529 | csio_init_intr_coalesce_parms(hw); |
1527 | } | 1530 | } |
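Most of the churn in the csiostor hunks is mechanical: register and bitfield macros move to the cxgb4 naming convention, where an _A suffix marks a register address, _S/_M give a field's shift and mask, FIELD_V(x) shifts a value into place, FIELD_G(reg) extracts it, and FIELD_F is the single-bit flag form. A rough sketch of how such accessors are typically defined (the FOO/BAR names and bit positions are invented for illustration, not taken from the Chelsio headers):

/* Hypothetical field "FOO" occupying bits 7:4 of some register. */
#define FOO_S		4			/* shift */
#define FOO_M		0xfU			/* mask (field width) */
#define FOO_V(x)	((x) << FOO_S)		/* place a value into the field */
#define FOO_G(x)	(((x) >> FOO_S) & FOO_M)	/* extract the field */

/* Hypothetical single-bit flag "BAR" at bit 0. */
#define BAR_S		0
#define BAR_V(x)	((x) << BAR_S)
#define BAR_F		BAR_V(1U)		/* the flag itself */

/* Example: read-modify-write FOO while asserting BAR. */
static unsigned int example_update(unsigned int reg)
{
	reg &= ~FOO_V(FOO_M);		/* clear the old field value */
	reg |= FOO_V(0x3) | BAR_F;	/* set FOO = 3 and raise BAR */
	return reg;
}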
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index a83d2ceded83..dd00e5fe4a5e 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "t4fw_api.h" | 28 | #include "t4fw_api.h" |
29 | #include "l2t.h" | 29 | #include "l2t.h" |
30 | #include "cxgb4i.h" | 30 | #include "cxgb4i.h" |
31 | #include "clip_tbl.h" | ||
31 | 32 | ||
32 | static unsigned int dbg_level; | 33 | static unsigned int dbg_level; |
33 | 34 | ||
@@ -704,7 +705,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
704 | struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; | 705 | struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; |
705 | unsigned short tcp_opt = ntohs(req->tcp_opt); | 706 | unsigned short tcp_opt = ntohs(req->tcp_opt); |
706 | unsigned int tid = GET_TID(req); | 707 | unsigned int tid = GET_TID(req); |
707 | unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); | 708 | unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); |
708 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); | 709 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
709 | struct tid_info *t = lldi->tids; | 710 | struct tid_info *t = lldi->tids; |
710 | u32 rcv_isn = be32_to_cpu(req->rcv_isn); | 711 | u32 rcv_isn = be32_to_cpu(req->rcv_isn); |
@@ -752,15 +753,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
752 | if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) | 753 | if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) |
753 | csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); | 754 | csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); |
754 | 755 | ||
755 | csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; | 756 | csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; |
756 | if (GET_TCPOPT_TSTAMP(tcp_opt)) | 757 | if (TCPOPT_TSTAMP_G(tcp_opt)) |
757 | csk->advmss -= 12; | 758 | csk->advmss -= 12; |
758 | if (csk->advmss < 128) | 759 | if (csk->advmss < 128) |
759 | csk->advmss = 128; | 760 | csk->advmss = 128; |
760 | 761 | ||
761 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 762 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
762 | "csk 0x%p, mss_idx %u, advmss %u.\n", | 763 | "csk 0x%p, mss_idx %u, advmss %u.\n", |
763 | csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); | 764 | csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); |
764 | 765 | ||
765 | cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); | 766 | cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); |
766 | 767 | ||
@@ -856,8 +857,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
856 | struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; | 857 | struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; |
857 | unsigned int tid = GET_TID(rpl); | 858 | unsigned int tid = GET_TID(rpl); |
858 | unsigned int atid = | 859 | unsigned int atid = |
859 | GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status))); | 860 | TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status))); |
860 | unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); | 861 | unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status)); |
861 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); | 862 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
862 | struct tid_info *t = lldi->tids; | 863 | struct tid_info *t = lldi->tids; |
863 | 864 | ||
@@ -1112,7 +1113,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
1112 | hlen = ntohs(cpl->len); | 1113 | hlen = ntohs(cpl->len); |
1113 | dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; | 1114 | dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; |
1114 | 1115 | ||
1115 | plen = ISCSI_PDU_LEN(pdu_len_ddp); | 1116 | plen = ISCSI_PDU_LEN_G(pdu_len_ddp); |
1116 | if (is_t4(lldi->adapter_type)) | 1117 | if (is_t4(lldi->adapter_type)) |
1117 | plen -= 40; | 1118 | plen -= 40; |
1118 | 1119 | ||
@@ -1322,6 +1323,9 @@ static inline void l2t_put(struct cxgbi_sock *csk) | |||
1322 | static void release_offload_resources(struct cxgbi_sock *csk) | 1323 | static void release_offload_resources(struct cxgbi_sock *csk) |
1323 | { | 1324 | { |
1324 | struct cxgb4_lld_info *lldi; | 1325 | struct cxgb4_lld_info *lldi; |
1326 | #if IS_ENABLED(CONFIG_IPV6) | ||
1327 | struct net_device *ndev = csk->cdev->ports[csk->port_id]; | ||
1328 | #endif | ||
1325 | 1329 | ||
1326 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 1330 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
1327 | "csk 0x%p,%u,0x%lx,%u.\n", | 1331 | "csk 0x%p,%u,0x%lx,%u.\n", |
@@ -1334,6 +1338,12 @@ static void release_offload_resources(struct cxgbi_sock *csk) | |||
1334 | } | 1338 | } |
1335 | 1339 | ||
1336 | l2t_put(csk); | 1340 | l2t_put(csk); |
1341 | #if IS_ENABLED(CONFIG_IPV6) | ||
1342 | if (csk->csk_family == AF_INET6) | ||
1343 | cxgb4_clip_release(ndev, | ||
1344 | (const u32 *)&csk->saddr6.sin6_addr, 1); | ||
1345 | #endif | ||
1346 | |||
1337 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) | 1347 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) |
1338 | free_atid(csk); | 1348 | free_atid(csk); |
1339 | else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { | 1349 | else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { |
@@ -1391,10 +1401,15 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
1391 | csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); | 1401 | csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); |
1392 | if (!csk->l2t) { | 1402 | if (!csk->l2t) { |
1393 | pr_err("%s, cannot alloc l2t.\n", ndev->name); | 1403 | pr_err("%s, cannot alloc l2t.\n", ndev->name); |
1394 | goto rel_resource; | 1404 | goto rel_resource_without_clip; |
1395 | } | 1405 | } |
1396 | cxgbi_sock_get(csk); | 1406 | cxgbi_sock_get(csk); |
1397 | 1407 | ||
1408 | #if IS_ENABLED(CONFIG_IPV6) | ||
1409 | if (csk->csk_family == AF_INET6) | ||
1410 | cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); | ||
1411 | #endif | ||
1412 | |||
1398 | if (t4) { | 1413 | if (t4) { |
1399 | size = sizeof(struct cpl_act_open_req); | 1414 | size = sizeof(struct cpl_act_open_req); |
1400 | size6 = sizeof(struct cpl_act_open_req6); | 1415 | size6 = sizeof(struct cpl_act_open_req6); |
@@ -1451,6 +1466,12 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
1451 | return 0; | 1466 | return 0; |
1452 | 1467 | ||
1453 | rel_resource: | 1468 | rel_resource: |
1469 | #if IS_ENABLED(CONFIG_IPV6) | ||
1470 | if (csk->csk_family == AF_INET6) | ||
1471 | cxgb4_clip_release(ndev, | ||
1472 | (const u32 *)&csk->saddr6.sin6_addr, 1); | ||
1473 | #endif | ||
1474 | rel_resource_without_clip: | ||
1454 | if (n) | 1475 | if (n) |
1455 | neigh_release(n); | 1476 | neigh_release(n); |
1456 | if (skb) | 1477 | if (skb) |
@@ -1619,7 +1640,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
1619 | req = (struct cpl_set_tcb_field *)skb->head; | 1640 | req = (struct cpl_set_tcb_field *)skb->head; |
1620 | INIT_TP_WR(req, csk->tid); | 1641 | INIT_TP_WR(req, csk->tid); |
1621 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | 1642 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); |
1622 | req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); | 1643 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); |
1623 | req->word_cookie = htons(0); | 1644 | req->word_cookie = htons(0); |
1624 | req->mask = cpu_to_be64(0x3 << 8); | 1645 | req->mask = cpu_to_be64(0x3 << 8); |
1625 | req->val = cpu_to_be64(pg_idx << 8); | 1646 | req->val = cpu_to_be64(pg_idx << 8); |
@@ -1651,7 +1672,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
1651 | req = (struct cpl_set_tcb_field *)skb->head; | 1672 | req = (struct cpl_set_tcb_field *)skb->head; |
1652 | INIT_TP_WR(req, tid); | 1673 | INIT_TP_WR(req, tid); |
1653 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1674 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
1654 | req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); | 1675 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); |
1655 | req->word_cookie = htons(0); | 1676 | req->word_cookie = htons(0); |
1656 | req->mask = cpu_to_be64(0x3 << 4); | 1677 | req->mask = cpu_to_be64(0x3 << 4); |
1657 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | | 1678 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | |
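The cxgb4i changes above also tie a CLIP (Compressed Local IP) table reference to each offloaded IPv6 connection: cxgb4_clip_get() is called in init_act_open() once the L2T entry is held, and cxgb4_clip_release() runs both on the new rel_resource error path and in release_offload_resources(). A condensed sketch of that acquire/release shape; example_send_act_open() and the error handling are simplified placeholders for the fuller logic shown in the hunks:

#include "cxgb4i.h"
#include "clip_tbl.h"

#if IS_ENABLED(CONFIG_IPV6)
static int example_open_conn(struct cxgbi_sock *csk, struct net_device *ndev)
{
	/* Pin the IPv6 source address in the hardware CLIP table. */
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);

	if (example_send_act_open(csk) < 0)
		goto rel_clip;
	return 0;

rel_clip:
	/* Connection never came up: drop the CLIP reference again. */
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
	return -EIO;
}
#endif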
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 0c6be0a17f53..5ee7f44cf869 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -4610,13 +4610,10 @@ static void adapter_uninit(struct AdapterCtlBlk *acb) | |||
4610 | } | 4610 | } |
4611 | 4611 | ||
4612 | 4612 | ||
4613 | #undef SPRINTF | ||
4614 | #define SPRINTF(args...) seq_printf(m,##args) | ||
4615 | |||
4616 | #undef YESNO | 4613 | #undef YESNO |
4617 | #define YESNO(YN) \ | 4614 | #define YESNO(YN) \ |
4618 | if (YN) SPRINTF(" Yes ");\ | 4615 | if (YN) seq_printf(m, " Yes ");\ |
4619 | else SPRINTF(" No ") | 4616 | else seq_printf(m, " No ") |
4620 | 4617 | ||
4621 | static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | 4618 | static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) |
4622 | { | 4619 | { |
@@ -4626,47 +4623,45 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
4626 | unsigned long flags; | 4623 | unsigned long flags; |
4627 | int dev; | 4624 | int dev; |
4628 | 4625 | ||
4629 | SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n"); | 4626 | seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n" |
4630 | SPRINTF(" Driver Version " DC395X_VERSION "\n"); | 4627 | " Driver Version " DC395X_VERSION "\n"); |
4631 | 4628 | ||
4632 | DC395x_LOCK_IO(acb->scsi_host, flags); | 4629 | DC395x_LOCK_IO(acb->scsi_host, flags); |
4633 | 4630 | ||
4634 | SPRINTF("SCSI Host Nr %i, ", host->host_no); | 4631 | seq_printf(m, "SCSI Host Nr %i, ", host->host_no); |
4635 | SPRINTF("DC395U/UW/F DC315/U %s\n", | 4632 | seq_printf(m, "DC395U/UW/F DC315/U %s\n", |
4636 | (acb->config & HCC_WIDE_CARD) ? "Wide" : ""); | 4633 | (acb->config & HCC_WIDE_CARD) ? "Wide" : ""); |
4637 | SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base); | 4634 | seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base); |
4638 | SPRINTF("irq_level 0x%04x, ", acb->irq_level); | 4635 | seq_printf(m, "irq_level 0x%04x, ", acb->irq_level); |
4639 | SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); | 4636 | seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); |
4640 | 4637 | ||
4641 | SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); | 4638 | seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); |
4642 | SPRINTF("AdapterID %i\n", host->this_id); | 4639 | seq_printf(m, "AdapterID %i\n", host->this_id); |
4643 | 4640 | ||
4644 | SPRINTF("tag_max_num %i", acb->tag_max_num); | 4641 | seq_printf(m, "tag_max_num %i", acb->tag_max_num); |
4645 | /*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ | 4642 | /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ |
4646 | SPRINTF(", FilterCfg 0x%02x", | 4643 | seq_printf(m, ", FilterCfg 0x%02x", |
4647 | DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); | 4644 | DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); |
4648 | SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time); | 4645 | seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time); |
4649 | /*SPRINTF("\n"); */ | 4646 | /*seq_printf(m, "\n"); */ |
4650 | 4647 | ||
4651 | SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list)); | 4648 | seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list)); |
4652 | SPRINTF | 4649 | seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n", |
4653 | ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
4654 | acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2], | 4650 | acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2], |
4655 | acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5], | 4651 | acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5], |
4656 | acb->dcb_map[6], acb->dcb_map[7]); | 4652 | acb->dcb_map[6], acb->dcb_map[7]); |
4657 | SPRINTF | 4653 | seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n", |
4658 | (" %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
4659 | acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10], | 4654 | acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10], |
4660 | acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13], | 4655 | acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13], |
4661 | acb->dcb_map[14], acb->dcb_map[15]); | 4656 | acb->dcb_map[14], acb->dcb_map[15]); |
4662 | 4657 | ||
4663 | SPRINTF | 4658 | seq_puts(m, |
4664 | ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); | 4659 | "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); |
4665 | 4660 | ||
4666 | dev = 0; | 4661 | dev = 0; |
4667 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4662 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4668 | int nego_period; | 4663 | int nego_period; |
4669 | SPRINTF("%02i %02i %02i ", dev, dcb->target_id, | 4664 | seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id, |
4670 | dcb->target_lun); | 4665 | dcb->target_lun); |
4671 | YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); | 4666 | YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); |
4672 | YESNO(dcb->sync_offset); | 4667 | YESNO(dcb->sync_offset); |
@@ -4676,53 +4671,53 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
4676 | YESNO(dcb->sync_mode & EN_TAG_QUEUEING); | 4671 | YESNO(dcb->sync_mode & EN_TAG_QUEUEING); |
4677 | nego_period = clock_period[dcb->sync_period & 0x07] << 2; | 4672 | nego_period = clock_period[dcb->sync_period & 0x07] << 2; |
4678 | if (dcb->sync_offset) | 4673 | if (dcb->sync_offset) |
4679 | SPRINTF(" %03i ns ", nego_period); | 4674 | seq_printf(m, " %03i ns ", nego_period); |
4680 | else | 4675 | else |
4681 | SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2)); | 4676 | seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2)); |
4682 | 4677 | ||
4683 | if (dcb->sync_offset & 0x0f) { | 4678 | if (dcb->sync_offset & 0x0f) { |
4684 | spd = 1000 / (nego_period); | 4679 | spd = 1000 / (nego_period); |
4685 | spd1 = 1000 % (nego_period); | 4680 | spd1 = 1000 % (nego_period); |
4686 | spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); | 4681 | spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); |
4687 | SPRINTF(" %2i.%1i M %02i ", spd, spd1, | 4682 | seq_printf(m, " %2i.%1i M %02i ", spd, spd1, |
4688 | (dcb->sync_offset & 0x0f)); | 4683 | (dcb->sync_offset & 0x0f)); |
4689 | } else | 4684 | } else |
4690 | SPRINTF(" "); | 4685 | seq_puts(m, " "); |
4691 | 4686 | ||
4692 | /* Add more info ... */ | 4687 | /* Add more info ... */ |
4693 | SPRINTF(" %02i\n", dcb->max_command); | 4688 | seq_printf(m, " %02i\n", dcb->max_command); |
4694 | dev++; | 4689 | dev++; |
4695 | } | 4690 | } |
4696 | 4691 | ||
4697 | if (timer_pending(&acb->waiting_timer)) | 4692 | if (timer_pending(&acb->waiting_timer)) |
4698 | SPRINTF("Waiting queue timer running\n"); | 4693 | seq_puts(m, "Waiting queue timer running\n"); |
4699 | else | 4694 | else |
4700 | SPRINTF("\n"); | 4695 | seq_putc(m, '\n'); |
4701 | 4696 | ||
4702 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4697 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4703 | struct ScsiReqBlk *srb; | 4698 | struct ScsiReqBlk *srb; |
4704 | if (!list_empty(&dcb->srb_waiting_list)) | 4699 | if (!list_empty(&dcb->srb_waiting_list)) |
4705 | SPRINTF("DCB (%02i-%i): Waiting: %i:", | 4700 | seq_printf(m, "DCB (%02i-%i): Waiting: %i:", |
4706 | dcb->target_id, dcb->target_lun, | 4701 | dcb->target_id, dcb->target_lun, |
4707 | list_size(&dcb->srb_waiting_list)); | 4702 | list_size(&dcb->srb_waiting_list)); |
4708 | list_for_each_entry(srb, &dcb->srb_waiting_list, list) | 4703 | list_for_each_entry(srb, &dcb->srb_waiting_list, list) |
4709 | SPRINTF(" %p", srb->cmd); | 4704 | seq_printf(m, " %p", srb->cmd); |
4710 | if (!list_empty(&dcb->srb_going_list)) | 4705 | if (!list_empty(&dcb->srb_going_list)) |
4711 | SPRINTF("\nDCB (%02i-%i): Going : %i:", | 4706 | seq_printf(m, "\nDCB (%02i-%i): Going : %i:", |
4712 | dcb->target_id, dcb->target_lun, | 4707 | dcb->target_id, dcb->target_lun, |
4713 | list_size(&dcb->srb_going_list)); | 4708 | list_size(&dcb->srb_going_list)); |
4714 | list_for_each_entry(srb, &dcb->srb_going_list, list) | 4709 | list_for_each_entry(srb, &dcb->srb_going_list, list) |
4715 | SPRINTF(" %p", srb->cmd); | 4710 | seq_printf(m, " %p", srb->cmd); |
4716 | if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) | 4711 | if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) |
4717 | SPRINTF("\n"); | 4712 | seq_putc(m, '\n'); |
4718 | } | 4713 | } |
4719 | 4714 | ||
4720 | if (debug_enabled(DBG_1)) { | 4715 | if (debug_enabled(DBG_1)) { |
4721 | SPRINTF("DCB list for ACB %p:\n", acb); | 4716 | seq_printf(m, "DCB list for ACB %p:\n", acb); |
4722 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4717 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4723 | SPRINTF("%p -> ", dcb); | 4718 | seq_printf(m, "%p -> ", dcb); |
4724 | } | 4719 | } |
4725 | SPRINTF("END\n"); | 4720 | seq_puts(m, "END\n"); |
4726 | } | 4721 | } |
4727 | 4722 | ||
4728 | DC395x_UNLOCK_IO(acb->scsi_host, flags); | 4723 | DC395x_UNLOCK_IO(acb->scsi_host, flags); |
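
Note on the conversion above: the dc395x hunk, like the dpt_i2o, eata_pio, and gdth hunks that follow, replaces seq_printf() calls that take a constant format string with seq_puts(), and single-character output with seq_putc(). A minimal illustrative show handler using those helpers (names and values here are placeholders, not the driver's code):

	#include <linux/seq_file.h>

	/* Illustrative only: constant strings via seq_puts(), a single
	 * character via seq_putc(), formatted output via seq_printf(). */
	static int demo_show_info(struct seq_file *m, void *v)
	{
		int qdepth = 32;	/* placeholder value */

		seq_puts(m, "Demo Adapter\n");
		seq_printf(m, "Queue depth: %d\n", qdepth);
		seq_putc(m, '\n');
		return 0;
	}
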
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 0bf976936a10..2806cfbec2b9 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -568,7 +568,7 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
568 | seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", | 568 | seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", |
569 | host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); | 569 | host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); |
570 | 570 | ||
571 | seq_printf(m, "Devices:\n"); | 571 | seq_puts(m, "Devices:\n"); |
572 | for(chan = 0; chan < MAX_CHANNEL; chan++) { | 572 | for(chan = 0; chan < MAX_CHANNEL; chan++) { |
573 | for(id = 0; id < MAX_ID; id++) { | 573 | for(id = 0; id < MAX_ID; id++) { |
574 | d = pHba->channel[chan].device[id]; | 574 | d = pHba->channel[chan].device[id]; |
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 8319d2b417b8..ca8003f0d8a3 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -102,7 +102,7 @@ static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
102 | shost->host_no, SD(shost)->name); | 102 | shost->host_no, SD(shost)->name); |
103 | seq_printf(m, "Firmware revision: v%s\n", | 103 | seq_printf(m, "Firmware revision: v%s\n", |
104 | SD(shost)->revision); | 104 | SD(shost)->revision); |
105 | seq_printf(m, "IO: PIO\n"); | 105 | seq_puts(m, "IO: PIO\n"); |
106 | seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base); | 106 | seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base); |
107 | seq_printf(m, "Host Bus: %s\n", | 107 | seq_printf(m, "Host Bus: %s\n", |
108 | (SD(shost)->bustype == 'P')?"PCI ": | 108 | (SD(shost)->bustype == 'P')?"PCI ": |
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 6776931e25d4..78ce4d61a69b 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c | |||
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) | |||
813 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, | 813 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, |
814 | &devcontrol); | 814 | &devcontrol); |
815 | 815 | ||
816 | if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) { | 816 | if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > |
817 | PCI_EXP_DEVCTL_READRQ_512B) { | ||
817 | esas2r_log(ESAS2R_LOG_INFO, | 818 | esas2r_log(ESAS2R_LOG_INFO, |
818 | "max read request size > 512B"); | 819 | "max read request size > 512B"); |
819 | 820 | ||
820 | devcontrol &= ~PCI_EXP_DEVCTL_READRQ; | 821 | devcontrol &= ~PCI_EXP_DEVCTL_READRQ; |
821 | devcontrol |= 0x2000; | 822 | devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; |
822 | pci_write_config_word(a->pcid, | 823 | pci_write_config_word(a->pcid, |
823 | pcie_cap_reg + PCI_EXP_DEVCTL, | 824 | pcie_cap_reg + PCI_EXP_DEVCTL, |
824 | devcontrol); | 825 | devcontrol); |
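
The esas2r hunk above swaps the magic 0x2000 for the named PCI_EXP_DEVCTL_READRQ_512B constant. A hedged sketch of clamping the PCIe Max Read Request Size to 512 bytes with symbolic names; this sketch uses the generic pcie_capability_*() helpers rather than the driver's raw config-space reads at pcie_cap_reg + PCI_EXP_DEVCTL:

	#include <linux/pci.h>

	/* Sketch: clamp Max Read Request Size to 512B using named constants. */
	static void clamp_mrrs_to_512b(struct pci_dev *pdev)
	{
		u16 devctl;

		if (pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl))
			return;

		if ((devctl & PCI_EXP_DEVCTL_READRQ) > PCI_EXP_DEVCTL_READRQ_512B) {
			devctl &= ~PCI_EXP_DEVCTL_READRQ;
			devctl |= PCI_EXP_DEVCTL_READRQ_512B;
			pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, devctl);
		}
	}

A driver could also let the PCI core do this via pcie_set_readrq(pdev, 512); the constant-based version above simply mirrors the structure of the change shown.
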
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 7e1c21e6736b..31f8966b2e03 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c | |||
@@ -749,7 +749,7 @@ int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) | |||
749 | if (dev_count == 0) | 749 | if (dev_count == 0) |
750 | seq_puts(m, "none\n"); | 750 | seq_puts(m, "none\n"); |
751 | 751 | ||
752 | seq_puts(m, "\n"); | 752 | seq_putc(m, '\n'); |
753 | return 0; | 753 | return 0; |
754 | 754 | ||
755 | } | 755 | } |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index ce5bd52fe692..065b25df741b 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -2396,8 +2396,6 @@ int scsi_esp_register(struct esp *esp, struct device *dev) | |||
2396 | 2396 | ||
2397 | if (!esp->num_tags) | 2397 | if (!esp->num_tags) |
2398 | esp->num_tags = ESP_DEFAULT_TAGS; | 2398 | esp->num_tags = ESP_DEFAULT_TAGS; |
2399 | else if (esp->num_tags >= ESP_MAX_TAG) | ||
2400 | esp->num_tags = ESP_MAX_TAG - 1; | ||
2401 | esp->host->transportt = esp_transport_template; | 2399 | esp->host->transportt = esp_transport_template; |
2402 | esp->host->max_lun = ESP_MAX_LUN; | 2400 | esp->host->max_lun = ESP_MAX_LUN; |
2403 | esp->host->cmd_per_lun = 2; | 2401 | esp->host->cmd_per_lun = 2; |
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 9fb632684863..e66e997992e3 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c | |||
@@ -173,7 +173,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
173 | /* request is i.e. "cat /proc/scsi/gdth/0" */ | 173 | /* request is i.e. "cat /proc/scsi/gdth/0" */ |
174 | /* format: %-15s\t%-10s\t%-15s\t%s */ | 174 | /* format: %-15s\t%-10s\t%-15s\t%s */ |
175 | /* driver parameters */ | 175 | /* driver parameters */ |
176 | seq_printf(m, "Driver Parameters:\n"); | 176 | seq_puts(m, "Driver Parameters:\n"); |
177 | if (reserve_list[0] == 0xff) | 177 | if (reserve_list[0] == 0xff) |
178 | strcpy(hrec, "--"); | 178 | strcpy(hrec, "--"); |
179 | else { | 179 | else { |
@@ -192,7 +192,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
192 | max_ids, hdr_channel); | 192 | max_ids, hdr_channel); |
193 | 193 | ||
194 | /* controller information */ | 194 | /* controller information */ |
195 | seq_printf(m,"\nDisk Array Controller Information:\n"); | 195 | seq_puts(m, "\nDisk Array Controller Information:\n"); |
196 | seq_printf(m, | 196 | seq_printf(m, |
197 | " Number: \t%d \tName: \t%s\n", | 197 | " Number: \t%d \tName: \t%s\n", |
198 | ha->hanum, ha->binfo.type_string); | 198 | ha->hanum, ha->binfo.type_string); |
@@ -219,7 +219,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
219 | 219 | ||
220 | #ifdef GDTH_DMA_STATISTICS | 220 | #ifdef GDTH_DMA_STATISTICS |
221 | /* controller statistics */ | 221 | /* controller statistics */ |
222 | seq_printf(m,"\nController Statistics:\n"); | 222 | seq_puts(m, "\nController Statistics:\n"); |
223 | seq_printf(m, | 223 | seq_printf(m, |
224 | " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", | 224 | " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", |
225 | ha->dma32_cnt, ha->dma64_cnt); | 225 | ha->dma32_cnt, ha->dma64_cnt); |
@@ -227,7 +227,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
227 | 227 | ||
228 | if (ha->more_proc) { | 228 | if (ha->more_proc) { |
229 | /* more information: 2. about physical devices */ | 229 | /* more information: 2. about physical devices */ |
230 | seq_printf(m, "\nPhysical Devices:"); | 230 | seq_puts(m, "\nPhysical Devices:"); |
231 | flag = FALSE; | 231 | flag = FALSE; |
232 | 232 | ||
233 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 233 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -326,10 +326,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
326 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 326 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
327 | 327 | ||
328 | if (!flag) | 328 | if (!flag) |
329 | seq_printf(m, "\n --\n"); | 329 | seq_puts(m, "\n --\n"); |
330 | 330 | ||
331 | /* 3. about logical drives */ | 331 | /* 3. about logical drives */ |
332 | seq_printf(m,"\nLogical Drives:"); | 332 | seq_puts(m, "\nLogical Drives:"); |
333 | flag = FALSE; | 333 | flag = FALSE; |
334 | 334 | ||
335 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 335 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -411,10 +411,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
411 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 411 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
412 | 412 | ||
413 | if (!flag) | 413 | if (!flag) |
414 | seq_printf(m, "\n --\n"); | 414 | seq_puts(m, "\n --\n"); |
415 | 415 | ||
416 | /* 4. about array drives */ | 416 | /* 4. about array drives */ |
417 | seq_printf(m,"\nArray Drives:"); | 417 | seq_puts(m, "\nArray Drives:"); |
418 | flag = FALSE; | 418 | flag = FALSE; |
419 | 419 | ||
420 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 420 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -471,10 +471,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
471 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 471 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
472 | 472 | ||
473 | if (!flag) | 473 | if (!flag) |
474 | seq_printf(m, "\n --\n"); | 474 | seq_puts(m, "\n --\n"); |
475 | 475 | ||
476 | /* 5. about host drives */ | 476 | /* 5. about host drives */ |
477 | seq_printf(m,"\nHost Drives:"); | 477 | seq_puts(m, "\nHost Drives:"); |
478 | flag = FALSE; | 478 | flag = FALSE; |
479 | 479 | ||
480 | buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); | 480 | buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); |
@@ -527,11 +527,11 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
527 | } | 527 | } |
528 | 528 | ||
529 | if (!flag) | 529 | if (!flag) |
530 | seq_printf(m, "\n --\n"); | 530 | seq_puts(m, "\n --\n"); |
531 | } | 531 | } |
532 | 532 | ||
533 | /* controller events */ | 533 | /* controller events */ |
534 | seq_printf(m,"\nController Events:\n"); | 534 | seq_puts(m, "\nController Events:\n"); |
535 | 535 | ||
536 | for (id = -1;;) { | 536 | for (id = -1;;) { |
537 | id = gdth_read_event(ha, id, estr); | 537 | id = gdth_read_event(ha, id, estr); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 6bb4611b238a..a1cfbd3dda47 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/jiffies.h> | 50 | #include <linux/jiffies.h> |
51 | #include <linux/percpu-defs.h> | 51 | #include <linux/percpu-defs.h> |
52 | #include <linux/percpu.h> | 52 | #include <linux/percpu.h> |
53 | #include <asm/unaligned.h> | ||
53 | #include <asm/div64.h> | 54 | #include <asm/div64.h> |
54 | #include "hpsa_cmd.h" | 55 | #include "hpsa_cmd.h" |
55 | #include "hpsa.h" | 56 | #include "hpsa.h" |
@@ -59,8 +60,11 @@ | |||
59 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 60 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
60 | #define HPSA "hpsa" | 61 | #define HPSA "hpsa" |
61 | 62 | ||
62 | /* How long to wait (in milliseconds) for board to go into simple mode */ | 63 | /* How long to wait for CISS doorbell communication */ |
63 | #define MAX_CONFIG_WAIT 30000 | 64 | #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ |
65 | #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ | ||
66 | #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ | ||
67 | #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ | ||
64 | #define MAX_IOCTL_CONFIG_WAIT 1000 | 68 | #define MAX_IOCTL_CONFIG_WAIT 1000 |
65 | 69 | ||
66 | /*define how many times we will try a command because of bus resets */ | 70 | /*define how many times we will try a command because of bus resets */ |
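
The new defines above split each doorbell wait into a per-iteration msleep interval and an iteration budget. A hedged sketch of a polling loop consuming constants of this shape; the doorbell register and bit are placeholders, not the controller's actual layout:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms per msleep(), as above */
	#define MAX_MODE_CHANGE_WAIT	  2000	/* iterations: ~20 s total */

	/* Poll until the bit clears or the iteration budget is spent. */
	static int wait_for_doorbell_clear(void __iomem *doorbell, u32 bit)
	{
		unsigned long i;

		for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
			if (!(readl(doorbell) & bit))
				return 0;
			msleep(MODE_CHANGE_WAIT_INTERVAL);
		}
		return -ETIMEDOUT;
	}
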
@@ -164,24 +168,24 @@ static struct board_type products[] = { | |||
164 | {0x1926103C, "Smart Array P731m", &SA5_access}, | 168 | {0x1926103C, "Smart Array P731m", &SA5_access}, |
165 | {0x1928103C, "Smart Array P230i", &SA5_access}, | 169 | {0x1928103C, "Smart Array P230i", &SA5_access}, |
166 | {0x1929103C, "Smart Array P530", &SA5_access}, | 170 | {0x1929103C, "Smart Array P530", &SA5_access}, |
167 | {0x21BD103C, "Smart Array", &SA5_access}, | 171 | {0x21BD103C, "Smart Array P244br", &SA5_access}, |
168 | {0x21BE103C, "Smart Array", &SA5_access}, | 172 | {0x21BE103C, "Smart Array P741m", &SA5_access}, |
169 | {0x21BF103C, "Smart Array", &SA5_access}, | 173 | {0x21BF103C, "Smart HBA H240ar", &SA5_access}, |
170 | {0x21C0103C, "Smart Array", &SA5_access}, | 174 | {0x21C0103C, "Smart Array P440ar", &SA5_access}, |
171 | {0x21C1103C, "Smart Array", &SA5_access}, | 175 | {0x21C1103C, "Smart Array P840ar", &SA5_access}, |
172 | {0x21C2103C, "Smart Array", &SA5_access}, | 176 | {0x21C2103C, "Smart Array P440", &SA5_access}, |
173 | {0x21C3103C, "Smart Array", &SA5_access}, | 177 | {0x21C3103C, "Smart Array P441", &SA5_access}, |
174 | {0x21C4103C, "Smart Array", &SA5_access}, | 178 | {0x21C4103C, "Smart Array", &SA5_access}, |
175 | {0x21C5103C, "Smart Array", &SA5_access}, | 179 | {0x21C5103C, "Smart Array P841", &SA5_access}, |
176 | {0x21C6103C, "Smart Array", &SA5_access}, | 180 | {0x21C6103C, "Smart HBA H244br", &SA5_access}, |
177 | {0x21C7103C, "Smart Array", &SA5_access}, | 181 | {0x21C7103C, "Smart HBA H240", &SA5_access}, |
178 | {0x21C8103C, "Smart Array", &SA5_access}, | 182 | {0x21C8103C, "Smart HBA H241", &SA5_access}, |
179 | {0x21C9103C, "Smart Array", &SA5_access}, | 183 | {0x21C9103C, "Smart Array", &SA5_access}, |
180 | {0x21CA103C, "Smart Array", &SA5_access}, | 184 | {0x21CA103C, "Smart Array P246br", &SA5_access}, |
181 | {0x21CB103C, "Smart Array", &SA5_access}, | 185 | {0x21CB103C, "Smart Array P840", &SA5_access}, |
182 | {0x21CC103C, "Smart Array", &SA5_access}, | 186 | {0x21CC103C, "Smart Array", &SA5_access}, |
183 | {0x21CD103C, "Smart Array", &SA5_access}, | 187 | {0x21CD103C, "Smart Array", &SA5_access}, |
184 | {0x21CE103C, "Smart Array", &SA5_access}, | 188 | {0x21CE103C, "Smart HBA", &SA5_access}, |
185 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, | 189 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, |
186 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, | 190 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, |
187 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, | 191 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, |
@@ -195,8 +199,6 @@ static int number_of_controllers; | |||
195 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); | 199 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
196 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); | 200 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
197 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 201 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
198 | static void lock_and_start_io(struct ctlr_info *h); | ||
199 | static void start_io(struct ctlr_info *h, unsigned long *flags); | ||
200 | 202 | ||
201 | #ifdef CONFIG_COMPAT | 203 | #ifdef CONFIG_COMPAT |
202 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, | 204 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, |
@@ -204,18 +206,18 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, | |||
204 | #endif | 206 | #endif |
205 | 207 | ||
206 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); | 208 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); |
207 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); | ||
208 | static struct CommandList *cmd_alloc(struct ctlr_info *h); | 209 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
209 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); | ||
210 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | 210 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
211 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, | 211 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
212 | int cmd_type); | 212 | int cmd_type); |
213 | static void hpsa_free_cmd_pool(struct ctlr_info *h); | ||
213 | #define VPD_PAGE (1 << 8) | 214 | #define VPD_PAGE (1 << 8) |
214 | 215 | ||
215 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); | 216 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
216 | static void hpsa_scan_start(struct Scsi_Host *); | 217 | static void hpsa_scan_start(struct Scsi_Host *); |
217 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 218 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
218 | unsigned long elapsed_time); | 219 | unsigned long elapsed_time); |
220 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); | ||
219 | 221 | ||
220 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); | 222 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
221 | static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); | 223 | static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); |
@@ -229,7 +231,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, | |||
229 | struct CommandList *c); | 231 | struct CommandList *c); |
230 | /* performant mode helper functions */ | 232 | /* performant mode helper functions */ |
231 | static void calc_bucket_map(int *bucket, int num_buckets, | 233 | static void calc_bucket_map(int *bucket, int num_buckets, |
232 | int nsgs, int min_blocks, int *bucket_map); | 234 | int nsgs, int min_blocks, u32 *bucket_map); |
233 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); | 235 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); |
234 | static inline u32 next_command(struct ctlr_info *h, u8 q); | 236 | static inline u32 next_command(struct ctlr_info *h, u8 q); |
235 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, | 237 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
@@ -241,14 +243,15 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); | |||
241 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, | 243 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
242 | int wait_for_ready); | 244 | int wait_for_ready); |
243 | static inline void finish_cmd(struct CommandList *c); | 245 | static inline void finish_cmd(struct CommandList *c); |
244 | static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h); | 246 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); |
245 | #define BOARD_NOT_READY 0 | 247 | #define BOARD_NOT_READY 0 |
246 | #define BOARD_READY 1 | 248 | #define BOARD_READY 1 |
247 | static void hpsa_drain_accel_commands(struct ctlr_info *h); | 249 | static void hpsa_drain_accel_commands(struct ctlr_info *h); |
248 | static void hpsa_flush_cache(struct ctlr_info *h); | 250 | static void hpsa_flush_cache(struct ctlr_info *h); |
249 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, | 251 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
250 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 252 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
251 | u8 *scsi3addr); | 253 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); |
254 | static void hpsa_command_resubmit_worker(struct work_struct *work); | ||
252 | 255 | ||
253 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) | 256 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
254 | { | 257 | { |
@@ -505,8 +508,8 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) | |||
505 | return (scsi3addr[3] & 0xC0) == 0x40; | 508 | return (scsi3addr[3] & 0xC0) == 0x40; |
506 | } | 509 | } |
507 | 510 | ||
508 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 511 | static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", |
509 | "1(ADM)", "UNKNOWN" | 512 | "1(+0)ADM", "UNKNOWN" |
510 | }; | 513 | }; |
511 | #define HPSA_RAID_0 0 | 514 | #define HPSA_RAID_0 0 |
512 | #define HPSA_RAID_4 1 | 515 | #define HPSA_RAID_4 1 |
@@ -671,7 +674,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
671 | .queuecommand = hpsa_scsi_queue_command, | 674 | .queuecommand = hpsa_scsi_queue_command, |
672 | .scan_start = hpsa_scan_start, | 675 | .scan_start = hpsa_scan_start, |
673 | .scan_finished = hpsa_scan_finished, | 676 | .scan_finished = hpsa_scan_finished, |
674 | .change_queue_depth = scsi_change_queue_depth, | 677 | .change_queue_depth = hpsa_change_queue_depth, |
675 | .this_id = -1, | 678 | .this_id = -1, |
676 | .use_clustering = ENABLE_CLUSTERING, | 679 | .use_clustering = ENABLE_CLUSTERING, |
677 | .eh_abort_handler = hpsa_eh_abort_handler, | 680 | .eh_abort_handler = hpsa_eh_abort_handler, |
@@ -688,13 +691,6 @@ static struct scsi_host_template hpsa_driver_template = { | |||
688 | .no_write_same = 1, | 691 | .no_write_same = 1, |
689 | }; | 692 | }; |
690 | 693 | ||
691 | |||
692 | /* Enqueuing and dequeuing functions for cmdlists. */ | ||
693 | static inline void addQ(struct list_head *list, struct CommandList *c) | ||
694 | { | ||
695 | list_add_tail(&c->list, list); | ||
696 | } | ||
697 | |||
698 | static inline u32 next_command(struct ctlr_info *h, u8 q) | 694 | static inline u32 next_command(struct ctlr_info *h, u8 q) |
699 | { | 695 | { |
700 | u32 a; | 696 | u32 a; |
@@ -828,31 +824,21 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, | |||
828 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, | 824 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, |
829 | struct CommandList *c) | 825 | struct CommandList *c) |
830 | { | 826 | { |
831 | unsigned long flags; | 827 | dial_down_lockup_detection_during_fw_flash(h, c); |
832 | 828 | atomic_inc(&h->commands_outstanding); | |
833 | switch (c->cmd_type) { | 829 | switch (c->cmd_type) { |
834 | case CMD_IOACCEL1: | 830 | case CMD_IOACCEL1: |
835 | set_ioaccel1_performant_mode(h, c); | 831 | set_ioaccel1_performant_mode(h, c); |
832 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | ||
836 | break; | 833 | break; |
837 | case CMD_IOACCEL2: | 834 | case CMD_IOACCEL2: |
838 | set_ioaccel2_performant_mode(h, c); | 835 | set_ioaccel2_performant_mode(h, c); |
836 | writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); | ||
839 | break; | 837 | break; |
840 | default: | 838 | default: |
841 | set_performant_mode(h, c); | 839 | set_performant_mode(h, c); |
840 | h->access.submit_command(h, c); | ||
842 | } | 841 | } |
843 | dial_down_lockup_detection_during_fw_flash(h, c); | ||
844 | spin_lock_irqsave(&h->lock, flags); | ||
845 | addQ(&h->reqQ, c); | ||
846 | h->Qdepth++; | ||
847 | start_io(h, &flags); | ||
848 | spin_unlock_irqrestore(&h->lock, flags); | ||
849 | } | ||
850 | |||
851 | static inline void removeQ(struct CommandList *c) | ||
852 | { | ||
853 | if (WARN_ON(list_empty(&c->list))) | ||
854 | return; | ||
855 | list_del_init(&c->list); | ||
856 | } | 842 | } |
857 | 843 | ||
858 | static inline int is_hba_lunid(unsigned char scsi3addr[]) | 844 | static inline int is_hba_lunid(unsigned char scsi3addr[]) |
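
The rewritten enqueue_cmd_and_start_io above drops the software request queue (addQ/removeQ, reqQ and Qdepth under h->lock) and posts each command straight to the hardware, keeping only an atomic count of outstanding commands. A simplified sketch of that lockless pattern; the structures and register offset are placeholders:

	#include <linux/atomic.h>
	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_cmd  { u32 busaddr; };
	struct demo_ctlr {
		atomic_t commands_outstanding;
		void __iomem *vaddr;
	};

	#define DEMO_REQUEST_PORT_OFFSET 0x40	/* placeholder register offset */

	/* Submission: no software queue, no lock; count the command and
	 * post its bus address directly to the controller. */
	static void demo_submit_cmd(struct demo_ctlr *h, struct demo_cmd *c)
	{
		atomic_inc(&h->commands_outstanding);
		writel(c->busaddr, h->vaddr + DEMO_REQUEST_PORT_OFFSET);
	}

	/* Completion path drops the count again. */
	static void demo_complete_cmd(struct demo_ctlr *h)
	{
		atomic_dec(&h->commands_outstanding);
	}
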
@@ -919,7 +905,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, | |||
919 | 905 | ||
920 | /* If this device a non-zero lun of a multi-lun device | 906 | /* If this device a non-zero lun of a multi-lun device |
921 | * byte 4 of the 8-byte LUN addr will contain the logical | 907 | * byte 4 of the 8-byte LUN addr will contain the logical |
922 | * unit no, zero otherise. | 908 | * unit no, zero otherwise. |
923 | */ | 909 | */ |
924 | if (device->scsi3addr[4] == 0) { | 910 | if (device->scsi3addr[4] == 0) { |
925 | /* This is not a non-zero lun of a multi-lun device */ | 911 | /* This is not a non-zero lun of a multi-lun device */ |
@@ -984,12 +970,24 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, | |||
984 | /* Raid level changed. */ | 970 | /* Raid level changed. */ |
985 | h->dev[entry]->raid_level = new_entry->raid_level; | 971 | h->dev[entry]->raid_level = new_entry->raid_level; |
986 | 972 | ||
987 | /* Raid offload parameters changed. */ | 973 | /* Raid offload parameters changed. Careful about the ordering. */ |
974 | if (new_entry->offload_config && new_entry->offload_enabled) { | ||
975 | /* | ||
976 | * if drive is newly offload_enabled, we want to copy the | ||
977 | * raid map data first. If previously offload_enabled and | ||
978 | * offload_config were set, raid map data had better be | ||
979 | * the same as it was before. if raid map data is changed | ||
980 | * then it had better be the case that | ||
981 | * h->dev[entry]->offload_enabled is currently 0. | ||
982 | */ | ||
983 | h->dev[entry]->raid_map = new_entry->raid_map; | ||
984 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | ||
985 | wmb(); /* ensure raid map updated prior to ->offload_enabled */ | ||
986 | } | ||
988 | h->dev[entry]->offload_config = new_entry->offload_config; | 987 | h->dev[entry]->offload_config = new_entry->offload_config; |
989 | h->dev[entry]->offload_enabled = new_entry->offload_enabled; | ||
990 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | ||
991 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; | 988 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; |
992 | h->dev[entry]->raid_map = new_entry->raid_map; | 989 | h->dev[entry]->offload_enabled = new_entry->offload_enabled; |
990 | h->dev[entry]->queue_depth = new_entry->queue_depth; | ||
993 | 991 | ||
994 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", | 992 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", |
995 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, | 993 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, |
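
The reordering above copies the raid map and ioaccel handle first, issues wmb(), and only then updates offload_enabled, so a reader that observes the flag also observes a consistent map. A generic sketch of this publish pattern, with placeholder structures rather than the driver's own:

	#include <linux/kernel.h>
	#include <linux/types.h>

	struct demo_map { u32 rows; u32 data_disks_per_row; };
	struct demo_dev { struct demo_map map; int enabled; };

	/* Writer: payload first, barrier, then the flag. */
	static void demo_publish(struct demo_dev *d, const struct demo_map *newmap)
	{
		d->map = *newmap;
		wmb();			/* map visible before enabled */
		d->enabled = 1;
	}

	/* Reader: trust the map only after seeing the flag, with a
	 * matching read barrier. */
	static bool demo_try_use(struct demo_dev *d, struct demo_map *out)
	{
		if (!d->enabled)
			return false;
		rmb();			/* pairs with the writer's wmb() */
		*out = d->map;
		return true;
	}
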
@@ -1115,6 +1113,8 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1, | |||
1115 | return 1; | 1113 | return 1; |
1116 | if (dev1->offload_enabled != dev2->offload_enabled) | 1114 | if (dev1->offload_enabled != dev2->offload_enabled) |
1117 | return 1; | 1115 | return 1; |
1116 | if (dev1->queue_depth != dev2->queue_depth) | ||
1117 | return 1; | ||
1118 | return 0; | 1118 | return 0; |
1119 | } | 1119 | } |
1120 | 1120 | ||
@@ -1260,6 +1260,85 @@ static void hpsa_show_volume_status(struct ctlr_info *h, | |||
1260 | } | 1260 | } |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | /* | ||
1264 | * Figure the list of physical drive pointers for a logical drive with | ||
1265 | * raid offload configured. | ||
1266 | */ | ||
1267 | static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, | ||
1268 | struct hpsa_scsi_dev_t *dev[], int ndevices, | ||
1269 | struct hpsa_scsi_dev_t *logical_drive) | ||
1270 | { | ||
1271 | struct raid_map_data *map = &logical_drive->raid_map; | ||
1272 | struct raid_map_disk_data *dd = &map->data[0]; | ||
1273 | int i, j; | ||
1274 | int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + | ||
1275 | le16_to_cpu(map->metadata_disks_per_row); | ||
1276 | int nraid_map_entries = le16_to_cpu(map->row_cnt) * | ||
1277 | le16_to_cpu(map->layout_map_count) * | ||
1278 | total_disks_per_row; | ||
1279 | int nphys_disk = le16_to_cpu(map->layout_map_count) * | ||
1280 | total_disks_per_row; | ||
1281 | int qdepth; | ||
1282 | |||
1283 | if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) | ||
1284 | nraid_map_entries = RAID_MAP_MAX_ENTRIES; | ||
1285 | |||
1286 | qdepth = 0; | ||
1287 | for (i = 0; i < nraid_map_entries; i++) { | ||
1288 | logical_drive->phys_disk[i] = NULL; | ||
1289 | if (!logical_drive->offload_config) | ||
1290 | continue; | ||
1291 | for (j = 0; j < ndevices; j++) { | ||
1292 | if (dev[j]->devtype != TYPE_DISK) | ||
1293 | continue; | ||
1294 | if (is_logical_dev_addr_mode(dev[j]->scsi3addr)) | ||
1295 | continue; | ||
1296 | if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) | ||
1297 | continue; | ||
1298 | |||
1299 | logical_drive->phys_disk[i] = dev[j]; | ||
1300 | if (i < nphys_disk) | ||
1301 | qdepth = min(h->nr_cmds, qdepth + | ||
1302 | logical_drive->phys_disk[i]->queue_depth); | ||
1303 | break; | ||
1304 | } | ||
1305 | |||
1306 | /* | ||
1307 | * This can happen if a physical drive is removed and | ||
1308 | * the logical drive is degraded. In that case, the RAID | ||
1309 | * map data will refer to a physical disk which isn't actually | ||
1310 | * present. And in that case offload_enabled should already | ||
1311 | * be 0, but we'll turn it off here just in case | ||
1312 | */ | ||
1313 | if (!logical_drive->phys_disk[i]) { | ||
1314 | logical_drive->offload_enabled = 0; | ||
1315 | logical_drive->queue_depth = h->nr_cmds; | ||
1316 | } | ||
1317 | } | ||
1318 | if (nraid_map_entries) | ||
1319 | /* | ||
1320 | * This is correct for reads, too high for full stripe writes, | ||
1321 | * way too high for partial stripe writes | ||
1322 | */ | ||
1323 | logical_drive->queue_depth = qdepth; | ||
1324 | else | ||
1325 | logical_drive->queue_depth = h->nr_cmds; | ||
1326 | } | ||
1327 | |||
1328 | static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, | ||
1329 | struct hpsa_scsi_dev_t *dev[], int ndevices) | ||
1330 | { | ||
1331 | int i; | ||
1332 | |||
1333 | for (i = 0; i < ndevices; i++) { | ||
1334 | if (dev[i]->devtype != TYPE_DISK) | ||
1335 | continue; | ||
1336 | if (!is_logical_dev_addr_mode(dev[i]->scsi3addr)) | ||
1337 | continue; | ||
1338 | hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); | ||
1339 | } | ||
1340 | } | ||
1341 | |||
1263 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | 1342 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, |
1264 | struct hpsa_scsi_dev_t *sd[], int nsds) | 1343 | struct hpsa_scsi_dev_t *sd[], int nsds) |
1265 | { | 1344 | { |
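
hpsa_figure_phys_disk_ptrs() above accumulates a logical drive's queue depth from its backing physical disks while capping it at the controller's command count. A hedged sketch of just that accumulation, detached from the raid-map walk:

	#include <linux/kernel.h>

	/* Sum the backing disks' depths, never exceeding the controller-wide
	 * command count; fall back to that count when no disks are mapped. */
	static int demo_aggregate_qdepth(int nr_cmds, const int *disk_qdepth,
					 int ndisks)
	{
		int i, qdepth = 0;

		for (i = 0; i < ndisks; i++)
			qdepth = min(nr_cmds, qdepth + disk_qdepth[i]);

		return ndisks ? qdepth : nr_cmds;
	}
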
@@ -1444,8 +1523,12 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) | |||
1444 | spin_lock_irqsave(&h->devlock, flags); | 1523 | spin_lock_irqsave(&h->devlock, flags); |
1445 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), | 1524 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), |
1446 | sdev_id(sdev), sdev->lun); | 1525 | sdev_id(sdev), sdev->lun); |
1447 | if (sd != NULL) | 1526 | if (sd != NULL) { |
1448 | sdev->hostdata = sd; | 1527 | sdev->hostdata = sd; |
1528 | if (sd->queue_depth) | ||
1529 | scsi_change_queue_depth(sdev, sd->queue_depth); | ||
1530 | atomic_set(&sd->ioaccel_cmds_out, 0); | ||
1531 | } | ||
1449 | spin_unlock_irqrestore(&h->devlock, flags); | 1532 | spin_unlock_irqrestore(&h->devlock, flags); |
1450 | return 0; | 1533 | return 0; |
1451 | } | 1534 | } |
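
With per-device depths computed, the slave_alloc change above applies them through scsi_change_queue_depth(). A minimal hedged sketch of a slave_alloc doing the same; demo_lookup() and the demo_dev structure are hypothetical stand-ins for the driver's own lookup and state:

	#include <scsi/scsi_device.h>

	struct demo_dev { int queue_depth; };

	/* Hypothetical per-device state lookup; placeholder prototype. */
	struct demo_dev *demo_lookup(struct scsi_device *sdev);

	static int demo_slave_alloc(struct scsi_device *sdev)
	{
		struct demo_dev *d = demo_lookup(sdev);

		if (d) {
			sdev->hostdata = d;
			if (d->queue_depth)
				scsi_change_queue_depth(sdev, d->queue_depth);
		}
		return 0;
	}
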
@@ -1478,13 +1561,17 @@ static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) | |||
1478 | 1561 | ||
1479 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, | 1562 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, |
1480 | GFP_KERNEL); | 1563 | GFP_KERNEL); |
1481 | if (!h->cmd_sg_list) | 1564 | if (!h->cmd_sg_list) { |
1565 | dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); | ||
1482 | return -ENOMEM; | 1566 | return -ENOMEM; |
1567 | } | ||
1483 | for (i = 0; i < h->nr_cmds; i++) { | 1568 | for (i = 0; i < h->nr_cmds; i++) { |
1484 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * | 1569 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * |
1485 | h->chainsize, GFP_KERNEL); | 1570 | h->chainsize, GFP_KERNEL); |
1486 | if (!h->cmd_sg_list[i]) | 1571 | if (!h->cmd_sg_list[i]) { |
1572 | dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); | ||
1487 | goto clean; | 1573 | goto clean; |
1574 | } | ||
1488 | } | 1575 | } |
1489 | return 0; | 1576 | return 0; |
1490 | 1577 | ||
@@ -1504,7 +1591,7 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h, | |||
1504 | chain_block = h->cmd_sg_list[c->cmdindex]; | 1591 | chain_block = h->cmd_sg_list[c->cmdindex]; |
1505 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); | 1592 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); |
1506 | chain_len = sizeof(*chain_sg) * | 1593 | chain_len = sizeof(*chain_sg) * |
1507 | (c->Header.SGTotal - h->max_cmd_sg_entries); | 1594 | (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
1508 | chain_sg->Len = cpu_to_le32(chain_len); | 1595 | chain_sg->Len = cpu_to_le32(chain_len); |
1509 | temp64 = pci_map_single(h->pdev, chain_block, chain_len, | 1596 | temp64 = pci_map_single(h->pdev, chain_block, chain_len, |
1510 | PCI_DMA_TODEVICE); | 1597 | PCI_DMA_TODEVICE); |
@@ -1635,7 +1722,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
1635 | struct hpsa_scsi_dev_t *dev) | 1722 | struct hpsa_scsi_dev_t *dev) |
1636 | { | 1723 | { |
1637 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | 1724 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
1638 | int raid_retry = 0; | ||
1639 | 1725 | ||
1640 | /* check for good status */ | 1726 | /* check for good status */ |
1641 | if (likely(c2->error_data.serv_response == 0 && | 1727 | if (likely(c2->error_data.serv_response == 0 && |
@@ -1652,26 +1738,22 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
1652 | if (is_logical_dev_addr_mode(dev->scsi3addr) && | 1738 | if (is_logical_dev_addr_mode(dev->scsi3addr) && |
1653 | c2->error_data.serv_response == | 1739 | c2->error_data.serv_response == |
1654 | IOACCEL2_SERV_RESPONSE_FAILURE) { | 1740 | IOACCEL2_SERV_RESPONSE_FAILURE) { |
1655 | dev->offload_enabled = 0; | 1741 | if (c2->error_data.status == |
1656 | h->drv_req_rescan = 1; /* schedule controller for a rescan */ | 1742 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) |
1657 | cmd->result = DID_SOFT_ERROR << 16; | 1743 | dev->offload_enabled = 0; |
1658 | cmd_free(h, c); | 1744 | goto retry_cmd; |
1659 | cmd->scsi_done(cmd); | ||
1660 | return; | ||
1661 | } | ||
1662 | raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); | ||
1663 | /* If error found, disable Smart Path, schedule a rescan, | ||
1664 | * and force a retry on the standard path. | ||
1665 | */ | ||
1666 | if (raid_retry) { | ||
1667 | dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", | ||
1668 | "HP SSD Smart Path"); | ||
1669 | dev->offload_enabled = 0; /* Disable Smart Path */ | ||
1670 | h->drv_req_rescan = 1; /* schedule controller rescan */ | ||
1671 | cmd->result = DID_SOFT_ERROR << 16; | ||
1672 | } | 1745 | } |
1746 | |||
1747 | if (handle_ioaccel_mode2_error(h, c, cmd, c2)) | ||
1748 | goto retry_cmd; | ||
1749 | |||
1673 | cmd_free(h, c); | 1750 | cmd_free(h, c); |
1674 | cmd->scsi_done(cmd); | 1751 | cmd->scsi_done(cmd); |
1752 | return; | ||
1753 | |||
1754 | retry_cmd: | ||
1755 | INIT_WORK(&c->work, hpsa_command_resubmit_worker); | ||
1756 | queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); | ||
1675 | } | 1757 | } |
1676 | 1758 | ||
1677 | static void complete_scsi_command(struct CommandList *cp) | 1759 | static void complete_scsi_command(struct CommandList *cp) |
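
Rather than completing a failed ioaccel command with DID_SOFT_ERROR and relying on the midlayer to retry, the path above hands the command to a resubmit worker on a per-controller workqueue. A hedged sketch of that pattern with placeholder types:

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	struct demo_cmd {
		struct work_struct work;
		/* ... command state ... */
	};

	/* Worker: re-drive the command down the standard (non-accelerated)
	 * submission path. */
	static void demo_resubmit_worker(struct work_struct *work)
	{
		struct demo_cmd *c = container_of(work, struct demo_cmd, work);

		/* resubmit c through the normal path here */
		(void)c;
	}

	/* Error path: queue the retry on the CPU that took the completion. */
	static void demo_retry(struct workqueue_struct *wq, struct demo_cmd *c)
	{
		INIT_WORK(&c->work, demo_resubmit_worker);
		queue_work_on(raw_smp_processor_id(), wq, &c->work);
	}
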
@@ -1687,18 +1769,21 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1687 | unsigned long sense_data_size; | 1769 | unsigned long sense_data_size; |
1688 | 1770 | ||
1689 | ei = cp->err_info; | 1771 | ei = cp->err_info; |
1690 | cmd = (struct scsi_cmnd *) cp->scsi_cmd; | 1772 | cmd = cp->scsi_cmd; |
1691 | h = cp->h; | 1773 | h = cp->h; |
1692 | dev = cmd->device->hostdata; | 1774 | dev = cmd->device->hostdata; |
1693 | 1775 | ||
1694 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ | 1776 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ |
1695 | if ((cp->cmd_type == CMD_SCSI) && | 1777 | if ((cp->cmd_type == CMD_SCSI) && |
1696 | (cp->Header.SGTotal > h->max_cmd_sg_entries)) | 1778 | (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) |
1697 | hpsa_unmap_sg_chain_block(h, cp); | 1779 | hpsa_unmap_sg_chain_block(h, cp); |
1698 | 1780 | ||
1699 | cmd->result = (DID_OK << 16); /* host byte */ | 1781 | cmd->result = (DID_OK << 16); /* host byte */ |
1700 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | 1782 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
1701 | 1783 | ||
1784 | if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) | ||
1785 | atomic_dec(&cp->phys_disk->ioaccel_cmds_out); | ||
1786 | |||
1702 | if (cp->cmd_type == CMD_IOACCEL2) | 1787 | if (cp->cmd_type == CMD_IOACCEL2) |
1703 | return process_ioaccel2_completion(h, cp, cmd, dev); | 1788 | return process_ioaccel2_completion(h, cp, cmd, dev); |
1704 | 1789 | ||
@@ -1706,6 +1791,8 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1706 | 1791 | ||
1707 | scsi_set_resid(cmd, ei->ResidualCnt); | 1792 | scsi_set_resid(cmd, ei->ResidualCnt); |
1708 | if (ei->CommandStatus == 0) { | 1793 | if (ei->CommandStatus == 0) { |
1794 | if (cp->cmd_type == CMD_IOACCEL1) | ||
1795 | atomic_dec(&cp->phys_disk->ioaccel_cmds_out); | ||
1709 | cmd_free(h, cp); | 1796 | cmd_free(h, cp); |
1710 | cmd->scsi_done(cmd); | 1797 | cmd->scsi_done(cmd); |
1711 | return; | 1798 | return; |
@@ -1726,8 +1813,10 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1726 | */ | 1813 | */ |
1727 | if (cp->cmd_type == CMD_IOACCEL1) { | 1814 | if (cp->cmd_type == CMD_IOACCEL1) { |
1728 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; | 1815 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; |
1729 | cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); | 1816 | cp->Header.SGList = scsi_sg_count(cmd); |
1730 | cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; | 1817 | cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); |
1818 | cp->Request.CDBLen = le16_to_cpu(c->io_flags) & | ||
1819 | IOACCEL1_IOFLAGS_CDBLEN_MASK; | ||
1731 | cp->Header.tag = c->tag; | 1820 | cp->Header.tag = c->tag; |
1732 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); | 1821 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); |
1733 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); | 1822 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); |
@@ -1739,9 +1828,9 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1739 | if (is_logical_dev_addr_mode(dev->scsi3addr)) { | 1828 | if (is_logical_dev_addr_mode(dev->scsi3addr)) { |
1740 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) | 1829 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) |
1741 | dev->offload_enabled = 0; | 1830 | dev->offload_enabled = 0; |
1742 | cmd->result = DID_SOFT_ERROR << 16; | 1831 | INIT_WORK(&cp->work, hpsa_command_resubmit_worker); |
1743 | cmd_free(h, cp); | 1832 | queue_work_on(raw_smp_processor_id(), |
1744 | cmd->scsi_done(cmd); | 1833 | h->resubmit_wq, &cp->work); |
1745 | return; | 1834 | return; |
1746 | } | 1835 | } |
1747 | } | 1836 | } |
@@ -1798,9 +1887,8 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1798 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | 1887 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
1799 | break; | 1888 | break; |
1800 | case CMD_DATA_OVERRUN: | 1889 | case CMD_DATA_OVERRUN: |
1801 | dev_warn(&h->pdev->dev, "cp %p has" | 1890 | dev_warn(&h->pdev->dev, |
1802 | " completed with data overrun " | 1891 | "CDB %16phN data overrun\n", cp->Request.CDB); |
1803 | "reported\n", cp); | ||
1804 | break; | 1892 | break; |
1805 | case CMD_INVALID: { | 1893 | case CMD_INVALID: { |
1806 | /* print_bytes(cp, sizeof(*cp), 1, 0); | 1894 | /* print_bytes(cp, sizeof(*cp), 1, 0); |
@@ -1816,34 +1904,38 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1816 | break; | 1904 | break; |
1817 | case CMD_PROTOCOL_ERR: | 1905 | case CMD_PROTOCOL_ERR: |
1818 | cmd->result = DID_ERROR << 16; | 1906 | cmd->result = DID_ERROR << 16; |
1819 | dev_warn(&h->pdev->dev, "cp %p has " | 1907 | dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", |
1820 | "protocol error\n", cp); | 1908 | cp->Request.CDB); |
1821 | break; | 1909 | break; |
1822 | case CMD_HARDWARE_ERR: | 1910 | case CMD_HARDWARE_ERR: |
1823 | cmd->result = DID_ERROR << 16; | 1911 | cmd->result = DID_ERROR << 16; |
1824 | dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); | 1912 | dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", |
1913 | cp->Request.CDB); | ||
1825 | break; | 1914 | break; |
1826 | case CMD_CONNECTION_LOST: | 1915 | case CMD_CONNECTION_LOST: |
1827 | cmd->result = DID_ERROR << 16; | 1916 | cmd->result = DID_ERROR << 16; |
1828 | dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); | 1917 | dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", |
1918 | cp->Request.CDB); | ||
1829 | break; | 1919 | break; |
1830 | case CMD_ABORTED: | 1920 | case CMD_ABORTED: |
1831 | cmd->result = DID_ABORT << 16; | 1921 | cmd->result = DID_ABORT << 16; |
1832 | dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", | 1922 | dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", |
1833 | cp, ei->ScsiStatus); | 1923 | cp->Request.CDB, ei->ScsiStatus); |
1834 | break; | 1924 | break; |
1835 | case CMD_ABORT_FAILED: | 1925 | case CMD_ABORT_FAILED: |
1836 | cmd->result = DID_ERROR << 16; | 1926 | cmd->result = DID_ERROR << 16; |
1837 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); | 1927 | dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", |
1928 | cp->Request.CDB); | ||
1838 | break; | 1929 | break; |
1839 | case CMD_UNSOLICITED_ABORT: | 1930 | case CMD_UNSOLICITED_ABORT: |
1840 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ | 1931 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ |
1841 | dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " | 1932 | dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", |
1842 | "abort\n", cp); | 1933 | cp->Request.CDB); |
1843 | break; | 1934 | break; |
1844 | case CMD_TIMEOUT: | 1935 | case CMD_TIMEOUT: |
1845 | cmd->result = DID_TIME_OUT << 16; | 1936 | cmd->result = DID_TIME_OUT << 16; |
1846 | dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); | 1937 | dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", |
1938 | cp->Request.CDB); | ||
1847 | break; | 1939 | break; |
1848 | case CMD_UNABORTABLE: | 1940 | case CMD_UNABORTABLE: |
1849 | cmd->result = DID_ERROR << 16; | 1941 | cmd->result = DID_ERROR << 16; |
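
The reworked warnings above print the CDB with "%16phN", the printk extension that dumps a fixed number of bytes as hex, instead of printing the CommandList pointer. A small illustrative use of the format:

	#include <linux/printk.h>
	#include <linux/types.h>

	/* %*ph dumps a byte buffer as hex; the trailing 'N' drops the
	 * separators, so a 16-byte CDB prints as one contiguous string. */
	static void demo_log_cdb(void)
	{
		u8 cdb[16] = { 0x2a, 0x00, 0x12, 0x34, 0x56, 0x78 };

		pr_warn("CDB %16phN : hardware error\n", cdb);
		/* -> "CDB 2a001234567800000000000000000000 : hardware error" */
	}
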
@@ -2048,10 +2140,10 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2048 | struct CommandList *c; | 2140 | struct CommandList *c; |
2049 | struct ErrorInfo *ei; | 2141 | struct ErrorInfo *ei; |
2050 | 2142 | ||
2051 | c = cmd_special_alloc(h); | 2143 | c = cmd_alloc(h); |
2052 | 2144 | ||
2053 | if (c == NULL) { /* trouble... */ | 2145 | if (c == NULL) { |
2054 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2146 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2055 | return -ENOMEM; | 2147 | return -ENOMEM; |
2056 | } | 2148 | } |
2057 | 2149 | ||
@@ -2067,7 +2159,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2067 | rc = -1; | 2159 | rc = -1; |
2068 | } | 2160 | } |
2069 | out: | 2161 | out: |
2070 | cmd_special_free(h, c); | 2162 | cmd_free(h, c); |
2071 | return rc; | 2163 | return rc; |
2072 | } | 2164 | } |
2073 | 2165 | ||
@@ -2079,10 +2171,9 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, | |||
2079 | struct CommandList *c; | 2171 | struct CommandList *c; |
2080 | struct ErrorInfo *ei; | 2172 | struct ErrorInfo *ei; |
2081 | 2173 | ||
2082 | c = cmd_special_alloc(h); | 2174 | c = cmd_alloc(h); |
2083 | |||
2084 | if (c == NULL) { /* trouble... */ | 2175 | if (c == NULL) { /* trouble... */ |
2085 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2176 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2086 | return -ENOMEM; | 2177 | return -ENOMEM; |
2087 | } | 2178 | } |
2088 | 2179 | ||
@@ -2098,7 +2189,7 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, | |||
2098 | rc = -1; | 2189 | rc = -1; |
2099 | } | 2190 | } |
2100 | out: | 2191 | out: |
2101 | cmd_special_free(h, c); | 2192 | cmd_free(h, c); |
2102 | return rc; | 2193 | return rc; |
2103 | } | 2194 | } |
2104 | 2195 | ||
@@ -2109,10 +2200,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2109 | struct CommandList *c; | 2200 | struct CommandList *c; |
2110 | struct ErrorInfo *ei; | 2201 | struct ErrorInfo *ei; |
2111 | 2202 | ||
2112 | c = cmd_special_alloc(h); | 2203 | c = cmd_alloc(h); |
2113 | 2204 | ||
2114 | if (c == NULL) { /* trouble... */ | 2205 | if (c == NULL) { /* trouble... */ |
2115 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2206 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2116 | return -ENOMEM; | 2207 | return -ENOMEM; |
2117 | } | 2208 | } |
2118 | 2209 | ||
@@ -2128,7 +2219,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2128 | hpsa_scsi_interpret_error(h, c); | 2219 | hpsa_scsi_interpret_error(h, c); |
2129 | rc = -1; | 2220 | rc = -1; |
2130 | } | 2221 | } |
2131 | cmd_special_free(h, c); | 2222 | cmd_free(h, c); |
2132 | return rc; | 2223 | return rc; |
2133 | } | 2224 | } |
2134 | 2225 | ||
@@ -2191,15 +2282,13 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, | |||
2191 | le16_to_cpu(map_buff->row_cnt)); | 2282 | le16_to_cpu(map_buff->row_cnt)); |
2192 | dev_info(&h->pdev->dev, "layout_map_count = %u\n", | 2283 | dev_info(&h->pdev->dev, "layout_map_count = %u\n", |
2193 | le16_to_cpu(map_buff->layout_map_count)); | 2284 | le16_to_cpu(map_buff->layout_map_count)); |
2194 | dev_info(&h->pdev->dev, "flags = %u\n", | 2285 | dev_info(&h->pdev->dev, "flags = 0x%x\n", |
2195 | le16_to_cpu(map_buff->flags)); | 2286 | le16_to_cpu(map_buff->flags)); |
2196 | if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON) | 2287 | dev_info(&h->pdev->dev, "encrypytion = %s\n", |
2197 | dev_info(&h->pdev->dev, "encrypytion = ON\n"); | 2288 | le16_to_cpu(map_buff->flags) & |
2198 | else | 2289 | RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); |
2199 | dev_info(&h->pdev->dev, "encrypytion = OFF\n"); | ||
2200 | dev_info(&h->pdev->dev, "dekindex = %u\n", | 2290 | dev_info(&h->pdev->dev, "dekindex = %u\n", |
2201 | le16_to_cpu(map_buff->dekindex)); | 2291 | le16_to_cpu(map_buff->dekindex)); |
2202 | |||
2203 | map_cnt = le16_to_cpu(map_buff->layout_map_count); | 2292 | map_cnt = le16_to_cpu(map_buff->layout_map_count); |
2204 | for (map = 0; map < map_cnt; map++) { | 2293 | for (map = 0; map < map_cnt; map++) { |
2205 | dev_info(&h->pdev->dev, "Map%u:\n", map); | 2294 | dev_info(&h->pdev->dev, "Map%u:\n", map); |
@@ -2238,26 +2327,26 @@ static int hpsa_get_raid_map(struct ctlr_info *h, | |||
2238 | struct CommandList *c; | 2327 | struct CommandList *c; |
2239 | struct ErrorInfo *ei; | 2328 | struct ErrorInfo *ei; |
2240 | 2329 | ||
2241 | c = cmd_special_alloc(h); | 2330 | c = cmd_alloc(h); |
2242 | if (c == NULL) { | 2331 | if (c == NULL) { |
2243 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2332 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2244 | return -ENOMEM; | 2333 | return -ENOMEM; |
2245 | } | 2334 | } |
2246 | if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, | 2335 | if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, |
2247 | sizeof(this_device->raid_map), 0, | 2336 | sizeof(this_device->raid_map), 0, |
2248 | scsi3addr, TYPE_CMD)) { | 2337 | scsi3addr, TYPE_CMD)) { |
2249 | dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); | 2338 | dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); |
2250 | cmd_special_free(h, c); | 2339 | cmd_free(h, c); |
2251 | return -ENOMEM; | 2340 | return -ENOMEM; |
2252 | } | 2341 | } |
2253 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | 2342 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
2254 | ei = c->err_info; | 2343 | ei = c->err_info; |
2255 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | 2344 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
2256 | hpsa_scsi_interpret_error(h, c); | 2345 | hpsa_scsi_interpret_error(h, c); |
2257 | cmd_special_free(h, c); | 2346 | cmd_free(h, c); |
2258 | return -1; | 2347 | return -1; |
2259 | } | 2348 | } |
2260 | cmd_special_free(h, c); | 2349 | cmd_free(h, c); |
2261 | 2350 | ||
2262 | /* @todo in the future, dynamically allocate RAID map memory */ | 2351 | /* @todo in the future, dynamically allocate RAID map memory */ |
2263 | if (le32_to_cpu(this_device->raid_map.structure_size) > | 2352 | if (le32_to_cpu(this_device->raid_map.structure_size) > |
@@ -2269,6 +2358,34 @@ static int hpsa_get_raid_map(struct ctlr_info *h, | |||
2269 | return rc; | 2358 | return rc; |
2270 | } | 2359 | } |
2271 | 2360 | ||
2361 | static int hpsa_bmic_id_physical_device(struct ctlr_info *h, | ||
2362 | unsigned char scsi3addr[], u16 bmic_device_index, | ||
2363 | struct bmic_identify_physical_device *buf, size_t bufsize) | ||
2364 | { | ||
2365 | int rc = IO_OK; | ||
2366 | struct CommandList *c; | ||
2367 | struct ErrorInfo *ei; | ||
2368 | |||
2369 | c = cmd_alloc(h); | ||
2370 | rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, | ||
2371 | 0, RAID_CTLR_LUNID, TYPE_CMD); | ||
2372 | if (rc) | ||
2373 | goto out; | ||
2374 | |||
2375 | c->Request.CDB[2] = bmic_device_index & 0xff; | ||
2376 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | ||
2377 | |||
2378 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | ||
2379 | ei = c->err_info; | ||
2380 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | ||
2381 | hpsa_scsi_interpret_error(h, c); | ||
2382 | rc = -1; | ||
2383 | } | ||
2384 | out: | ||
2385 | cmd_free(h, c); | ||
2386 | return rc; | ||
2387 | } | ||
2388 | |||
2272 | static int hpsa_vpd_page_supported(struct ctlr_info *h, | 2389 | static int hpsa_vpd_page_supported(struct ctlr_info *h, |
2273 | unsigned char scsi3addr[], u8 page) | 2390 | unsigned char scsi3addr[], u8 page) |
2274 | { | 2391 | { |
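
The new hpsa_bmic_id_physical_device() above passes the 16-bit BMIC device index by splitting it across CDB bytes 2 (low byte) and 9 (high byte) after fill_cmd() has built the command. A tiny hedged sketch of that encoding on its own:

	#include <linux/types.h>

	/* Place a 16-bit device index into the CDB as shown above:
	 * low byte in CDB[2], high byte in CDB[9]. */
	static void demo_set_bmic_index(u8 cdb[16], u16 index)
	{
		cdb[2] = index & 0xff;
		cdb[9] = (index >> 8) & 0xff;
	}
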
@@ -2369,7 +2486,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2369 | } | 2486 | } |
2370 | 2487 | ||
2371 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | 2488 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, |
2372 | struct ReportLUNdata *buf, int bufsize, | 2489 | void *buf, int bufsize, |
2373 | int extended_response) | 2490 | int extended_response) |
2374 | { | 2491 | { |
2375 | int rc = IO_OK; | 2492 | int rc = IO_OK; |
@@ -2377,9 +2494,9 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
2377 | unsigned char scsi3addr[8]; | 2494 | unsigned char scsi3addr[8]; |
2378 | struct ErrorInfo *ei; | 2495 | struct ErrorInfo *ei; |
2379 | 2496 | ||
2380 | c = cmd_special_alloc(h); | 2497 | c = cmd_alloc(h); |
2381 | if (c == NULL) { /* trouble... */ | 2498 | if (c == NULL) { /* trouble... */ |
2382 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2499 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2383 | return -1; | 2500 | return -1; |
2384 | } | 2501 | } |
2385 | /* address the controller */ | 2502 | /* address the controller */ |
@@ -2398,24 +2515,26 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
2398 | hpsa_scsi_interpret_error(h, c); | 2515 | hpsa_scsi_interpret_error(h, c); |
2399 | rc = -1; | 2516 | rc = -1; |
2400 | } else { | 2517 | } else { |
2401 | if (buf->extended_response_flag != extended_response) { | 2518 | struct ReportLUNdata *rld = buf; |
2519 | |||
2520 | if (rld->extended_response_flag != extended_response) { | ||
2402 | dev_err(&h->pdev->dev, | 2521 | dev_err(&h->pdev->dev, |
2403 | "report luns requested format %u, got %u\n", | 2522 | "report luns requested format %u, got %u\n", |
2404 | extended_response, | 2523 | extended_response, |
2405 | buf->extended_response_flag); | 2524 | rld->extended_response_flag); |
2406 | rc = -1; | 2525 | rc = -1; |
2407 | } | 2526 | } |
2408 | } | 2527 | } |
2409 | out: | 2528 | out: |
2410 | cmd_special_free(h, c); | 2529 | cmd_free(h, c); |
2411 | return rc; | 2530 | return rc; |
2412 | } | 2531 | } |
2413 | 2532 | ||
2414 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, | 2533 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
2415 | struct ReportLUNdata *buf, | 2534 | struct ReportExtendedLUNdata *buf, int bufsize) |
2416 | int bufsize, int extended_response) | ||
2417 | { | 2535 | { |
2418 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); | 2536 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, |
2537 | HPSA_REPORT_PHYS_EXTENDED); | ||
2419 | } | 2538 | } |
2420 | 2539 | ||
2421 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, | 2540 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, |
@@ -2590,6 +2709,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
2590 | this_device->offload_config = 0; | 2709 | this_device->offload_config = 0; |
2591 | this_device->offload_enabled = 0; | 2710 | this_device->offload_enabled = 0; |
2592 | this_device->volume_offline = 0; | 2711 | this_device->volume_offline = 0; |
2712 | this_device->queue_depth = h->nr_cmds; | ||
2593 | } | 2713 | } |
2594 | 2714 | ||
2595 | if (is_OBDR_device) { | 2715 | if (is_OBDR_device) { |
@@ -2732,7 +2852,6 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2732 | { | 2852 | { |
2733 | struct ReportExtendedLUNdata *physicals = NULL; | 2853 | struct ReportExtendedLUNdata *physicals = NULL; |
2734 | int responsesize = 24; /* size of physical extended response */ | 2854 | int responsesize = 24; /* size of physical extended response */ |
2735 | int extended = 2; /* flag forces reporting 'other dev info'. */ | ||
2736 | int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; | 2855 | int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; |
2737 | u32 nphysicals = 0; /* number of reported physical devs */ | 2856 | u32 nphysicals = 0; /* number of reported physical devs */ |
2738 | int found = 0; /* found match (1) or not (0) */ | 2857 | int found = 0; /* found match (1) or not (0) */ |
@@ -2741,8 +2860,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2741 | struct scsi_cmnd *scmd; /* scsi command within request being aborted */ | 2860 | struct scsi_cmnd *scmd; /* scsi command within request being aborted */ |
2742 | struct hpsa_scsi_dev_t *d; /* device of request being aborted */ | 2861 | struct hpsa_scsi_dev_t *d; /* device of request being aborted */ |
2743 | struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ | 2862 | struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ |
2744 | u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ | 2863 | __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ |
2745 | u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ | 2864 | __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ |
2746 | 2865 | ||
2747 | if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) | 2866 | if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) |
2748 | return 0; /* no match */ | 2867 | return 0; /* no match */ |
@@ -2761,8 +2880,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2761 | return 0; /* no match */ | 2880 | return 0; /* no match */ |
2762 | 2881 | ||
2763 | it_nexus = cpu_to_le32(d->ioaccel_handle); | 2882 | it_nexus = cpu_to_le32(d->ioaccel_handle); |
2764 | scsi_nexus = cpu_to_le32(c2a->scsi_nexus); | 2883 | scsi_nexus = c2a->scsi_nexus; |
2765 | find = c2a->scsi_nexus; | 2884 | find = le32_to_cpu(c2a->scsi_nexus); |
2766 | 2885 | ||
2767 | if (h->raid_offload_debug > 0) | 2886 | if (h->raid_offload_debug > 0) |
2768 | dev_info(&h->pdev->dev, | 2887 | dev_info(&h->pdev->dev, |
@@ -2779,8 +2898,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2779 | physicals = kzalloc(reportsize, GFP_KERNEL); | 2898 | physicals = kzalloc(reportsize, GFP_KERNEL); |
2780 | if (physicals == NULL) | 2899 | if (physicals == NULL) |
2781 | return 0; | 2900 | return 0; |
2782 | if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, | 2901 | if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { |
2783 | reportsize, extended)) { | ||
2784 | dev_err(&h->pdev->dev, | 2902 | dev_err(&h->pdev->dev, |
2785 | "Can't lookup %s device handle: report physical LUNs failed.\n", | 2903 | "Can't lookup %s device handle: report physical LUNs failed.\n", |
2786 | "HP SSD Smart Path"); | 2904 | "HP SSD Smart Path"); |
@@ -2821,34 +2939,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2821 | * Returns 0 on success, -1 otherwise. | 2939 | * Returns 0 on success, -1 otherwise. |
2822 | */ | 2940 | */ |
2823 | static int hpsa_gather_lun_info(struct ctlr_info *h, | 2941 | static int hpsa_gather_lun_info(struct ctlr_info *h, |
2824 | int reportphyslunsize, int reportloglunsize, | 2942 | struct ReportExtendedLUNdata *physdev, u32 *nphysicals, |
2825 | struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, | ||
2826 | struct ReportLUNdata *logdev, u32 *nlogicals) | 2943 | struct ReportLUNdata *logdev, u32 *nlogicals) |
2827 | { | 2944 | { |
2828 | int physical_entry_size = 8; | 2945 | if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { |
2829 | |||
2830 | *physical_mode = 0; | ||
2831 | |||
2832 | /* For I/O accelerator mode we need to read physical device handles */ | ||
2833 | if (h->transMethod & CFGTBL_Trans_io_accel1 || | ||
2834 | h->transMethod & CFGTBL_Trans_io_accel2) { | ||
2835 | *physical_mode = HPSA_REPORT_PHYS_EXTENDED; | ||
2836 | physical_entry_size = 24; | ||
2837 | } | ||
2838 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize, | ||
2839 | *physical_mode)) { | ||
2840 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | 2946 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
2841 | return -1; | 2947 | return -1; |
2842 | } | 2948 | } |
2843 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / | 2949 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; |
2844 | physical_entry_size; | ||
2845 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { | 2950 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
2846 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | 2951 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", |
2847 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | 2952 | HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); |
2848 | *nphysicals - HPSA_MAX_PHYS_LUN); | ||
2849 | *nphysicals = HPSA_MAX_PHYS_LUN; | 2953 | *nphysicals = HPSA_MAX_PHYS_LUN; |
2850 | } | 2954 | } |
2851 | if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) { | 2955 | if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { |
2852 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | 2956 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
2853 | return -1; | 2957 | return -1; |
2854 | } | 2958 | } |
@@ -2921,6 +3025,33 @@ static int hpsa_hba_mode_enabled(struct ctlr_info *h) | |||
2921 | return hba_mode_enabled; | 3025 | return hba_mode_enabled; |
2922 | } | 3026 | } |
2923 | 3027 | ||
3028 | /* get physical drive ioaccel handle and queue depth */ | ||
3029 | static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, | ||
3030 | struct hpsa_scsi_dev_t *dev, | ||
3031 | u8 *lunaddrbytes, | ||
3032 | struct bmic_identify_physical_device *id_phys) | ||
3033 | { | ||
3034 | int rc; | ||
3035 | struct ext_report_lun_entry *rle = | ||
3036 | (struct ext_report_lun_entry *) lunaddrbytes; | ||
3037 | |||
3038 | dev->ioaccel_handle = rle->ioaccel_handle; | ||
3039 | memset(id_phys, 0, sizeof(*id_phys)); | ||
3040 | rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, | ||
3041 | GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, | ||
3042 | sizeof(*id_phys)); | ||
3043 | if (!rc) | ||
3044 | /* Reserve space for FW operations */ | ||
3045 | #define DRIVE_CMDS_RESERVED_FOR_FW 2 | ||
3046 | #define DRIVE_QUEUE_DEPTH 7 | ||
3047 | dev->queue_depth = | ||
3048 | le16_to_cpu(id_phys->current_queue_depth_limit) - | ||
3049 | DRIVE_CMDS_RESERVED_FOR_FW; | ||
3050 | else | ||
3051 | dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ | ||
3052 | atomic_set(&dev->ioaccel_cmds_out, 0); | ||
3053 | } | ||
3054 | |||
2924 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | 3055 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
2925 | { | 3056 | { |
2926 | /* the idea here is we could get notified | 3057 | /* the idea here is we could get notified |
@@ -2935,9 +3066,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2935 | */ | 3066 | */ |
2936 | struct ReportExtendedLUNdata *physdev_list = NULL; | 3067 | struct ReportExtendedLUNdata *physdev_list = NULL; |
2937 | struct ReportLUNdata *logdev_list = NULL; | 3068 | struct ReportLUNdata *logdev_list = NULL; |
3069 | struct bmic_identify_physical_device *id_phys = NULL; | ||
2938 | u32 nphysicals = 0; | 3070 | u32 nphysicals = 0; |
2939 | u32 nlogicals = 0; | 3071 | u32 nlogicals = 0; |
2940 | int physical_mode = 0; | ||
2941 | u32 ndev_allocated = 0; | 3072 | u32 ndev_allocated = 0; |
2942 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; | 3073 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
2943 | int ncurrent = 0; | 3074 | int ncurrent = 0; |
@@ -2950,8 +3081,10 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2950 | physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); | 3081 | physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); |
2951 | logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); | 3082 | logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); |
2952 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | 3083 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
3084 | id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); | ||
2953 | 3085 | ||
2954 | if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { | 3086 | if (!currentsd || !physdev_list || !logdev_list || |
3087 | !tmpdevice || !id_phys) { | ||
2955 | dev_err(&h->pdev->dev, "out of memory\n"); | 3088 | dev_err(&h->pdev->dev, "out of memory\n"); |
2956 | goto out; | 3089 | goto out; |
2957 | } | 3090 | } |
@@ -2968,10 +3101,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2968 | 3101 | ||
2969 | h->hba_mode_enabled = rescan_hba_mode; | 3102 | h->hba_mode_enabled = rescan_hba_mode; |
2970 | 3103 | ||
2971 | if (hpsa_gather_lun_info(h, | 3104 | if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, |
2972 | sizeof(*physdev_list), sizeof(*logdev_list), | 3105 | logdev_list, &nlogicals)) |
2973 | (struct ReportLUNdata *) physdev_list, &nphysicals, | ||
2974 | &physical_mode, logdev_list, &nlogicals)) | ||
2975 | goto out; | 3106 | goto out; |
2976 | 3107 | ||
2977 | /* We might see up to the maximum number of logical and physical disks | 3108 | /* We might see up to the maximum number of logical and physical disks |
@@ -3068,10 +3199,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
3068 | ncurrent++; | 3199 | ncurrent++; |
3069 | break; | 3200 | break; |
3070 | } | 3201 | } |
3071 | if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { | 3202 | if (h->transMethod & CFGTBL_Trans_io_accel1 || |
3072 | memcpy(&this_device->ioaccel_handle, | 3203 | h->transMethod & CFGTBL_Trans_io_accel2) { |
3073 | &lunaddrbytes[20], | 3204 | hpsa_get_ioaccel_drive_info(h, this_device, |
3074 | sizeof(this_device->ioaccel_handle)); | 3205 | lunaddrbytes, id_phys); |
3206 | atomic_set(&this_device->ioaccel_cmds_out, 0); | ||
3075 | ncurrent++; | 3207 | ncurrent++; |
3076 | } | 3208 | } |
3077 | break; | 3209 | break; |
@@ -3095,6 +3227,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
3095 | if (ncurrent >= HPSA_MAX_DEVICES) | 3227 | if (ncurrent >= HPSA_MAX_DEVICES) |
3096 | break; | 3228 | break; |
3097 | } | 3229 | } |
3230 | hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); | ||
3098 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); | 3231 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); |
3099 | out: | 3232 | out: |
3100 | kfree(tmpdevice); | 3233 | kfree(tmpdevice); |
@@ -3103,9 +3236,22 @@ out: | |||
3103 | kfree(currentsd); | 3236 | kfree(currentsd); |
3104 | kfree(physdev_list); | 3237 | kfree(physdev_list); |
3105 | kfree(logdev_list); | 3238 | kfree(logdev_list); |
3239 | kfree(id_phys); | ||
3106 | } | 3240 | } |
3107 | 3241 | ||
3108 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | 3242 | static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, |
3243 | struct scatterlist *sg) | ||
3244 | { | ||
3245 | u64 addr64 = (u64) sg_dma_address(sg); | ||
3246 | unsigned int len = sg_dma_len(sg); | ||
3247 | |||
3248 | desc->Addr = cpu_to_le64(addr64); | ||
3249 | desc->Len = cpu_to_le32(len); | ||
3250 | desc->Ext = 0; | ||
3251 | } | ||
3252 | |||
3253 | /* | ||
3254 | * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | ||
3109 | * dma mapping and fills in the scatter gather entries of the | 3255 | * dma mapping and fills in the scatter gather entries of the |
3110 | * hpsa command, cp. | 3256 | * hpsa command, cp. |
3111 | */ | 3257 | */ |
@@ -3113,9 +3259,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3113 | struct CommandList *cp, | 3259 | struct CommandList *cp, |
3114 | struct scsi_cmnd *cmd) | 3260 | struct scsi_cmnd *cmd) |
3115 | { | 3261 | { |
3116 | unsigned int len; | ||
3117 | struct scatterlist *sg; | 3262 | struct scatterlist *sg; |
3118 | u64 addr64; | ||
3119 | int use_sg, i, sg_index, chained; | 3263 | int use_sg, i, sg_index, chained; |
3120 | struct SGDescriptor *curr_sg; | 3264 | struct SGDescriptor *curr_sg; |
3121 | 3265 | ||
@@ -3138,13 +3282,11 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3138 | curr_sg = h->cmd_sg_list[cp->cmdindex]; | 3282 | curr_sg = h->cmd_sg_list[cp->cmdindex]; |
3139 | sg_index = 0; | 3283 | sg_index = 0; |
3140 | } | 3284 | } |
3141 | addr64 = (u64) sg_dma_address(sg); | 3285 | hpsa_set_sg_descriptor(curr_sg, sg); |
3142 | len = sg_dma_len(sg); | ||
3143 | curr_sg->Addr = cpu_to_le64(addr64); | ||
3144 | curr_sg->Len = cpu_to_le32(len); | ||
3145 | curr_sg->Ext = cpu_to_le32(0); | ||
3146 | curr_sg++; | 3286 | curr_sg++; |
3147 | } | 3287 | } |
3288 | |||
3289 | /* Back the pointer up to the last entry and mark it as "last". */ | ||
3148 | (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); | 3290 | (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); |
3149 | 3291 | ||
3150 | if (use_sg + chained > h->maxSG) | 3292 | if (use_sg + chained > h->maxSG) |
@@ -3163,7 +3305,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3163 | sglist_finished: | 3305 | sglist_finished: |
3164 | 3306 | ||
3165 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ | 3307 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
3166 | cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */ | 3308 | cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ |
3167 | return 0; | 3309 | return 0; |
3168 | } | 3310 | } |
3169 | 3311 | ||
@@ -3217,7 +3359,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) | |||
3217 | 3359 | ||
3218 | static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | 3360 | static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, |
3219 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3361 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3220 | u8 *scsi3addr) | 3362 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3221 | { | 3363 | { |
3222 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3364 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3223 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; | 3365 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; |
@@ -3230,13 +3372,17 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3230 | u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; | 3372 | u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; |
3231 | 3373 | ||
3232 | /* TODO: implement chaining support */ | 3374 | /* TODO: implement chaining support */ |
3233 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) | 3375 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { |
3376 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3234 | return IO_ACCEL_INELIGIBLE; | 3377 | return IO_ACCEL_INELIGIBLE; |
3378 | } | ||
3235 | 3379 | ||
3236 | BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); | 3380 | BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); |
3237 | 3381 | ||
3238 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) | 3382 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) { |
3383 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3239 | return IO_ACCEL_INELIGIBLE; | 3384 | return IO_ACCEL_INELIGIBLE; |
3385 | } | ||
3240 | 3386 | ||
3241 | c->cmd_type = CMD_IOACCEL1; | 3387 | c->cmd_type = CMD_IOACCEL1; |
3242 | 3388 | ||
@@ -3246,8 +3392,10 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3246 | BUG_ON(c->busaddr & 0x0000007F); | 3392 | BUG_ON(c->busaddr & 0x0000007F); |
3247 | 3393 | ||
3248 | use_sg = scsi_dma_map(cmd); | 3394 | use_sg = scsi_dma_map(cmd); |
3249 | if (use_sg < 0) | 3395 | if (use_sg < 0) { |
3396 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3250 | return use_sg; | 3397 | return use_sg; |
3398 | } | ||
3251 | 3399 | ||
3252 | if (use_sg) { | 3400 | if (use_sg) { |
3253 | curr_sg = cp->SG; | 3401 | curr_sg = cp->SG; |
@@ -3284,11 +3432,11 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3284 | 3432 | ||
3285 | c->Header.SGList = use_sg; | 3433 | c->Header.SGList = use_sg; |
3286 | /* Fill out the command structure to submit */ | 3434 | /* Fill out the command structure to submit */ |
3287 | cp->dev_handle = ioaccel_handle & 0xFFFF; | 3435 | cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); |
3288 | cp->transfer_len = total_len; | 3436 | cp->transfer_len = cpu_to_le32(total_len); |
3289 | cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | | 3437 | cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | |
3290 | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); | 3438 | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); |
3291 | cp->control = control; | 3439 | cp->control = cpu_to_le32(control); |
3292 | memcpy(cp->CDB, cdb, cdb_len); | 3440 | memcpy(cp->CDB, cdb, cdb_len); |
3293 | memcpy(cp->CISS_LUN, scsi3addr, 8); | 3441 | memcpy(cp->CISS_LUN, scsi3addr, 8); |
3294 | /* Tag was already set at init time. */ | 3442 | /* Tag was already set at init time. */ |
@@ -3306,8 +3454,10 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, | |||
3306 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3454 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3307 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; | 3455 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
3308 | 3456 | ||
3457 | c->phys_disk = dev; | ||
3458 | |||
3309 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, | 3459 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, |
3310 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr); | 3460 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); |
3311 | } | 3461 | } |
3312 | 3462 | ||
3313 | /* | 3463 | /* |
@@ -3321,10 +3471,8 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h, | |||
3321 | struct raid_map_data *map = &dev->raid_map; | 3471 | struct raid_map_data *map = &dev->raid_map; |
3322 | u64 first_block; | 3472 | u64 first_block; |
3323 | 3473 | ||
3324 | BUG_ON(!(dev->offload_config && dev->offload_enabled)); | ||
3325 | |||
3326 | /* Are we doing encryption on this device? */ | 3474 | /* Are we doing encryption on this device? */ |
3327 | if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) | 3475 | if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) |
3328 | return; | 3476 | return; |
3329 | /* Set the data encryption key index. */ | 3477 | /* Set the data encryption key index. */ |
3330 | cp->dekindex = map->dekindex; | 3478 | cp->dekindex = map->dekindex; |
@@ -3340,101 +3488,38 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h, | |||
3340 | /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ | 3488 | /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ |
3341 | case WRITE_6: | 3489 | case WRITE_6: |
3342 | case READ_6: | 3490 | case READ_6: |
3343 | if (map->volume_blk_size == 512) { | 3491 | first_block = get_unaligned_be16(&cmd->cmnd[2]); |
3344 | cp->tweak_lower = | ||
3345 | (((u32) cmd->cmnd[2]) << 8) | | ||
3346 | cmd->cmnd[3]; | ||
3347 | cp->tweak_upper = 0; | ||
3348 | } else { | ||
3349 | first_block = | ||
3350 | (((u64) cmd->cmnd[2]) << 8) | | ||
3351 | cmd->cmnd[3]; | ||
3352 | first_block = (first_block * map->volume_blk_size)/512; | ||
3353 | cp->tweak_lower = (u32)first_block; | ||
3354 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3355 | } | ||
3356 | break; | 3492 | break; |
3357 | case WRITE_10: | 3493 | case WRITE_10: |
3358 | case READ_10: | 3494 | case READ_10: |
3359 | if (map->volume_blk_size == 512) { | ||
3360 | cp->tweak_lower = | ||
3361 | (((u32) cmd->cmnd[2]) << 24) | | ||
3362 | (((u32) cmd->cmnd[3]) << 16) | | ||
3363 | (((u32) cmd->cmnd[4]) << 8) | | ||
3364 | cmd->cmnd[5]; | ||
3365 | cp->tweak_upper = 0; | ||
3366 | } else { | ||
3367 | first_block = | ||
3368 | (((u64) cmd->cmnd[2]) << 24) | | ||
3369 | (((u64) cmd->cmnd[3]) << 16) | | ||
3370 | (((u64) cmd->cmnd[4]) << 8) | | ||
3371 | cmd->cmnd[5]; | ||
3372 | first_block = (first_block * map->volume_blk_size)/512; | ||
3373 | cp->tweak_lower = (u32)first_block; | ||
3374 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3375 | } | ||
3376 | break; | ||
3377 | /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ | 3495 | /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ |
3378 | case WRITE_12: | 3496 | case WRITE_12: |
3379 | case READ_12: | 3497 | case READ_12: |
3380 | if (map->volume_blk_size == 512) { | 3498 | first_block = get_unaligned_be32(&cmd->cmnd[2]); |
3381 | cp->tweak_lower = | ||
3382 | (((u32) cmd->cmnd[2]) << 24) | | ||
3383 | (((u32) cmd->cmnd[3]) << 16) | | ||
3384 | (((u32) cmd->cmnd[4]) << 8) | | ||
3385 | cmd->cmnd[5]; | ||
3386 | cp->tweak_upper = 0; | ||
3387 | } else { | ||
3388 | first_block = | ||
3389 | (((u64) cmd->cmnd[2]) << 24) | | ||
3390 | (((u64) cmd->cmnd[3]) << 16) | | ||
3391 | (((u64) cmd->cmnd[4]) << 8) | | ||
3392 | cmd->cmnd[5]; | ||
3393 | first_block = (first_block * map->volume_blk_size)/512; | ||
3394 | cp->tweak_lower = (u32)first_block; | ||
3395 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3396 | } | ||
3397 | break; | 3499 | break; |
3398 | case WRITE_16: | 3500 | case WRITE_16: |
3399 | case READ_16: | 3501 | case READ_16: |
3400 | if (map->volume_blk_size == 512) { | 3502 | first_block = get_unaligned_be64(&cmd->cmnd[2]); |
3401 | cp->tweak_lower = | ||
3402 | (((u32) cmd->cmnd[6]) << 24) | | ||
3403 | (((u32) cmd->cmnd[7]) << 16) | | ||
3404 | (((u32) cmd->cmnd[8]) << 8) | | ||
3405 | cmd->cmnd[9]; | ||
3406 | cp->tweak_upper = | ||
3407 | (((u32) cmd->cmnd[2]) << 24) | | ||
3408 | (((u32) cmd->cmnd[3]) << 16) | | ||
3409 | (((u32) cmd->cmnd[4]) << 8) | | ||
3410 | cmd->cmnd[5]; | ||
3411 | } else { | ||
3412 | first_block = | ||
3413 | (((u64) cmd->cmnd[2]) << 56) | | ||
3414 | (((u64) cmd->cmnd[3]) << 48) | | ||
3415 | (((u64) cmd->cmnd[4]) << 40) | | ||
3416 | (((u64) cmd->cmnd[5]) << 32) | | ||
3417 | (((u64) cmd->cmnd[6]) << 24) | | ||
3418 | (((u64) cmd->cmnd[7]) << 16) | | ||
3419 | (((u64) cmd->cmnd[8]) << 8) | | ||
3420 | cmd->cmnd[9]; | ||
3421 | first_block = (first_block * map->volume_blk_size)/512; | ||
3422 | cp->tweak_lower = (u32)first_block; | ||
3423 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3424 | } | ||
3425 | break; | 3503 | break; |
3426 | default: | 3504 | default: |
3427 | dev_err(&h->pdev->dev, | 3505 | dev_err(&h->pdev->dev, |
3428 | "ERROR: %s: IOACCEL request CDB size not supported for encryption\n", | 3506 | "ERROR: %s: size (0x%x) not supported for encryption\n", |
3429 | __func__); | 3507 | __func__, cmd->cmnd[0]); |
3430 | BUG(); | 3508 | BUG(); |
3431 | break; | 3509 | break; |
3432 | } | 3510 | } |
3511 | |||
3512 | if (le32_to_cpu(map->volume_blk_size) != 512) | ||
3513 | first_block = first_block * | ||
3514 | le32_to_cpu(map->volume_blk_size)/512; | ||
3515 | |||
3516 | cp->tweak_lower = cpu_to_le32(first_block); | ||
3517 | cp->tweak_upper = cpu_to_le32(first_block >> 32); | ||
3433 | } | 3518 | } |
3434 | 3519 | ||
3435 | static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | 3520 | static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, |
3436 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3521 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3437 | u8 *scsi3addr) | 3522 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3438 | { | 3523 | { |
3439 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3524 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3440 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; | 3525 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; |
@@ -3445,11 +3530,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3445 | u32 len; | 3530 | u32 len; |
3446 | u32 total_len = 0; | 3531 | u32 total_len = 0; |
3447 | 3532 | ||
3448 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) | 3533 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { |
3534 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3449 | return IO_ACCEL_INELIGIBLE; | 3535 | return IO_ACCEL_INELIGIBLE; |
3536 | } | ||
3450 | 3537 | ||
3451 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) | 3538 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) { |
3539 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3452 | return IO_ACCEL_INELIGIBLE; | 3540 | return IO_ACCEL_INELIGIBLE; |
3541 | } | ||
3542 | |||
3453 | c->cmd_type = CMD_IOACCEL2; | 3543 | c->cmd_type = CMD_IOACCEL2; |
3454 | /* Adjust the DMA address to point to the accelerated command buffer */ | 3544 | /* Adjust the DMA address to point to the accelerated command buffer */ |
3455 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + | 3545 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + |
@@ -3460,8 +3550,10 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3460 | cp->IU_type = IOACCEL2_IU_TYPE; | 3550 | cp->IU_type = IOACCEL2_IU_TYPE; |
3461 | 3551 | ||
3462 | use_sg = scsi_dma_map(cmd); | 3552 | use_sg = scsi_dma_map(cmd); |
3463 | if (use_sg < 0) | 3553 | if (use_sg < 0) { |
3554 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3464 | return use_sg; | 3555 | return use_sg; |
3556 | } | ||
3465 | 3557 | ||
3466 | if (use_sg) { | 3558 | if (use_sg) { |
3467 | BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); | 3559 | BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); |
@@ -3506,9 +3598,8 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3506 | /* Set encryption parameters, if necessary */ | 3598 | /* Set encryption parameters, if necessary */ |
3507 | set_encrypt_ioaccel2(h, c, cp); | 3599 | set_encrypt_ioaccel2(h, c, cp); |
3508 | 3600 | ||
3509 | cp->scsi_nexus = ioaccel_handle; | 3601 | cp->scsi_nexus = cpu_to_le32(ioaccel_handle); |
3510 | cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | | 3602 | cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); |
3511 | DIRECT_LOOKUP_BIT; | ||
3512 | memcpy(cp->cdb, cdb, sizeof(cp->cdb)); | 3603 | memcpy(cp->cdb, cdb, sizeof(cp->cdb)); |
3513 | 3604 | ||
3514 | /* fill in sg elements */ | 3605 | /* fill in sg elements */ |
@@ -3528,14 +3619,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3528 | */ | 3619 | */ |
3529 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, | 3620 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
3530 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3621 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3531 | u8 *scsi3addr) | 3622 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3532 | { | 3623 | { |
3624 | /* Try to honor the device's queue depth */ | ||
3625 | if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > | ||
3626 | phys_disk->queue_depth) { | ||
3627 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3628 | return IO_ACCEL_INELIGIBLE; | ||
3629 | } | ||
3533 | if (h->transMethod & CFGTBL_Trans_io_accel1) | 3630 | if (h->transMethod & CFGTBL_Trans_io_accel1) |
3534 | return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, | 3631 | return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, |
3535 | cdb, cdb_len, scsi3addr); | 3632 | cdb, cdb_len, scsi3addr, |
3633 | phys_disk); | ||
3536 | else | 3634 | else |
3537 | return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, | 3635 | return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, |
3538 | cdb, cdb_len, scsi3addr); | 3636 | cdb, cdb_len, scsi3addr, |
3637 | phys_disk); | ||
3539 | } | 3638 | } |
3540 | 3639 | ||
3541 | static void raid_map_helper(struct raid_map_data *map, | 3640 | static void raid_map_helper(struct raid_map_data *map, |
@@ -3543,21 +3642,22 @@ static void raid_map_helper(struct raid_map_data *map, | |||
3543 | { | 3642 | { |
3544 | if (offload_to_mirror == 0) { | 3643 | if (offload_to_mirror == 0) { |
3545 | /* use physical disk in the first mirrored group. */ | 3644 | /* use physical disk in the first mirrored group. */ |
3546 | *map_index %= map->data_disks_per_row; | 3645 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
3547 | return; | 3646 | return; |
3548 | } | 3647 | } |
3549 | do { | 3648 | do { |
3550 | /* determine mirror group that *map_index indicates */ | 3649 | /* determine mirror group that *map_index indicates */ |
3551 | *current_group = *map_index / map->data_disks_per_row; | 3650 | *current_group = *map_index / |
3651 | le16_to_cpu(map->data_disks_per_row); | ||
3552 | if (offload_to_mirror == *current_group) | 3652 | if (offload_to_mirror == *current_group) |
3553 | continue; | 3653 | continue; |
3554 | if (*current_group < (map->layout_map_count - 1)) { | 3654 | if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { |
3555 | /* select map index from next group */ | 3655 | /* select map index from next group */ |
3556 | *map_index += map->data_disks_per_row; | 3656 | *map_index += le16_to_cpu(map->data_disks_per_row); |
3557 | (*current_group)++; | 3657 | (*current_group)++; |
3558 | } else { | 3658 | } else { |
3559 | /* select map index from first group */ | 3659 | /* select map index from first group */ |
3560 | *map_index %= map->data_disks_per_row; | 3660 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
3561 | *current_group = 0; | 3661 | *current_group = 0; |
3562 | } | 3662 | } |
3563 | } while (offload_to_mirror != *current_group); | 3663 | } while (offload_to_mirror != *current_group); |
@@ -3595,13 +3695,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3595 | u32 disk_block_cnt; | 3695 | u32 disk_block_cnt; |
3596 | u8 cdb[16]; | 3696 | u8 cdb[16]; |
3597 | u8 cdb_len; | 3697 | u8 cdb_len; |
3698 | u16 strip_size; | ||
3598 | #if BITS_PER_LONG == 32 | 3699 | #if BITS_PER_LONG == 32 |
3599 | u64 tmpdiv; | 3700 | u64 tmpdiv; |
3600 | #endif | 3701 | #endif |
3601 | int offload_to_mirror; | 3702 | int offload_to_mirror; |
3602 | 3703 | ||
3603 | BUG_ON(!(dev->offload_config && dev->offload_enabled)); | ||
3604 | |||
3605 | /* check for valid opcode, get LBA and block count */ | 3704 | /* check for valid opcode, get LBA and block count */ |
3606 | switch (cmd->cmnd[0]) { | 3705 | switch (cmd->cmnd[0]) { |
3607 | case WRITE_6: | 3706 | case WRITE_6: |
@@ -3668,11 +3767,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3668 | return IO_ACCEL_INELIGIBLE; | 3767 | return IO_ACCEL_INELIGIBLE; |
3669 | 3768 | ||
3670 | /* check for invalid block or wraparound */ | 3769 | /* check for invalid block or wraparound */ |
3671 | if (last_block >= map->volume_blk_cnt || last_block < first_block) | 3770 | if (last_block >= le64_to_cpu(map->volume_blk_cnt) || |
3771 | last_block < first_block) | ||
3672 | return IO_ACCEL_INELIGIBLE; | 3772 | return IO_ACCEL_INELIGIBLE; |
3673 | 3773 | ||
3674 | /* calculate stripe information for the request */ | 3774 | /* calculate stripe information for the request */ |
3675 | blocks_per_row = map->data_disks_per_row * map->strip_size; | 3775 | blocks_per_row = le16_to_cpu(map->data_disks_per_row) * |
3776 | le16_to_cpu(map->strip_size); | ||
3777 | strip_size = le16_to_cpu(map->strip_size); | ||
3676 | #if BITS_PER_LONG == 32 | 3778 | #if BITS_PER_LONG == 32 |
3677 | tmpdiv = first_block; | 3779 | tmpdiv = first_block; |
3678 | (void) do_div(tmpdiv, blocks_per_row); | 3780 | (void) do_div(tmpdiv, blocks_per_row); |
@@ -3683,18 +3785,18 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3683 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); | 3785 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
3684 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); | 3786 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
3685 | tmpdiv = first_row_offset; | 3787 | tmpdiv = first_row_offset; |
3686 | (void) do_div(tmpdiv, map->strip_size); | 3788 | (void) do_div(tmpdiv, strip_size); |
3687 | first_column = tmpdiv; | 3789 | first_column = tmpdiv; |
3688 | tmpdiv = last_row_offset; | 3790 | tmpdiv = last_row_offset; |
3689 | (void) do_div(tmpdiv, map->strip_size); | 3791 | (void) do_div(tmpdiv, strip_size); |
3690 | last_column = tmpdiv; | 3792 | last_column = tmpdiv; |
3691 | #else | 3793 | #else |
3692 | first_row = first_block / blocks_per_row; | 3794 | first_row = first_block / blocks_per_row; |
3693 | last_row = last_block / blocks_per_row; | 3795 | last_row = last_block / blocks_per_row; |
3694 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); | 3796 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
3695 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); | 3797 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
3696 | first_column = first_row_offset / map->strip_size; | 3798 | first_column = first_row_offset / strip_size; |
3697 | last_column = last_row_offset / map->strip_size; | 3799 | last_column = last_row_offset / strip_size; |
3698 | #endif | 3800 | #endif |
3699 | 3801 | ||
3700 | /* if this isn't a single row/column then give to the controller */ | 3802 | /* if this isn't a single row/column then give to the controller */ |
@@ -3702,10 +3804,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3702 | return IO_ACCEL_INELIGIBLE; | 3804 | return IO_ACCEL_INELIGIBLE; |
3703 | 3805 | ||
3704 | /* proceeding with driver mapping */ | 3806 | /* proceeding with driver mapping */ |
3705 | total_disks_per_row = map->data_disks_per_row + | 3807 | total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + |
3706 | map->metadata_disks_per_row; | 3808 | le16_to_cpu(map->metadata_disks_per_row); |
3707 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % | 3809 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
3708 | map->row_cnt; | 3810 | le16_to_cpu(map->row_cnt); |
3709 | map_index = (map_row * total_disks_per_row) + first_column; | 3811 | map_index = (map_row * total_disks_per_row) + first_column; |
3710 | 3812 | ||
3711 | switch (dev->raid_level) { | 3813 | switch (dev->raid_level) { |
@@ -3716,23 +3818,24 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3716 | * (2-drive R1 and R10 with even # of drives.) | 3818 | * (2-drive R1 and R10 with even # of drives.) |
3717 | * Appropriate for SSDs, not optimal for HDDs | 3819 | * Appropriate for SSDs, not optimal for HDDs |
3718 | */ | 3820 | */ |
3719 | BUG_ON(map->layout_map_count != 2); | 3821 | BUG_ON(le16_to_cpu(map->layout_map_count) != 2); |
3720 | if (dev->offload_to_mirror) | 3822 | if (dev->offload_to_mirror) |
3721 | map_index += map->data_disks_per_row; | 3823 | map_index += le16_to_cpu(map->data_disks_per_row); |
3722 | dev->offload_to_mirror = !dev->offload_to_mirror; | 3824 | dev->offload_to_mirror = !dev->offload_to_mirror; |
3723 | break; | 3825 | break; |
3724 | case HPSA_RAID_ADM: | 3826 | case HPSA_RAID_ADM: |
3725 | /* Handles N-way mirrors (R1-ADM) | 3827 | /* Handles N-way mirrors (R1-ADM) |
3726 | * and R10 with # of drives divisible by 3. | 3828 | * and R10 with # of drives divisible by 3. |
3727 | */ | 3829 | */ |
3728 | BUG_ON(map->layout_map_count != 3); | 3830 | BUG_ON(le16_to_cpu(map->layout_map_count) != 3); |
3729 | 3831 | ||
3730 | offload_to_mirror = dev->offload_to_mirror; | 3832 | offload_to_mirror = dev->offload_to_mirror; |
3731 | raid_map_helper(map, offload_to_mirror, | 3833 | raid_map_helper(map, offload_to_mirror, |
3732 | &map_index, ¤t_group); | 3834 | &map_index, ¤t_group); |
3733 | /* set mirror group to use next time */ | 3835 | /* set mirror group to use next time */ |
3734 | offload_to_mirror = | 3836 | offload_to_mirror = |
3735 | (offload_to_mirror >= map->layout_map_count - 1) | 3837 | (offload_to_mirror >= |
3838 | le16_to_cpu(map->layout_map_count) - 1) | ||
3736 | ? 0 : offload_to_mirror + 1; | 3839 | ? 0 : offload_to_mirror + 1; |
3737 | dev->offload_to_mirror = offload_to_mirror; | 3840 | dev->offload_to_mirror = offload_to_mirror; |
3738 | /* Avoid direct use of dev->offload_to_mirror within this | 3841 | /* Avoid direct use of dev->offload_to_mirror within this |
@@ -3742,14 +3845,16 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3742 | break; | 3845 | break; |
3743 | case HPSA_RAID_5: | 3846 | case HPSA_RAID_5: |
3744 | case HPSA_RAID_6: | 3847 | case HPSA_RAID_6: |
3745 | if (map->layout_map_count <= 1) | 3848 | if (le16_to_cpu(map->layout_map_count) <= 1) |
3746 | break; | 3849 | break; |
3747 | 3850 | ||
3748 | /* Verify first and last block are in same RAID group */ | 3851 | /* Verify first and last block are in same RAID group */ |
3749 | r5or6_blocks_per_row = | 3852 | r5or6_blocks_per_row = |
3750 | map->strip_size * map->data_disks_per_row; | 3853 | le16_to_cpu(map->strip_size) * |
3854 | le16_to_cpu(map->data_disks_per_row); | ||
3751 | BUG_ON(r5or6_blocks_per_row == 0); | 3855 | BUG_ON(r5or6_blocks_per_row == 0); |
3752 | stripesize = r5or6_blocks_per_row * map->layout_map_count; | 3856 | stripesize = r5or6_blocks_per_row * |
3857 | le16_to_cpu(map->layout_map_count); | ||
3753 | #if BITS_PER_LONG == 32 | 3858 | #if BITS_PER_LONG == 32 |
3754 | tmpdiv = first_block; | 3859 | tmpdiv = first_block; |
3755 | first_group = do_div(tmpdiv, stripesize); | 3860 | first_group = do_div(tmpdiv, stripesize); |
@@ -3812,28 +3917,35 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3812 | r5or6_blocks_per_row); | 3917 | r5or6_blocks_per_row); |
3813 | 3918 | ||
3814 | first_column = r5or6_first_column = | 3919 | first_column = r5or6_first_column = |
3815 | r5or6_first_row_offset / map->strip_size; | 3920 | r5or6_first_row_offset / le16_to_cpu(map->strip_size); |
3816 | r5or6_last_column = | 3921 | r5or6_last_column = |
3817 | r5or6_last_row_offset / map->strip_size; | 3922 | r5or6_last_row_offset / le16_to_cpu(map->strip_size); |
3818 | #endif | 3923 | #endif |
3819 | if (r5or6_first_column != r5or6_last_column) | 3924 | if (r5or6_first_column != r5or6_last_column) |
3820 | return IO_ACCEL_INELIGIBLE; | 3925 | return IO_ACCEL_INELIGIBLE; |
3821 | 3926 | ||
3822 | /* Request is eligible */ | 3927 | /* Request is eligible */ |
3823 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % | 3928 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
3824 | map->row_cnt; | 3929 | le16_to_cpu(map->row_cnt); |
3825 | 3930 | ||
3826 | map_index = (first_group * | 3931 | map_index = (first_group * |
3827 | (map->row_cnt * total_disks_per_row)) + | 3932 | (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + |
3828 | (map_row * total_disks_per_row) + first_column; | 3933 | (map_row * total_disks_per_row) + first_column; |
3829 | break; | 3934 | break; |
3830 | default: | 3935 | default: |
3831 | return IO_ACCEL_INELIGIBLE; | 3936 | return IO_ACCEL_INELIGIBLE; |
3832 | } | 3937 | } |
3833 | 3938 | ||
3939 | if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) | ||
3940 | return IO_ACCEL_INELIGIBLE; | ||
3941 | |||
3942 | c->phys_disk = dev->phys_disk[map_index]; | ||
3943 | |||
3834 | disk_handle = dd[map_index].ioaccel_handle; | 3944 | disk_handle = dd[map_index].ioaccel_handle; |
3835 | disk_block = map->disk_starting_blk + (first_row * map->strip_size) + | 3945 | disk_block = le64_to_cpu(map->disk_starting_blk) + |
3836 | (first_row_offset - (first_column * map->strip_size)); | 3946 | first_row * le16_to_cpu(map->strip_size) + |
3947 | (first_row_offset - first_column * | ||
3948 | le16_to_cpu(map->strip_size)); | ||
3837 | disk_block_cnt = block_cnt; | 3949 | disk_block_cnt = block_cnt; |
3838 | 3950 | ||
3839 | /* handle differing logical/physical block sizes */ | 3951 | /* handle differing logical/physical block sizes */ |
@@ -3876,78 +3988,21 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3876 | cdb_len = 10; | 3988 | cdb_len = 10; |
3877 | } | 3989 | } |
3878 | return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, | 3990 | return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, |
3879 | dev->scsi3addr); | 3991 | dev->scsi3addr, |
3992 | dev->phys_disk[map_index]); | ||
3880 | } | 3993 | } |
3881 | 3994 | ||
3882 | /* | 3995 | /* Submit commands down the "normal" RAID stack path */ |
3883 | * Running in struct Scsi_Host->host_lock less mode using LLD internal | 3996 | static int hpsa_ciss_submit(struct ctlr_info *h, |
3884 | * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection. | 3997 | struct CommandList *c, struct scsi_cmnd *cmd, |
3885 | */ | 3998 | unsigned char scsi3addr[]) |
3886 | static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | ||
3887 | { | 3999 | { |
3888 | struct ctlr_info *h; | ||
3889 | struct hpsa_scsi_dev_t *dev; | ||
3890 | unsigned char scsi3addr[8]; | ||
3891 | struct CommandList *c; | ||
3892 | int rc = 0; | ||
3893 | |||
3894 | /* Get the ptr to our adapter structure out of cmd->host. */ | ||
3895 | h = sdev_to_hba(cmd->device); | ||
3896 | dev = cmd->device->hostdata; | ||
3897 | if (!dev) { | ||
3898 | cmd->result = DID_NO_CONNECT << 16; | ||
3899 | cmd->scsi_done(cmd); | ||
3900 | return 0; | ||
3901 | } | ||
3902 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | ||
3903 | |||
3904 | if (unlikely(lockup_detected(h))) { | ||
3905 | cmd->result = DID_ERROR << 16; | ||
3906 | cmd->scsi_done(cmd); | ||
3907 | return 0; | ||
3908 | } | ||
3909 | c = cmd_alloc(h); | ||
3910 | if (c == NULL) { /* trouble... */ | ||
3911 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | ||
3912 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3913 | } | ||
3914 | |||
3915 | /* Fill in the command list header */ | ||
3916 | /* save c in case we have to abort it */ | ||
3917 | cmd->host_scribble = (unsigned char *) c; | 4000 | cmd->host_scribble = (unsigned char *) c; |
3918 | |||
3919 | c->cmd_type = CMD_SCSI; | 4001 | c->cmd_type = CMD_SCSI; |
3920 | c->scsi_cmd = cmd; | 4002 | c->scsi_cmd = cmd; |
3921 | |||
3922 | /* Call alternate submit routine for I/O accelerated commands. | ||
3923 | * Retries always go down the normal I/O path. | ||
3924 | */ | ||
3925 | if (likely(cmd->retries == 0 && | ||
3926 | cmd->request->cmd_type == REQ_TYPE_FS && | ||
3927 | h->acciopath_status)) { | ||
3928 | if (dev->offload_enabled) { | ||
3929 | rc = hpsa_scsi_ioaccel_raid_map(h, c); | ||
3930 | if (rc == 0) | ||
3931 | return 0; /* Sent on ioaccel path */ | ||
3932 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
3933 | cmd_free(h, c); | ||
3934 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3935 | } | ||
3936 | } else if (dev->ioaccel_handle) { | ||
3937 | rc = hpsa_scsi_ioaccel_direct_map(h, c); | ||
3938 | if (rc == 0) | ||
3939 | return 0; /* Sent on direct map path */ | ||
3940 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
3941 | cmd_free(h, c); | ||
3942 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3943 | } | ||
3944 | } | ||
3945 | } | ||
3946 | |||
3947 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | 4003 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
3948 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 4004 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
3949 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) | | 4005 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); |
3950 | DIRECT_LOOKUP_BIT); | ||
3951 | 4006 | ||
3952 | /* Fill in the request block... */ | 4007 | /* Fill in the request block... */ |
3953 | 4008 | ||
@@ -4003,25 +4058,108 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
4003 | return 0; | 4058 | return 0; |
4004 | } | 4059 | } |
4005 | 4060 | ||
4006 | static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) | 4061 | static void hpsa_command_resubmit_worker(struct work_struct *work) |
4007 | { | 4062 | { |
4008 | unsigned long flags; | 4063 | struct scsi_cmnd *cmd; |
4064 | struct hpsa_scsi_dev_t *dev; | ||
4065 | struct CommandList *c = | ||
4066 | container_of(work, struct CommandList, work); | ||
4067 | |||
4068 | cmd = c->scsi_cmd; | ||
4069 | dev = cmd->device->hostdata; | ||
4070 | if (!dev) { | ||
4071 | cmd->result = DID_NO_CONNECT << 16; | ||
4072 | cmd->scsi_done(cmd); | ||
4073 | return; | ||
4074 | } | ||
4075 | if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { | ||
4076 | /* | ||
4077 | * If we get here, it means dma mapping failed. Try | ||
4078 | * again via scsi mid layer, which will then get | ||
4079 | * SCSI_MLQUEUE_HOST_BUSY. | ||
4080 | */ | ||
4081 | cmd->result = DID_IMM_RETRY << 16; | ||
4082 | cmd->scsi_done(cmd); | ||
4083 | } | ||
4084 | } | ||
4085 | |||
4086 | /* Running in struct Scsi_Host->host_lock-less mode */ ||
4087 | static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | ||
4088 | { | ||
4089 | struct ctlr_info *h; | ||
4090 | struct hpsa_scsi_dev_t *dev; | ||
4091 | unsigned char scsi3addr[8]; | ||
4092 | struct CommandList *c; | ||
4093 | int rc = 0; | ||
4094 | |||
4095 | /* Get the ptr to our adapter structure out of cmd->host. */ | ||
4096 | h = sdev_to_hba(cmd->device); | ||
4097 | dev = cmd->device->hostdata; | ||
4098 | if (!dev) { | ||
4099 | cmd->result = DID_NO_CONNECT << 16; | ||
4100 | cmd->scsi_done(cmd); | ||
4101 | return 0; | ||
4102 | } | ||
4103 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | ||
4104 | |||
4105 | if (unlikely(lockup_detected(h))) { | ||
4106 | cmd->result = DID_ERROR << 16; | ||
4107 | cmd->scsi_done(cmd); | ||
4108 | return 0; | ||
4109 | } | ||
4110 | c = cmd_alloc(h); | ||
4111 | if (c == NULL) { /* trouble... */ | ||
4112 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | ||
4113 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4114 | } | ||
4115 | if (unlikely(lockup_detected(h))) { | ||
4116 | cmd->result = DID_ERROR << 16; | ||
4117 | cmd_free(h, c); | ||
4118 | cmd->scsi_done(cmd); | ||
4119 | return 0; | ||
4120 | } | ||
4009 | 4121 | ||
4010 | /* | 4122 | /* |
4011 | * Don't let rescans be initiated on a controller known | 4123 | * Call alternate submit routine for I/O accelerated commands. |
4012 | * to be locked up. If the controller locks up *during* | 4124 | * Retries always go down the normal I/O path. |
4013 | * a rescan, that thread is probably hosed, but at least | ||
4014 | * we can prevent new rescan threads from piling up on a | ||
4015 | * locked up controller. | ||
4016 | */ | 4125 | */ |
4017 | if (unlikely(lockup_detected(h))) { | 4126 | if (likely(cmd->retries == 0 && |
4018 | spin_lock_irqsave(&h->scan_lock, flags); | 4127 | cmd->request->cmd_type == REQ_TYPE_FS && |
4019 | h->scan_finished = 1; | 4128 | h->acciopath_status)) { |
4020 | wake_up_all(&h->scan_wait_queue); | 4129 | |
4021 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4130 | cmd->host_scribble = (unsigned char *) c; |
4022 | return 1; | 4131 | c->cmd_type = CMD_SCSI; |
4132 | c->scsi_cmd = cmd; | ||
4133 | |||
4134 | if (dev->offload_enabled) { | ||
4135 | rc = hpsa_scsi_ioaccel_raid_map(h, c); | ||
4136 | if (rc == 0) | ||
4137 | return 0; /* Sent on ioaccel path */ | ||
4138 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
4139 | cmd_free(h, c); | ||
4140 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4141 | } | ||
4142 | } else if (dev->ioaccel_handle) { | ||
4143 | rc = hpsa_scsi_ioaccel_direct_map(h, c); | ||
4144 | if (rc == 0) | ||
4145 | return 0; /* Sent on direct map path */ | ||
4146 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
4147 | cmd_free(h, c); | ||
4148 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4149 | } | ||
4150 | } | ||
4023 | } | 4151 | } |
4024 | return 0; | 4152 | return hpsa_ciss_submit(h, c, cmd, scsi3addr); |
4153 | } | ||
4154 | |||
4155 | static void hpsa_scan_complete(struct ctlr_info *h) | ||
4156 | { | ||
4157 | unsigned long flags; | ||
4158 | |||
4159 | spin_lock_irqsave(&h->scan_lock, flags); | ||
4160 | h->scan_finished = 1; | ||
4161 | wake_up_all(&h->scan_wait_queue); | ||
4162 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
4025 | } | 4163 | } |
4026 | 4164 | ||
4027 | static void hpsa_scan_start(struct Scsi_Host *sh) | 4165 | static void hpsa_scan_start(struct Scsi_Host *sh) |
@@ -4029,8 +4167,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
4029 | struct ctlr_info *h = shost_to_hba(sh); | 4167 | struct ctlr_info *h = shost_to_hba(sh); |
4030 | unsigned long flags; | 4168 | unsigned long flags; |
4031 | 4169 | ||
4032 | if (do_not_scan_if_controller_locked_up(h)) | 4170 | /* |
4033 | return; | 4171 | * Don't let rescans be initiated on a controller known to be locked |
4172 | * up. If the controller locks up *during* a rescan, that thread is | ||
4173 | * probably hosed, but at least we can prevent new rescan threads from | ||
4174 | * piling up on a locked up controller. | ||
4175 | */ | ||
4176 | if (unlikely(lockup_detected(h))) | ||
4177 | return hpsa_scan_complete(h); | ||
4034 | 4178 | ||
4035 | /* wait until any scan already in progress is finished. */ | 4179 | /* wait until any scan already in progress is finished. */ |
4036 | while (1) { | 4180 | while (1) { |
@@ -4048,15 +4192,27 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
4048 | h->scan_finished = 0; /* mark scan as in progress */ | 4192 | h->scan_finished = 0; /* mark scan as in progress */ |
4049 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4193 | spin_unlock_irqrestore(&h->scan_lock, flags); |
4050 | 4194 | ||
4051 | if (do_not_scan_if_controller_locked_up(h)) | 4195 | if (unlikely(lockup_detected(h))) |
4052 | return; | 4196 | return hpsa_scan_complete(h); |
4053 | 4197 | ||
4054 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | 4198 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); |
4055 | 4199 | ||
4056 | spin_lock_irqsave(&h->scan_lock, flags); | 4200 | hpsa_scan_complete(h); |
4057 | h->scan_finished = 1; /* mark scan as finished. */ | 4201 | } |
4058 | wake_up_all(&h->scan_wait_queue); | 4202 | |
4059 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4203 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) |
4204 | { | ||
4205 | struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; | ||
4206 | |||
4207 | if (!logical_drive) | ||
4208 | return -ENODEV; | ||
4209 | |||
4210 | if (qdepth < 1) | ||
4211 | qdepth = 1; | ||
4212 | else if (qdepth > logical_drive->queue_depth) | ||
4213 | qdepth = logical_drive->queue_depth; | ||
4214 | |||
4215 | return scsi_change_queue_depth(sdev, qdepth); | ||
4060 | } | 4216 | } |
4061 | 4217 | ||
4062 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 4218 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
@@ -4096,11 +4252,11 @@ static int hpsa_register_scsi(struct ctlr_info *h) | |||
4096 | sh->max_cmd_len = MAX_COMMAND_SIZE; | 4252 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
4097 | sh->max_lun = HPSA_MAX_LUN; | 4253 | sh->max_lun = HPSA_MAX_LUN; |
4098 | sh->max_id = HPSA_MAX_LUN; | 4254 | sh->max_id = HPSA_MAX_LUN; |
4099 | sh->can_queue = h->nr_cmds; | 4255 | sh->can_queue = h->nr_cmds - |
4100 | if (h->hba_mode_enabled) | 4256 | HPSA_CMDS_RESERVED_FOR_ABORTS - |
4101 | sh->cmd_per_lun = 7; | 4257 | HPSA_CMDS_RESERVED_FOR_DRIVER - |
4102 | else | 4258 | HPSA_MAX_CONCURRENT_PASSTHRUS; |
4103 | sh->cmd_per_lun = h->nr_cmds; | 4259 | sh->cmd_per_lun = sh->can_queue; |
4104 | sh->sg_tablesize = h->maxsgentries; | 4260 | sh->sg_tablesize = h->maxsgentries; |
4105 | h->scsi_host = sh; | 4261 | h->scsi_host = sh; |
4106 | sh->hostdata[0] = (unsigned long) h; | 4262 | sh->hostdata[0] = (unsigned long) h; |
@@ -4131,7 +4287,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
4131 | int waittime = 1; /* seconds */ | 4287 | int waittime = 1; /* seconds */ |
4132 | struct CommandList *c; | 4288 | struct CommandList *c; |
4133 | 4289 | ||
4134 | c = cmd_special_alloc(h); | 4290 | c = cmd_alloc(h); |
4135 | if (!c) { | 4291 | if (!c) { |
4136 | dev_warn(&h->pdev->dev, "out of memory in " | 4292 | dev_warn(&h->pdev->dev, "out of memory in " |
4137 | "wait_for_device_to_become_ready.\n"); | 4293 | "wait_for_device_to_become_ready.\n"); |
@@ -4177,7 +4333,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
4177 | else | 4333 | else |
4178 | dev_warn(&h->pdev->dev, "device is ready.\n"); | 4334 | dev_warn(&h->pdev->dev, "device is ready.\n"); |
4179 | 4335 | ||
4180 | cmd_special_free(h, c); | 4336 | cmd_free(h, c); |
4181 | return rc; | 4337 | return rc; |
4182 | } | 4338 | } |
4183 | 4339 | ||
@@ -4194,6 +4350,10 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
4194 | h = sdev_to_hba(scsicmd->device); | 4350 | h = sdev_to_hba(scsicmd->device); |
4195 | if (h == NULL) /* paranoia */ | 4351 | if (h == NULL) /* paranoia */ |
4196 | return FAILED; | 4352 | return FAILED; |
4353 | |||
4354 | if (lockup_detected(h)) | ||
4355 | return FAILED; | ||
4356 | |||
4197 | dev = scsicmd->device->hostdata; | 4357 | dev = scsicmd->device->hostdata; |
4198 | if (!dev) { | 4358 | if (!dev) { |
4199 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | 4359 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " |
@@ -4227,13 +4387,15 @@ static void swizzle_abort_tag(u8 *tag) | |||
4227 | } | 4387 | } |
4228 | 4388 | ||
4229 | static void hpsa_get_tag(struct ctlr_info *h, | 4389 | static void hpsa_get_tag(struct ctlr_info *h, |
4230 | struct CommandList *c, u32 *taglower, u32 *tagupper) | 4390 | struct CommandList *c, __le32 *taglower, __le32 *tagupper) |
4231 | { | 4391 | { |
4392 | u64 tag; | ||
4232 | if (c->cmd_type == CMD_IOACCEL1) { | 4393 | if (c->cmd_type == CMD_IOACCEL1) { |
4233 | struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) | 4394 | struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) |
4234 | &h->ioaccel_cmd_pool[c->cmdindex]; | 4395 | &h->ioaccel_cmd_pool[c->cmdindex]; |
4235 | *tagupper = (u32) (cm1->tag >> 32); | 4396 | tag = le64_to_cpu(cm1->tag); |
4236 | *taglower = (u32) (cm1->tag & 0x0ffffffffULL); | 4397 | *tagupper = cpu_to_le32(tag >> 32); |
4398 | *taglower = cpu_to_le32(tag); | ||
4237 | return; | 4399 | return; |
4238 | } | 4400 | } |
4239 | if (c->cmd_type == CMD_IOACCEL2) { | 4401 | if (c->cmd_type == CMD_IOACCEL2) { |
@@ -4244,8 +4406,9 @@ static void hpsa_get_tag(struct ctlr_info *h, | |||
4244 | *taglower = cm2->Tag; | 4406 | *taglower = cm2->Tag; |
4245 | return; | 4407 | return; |
4246 | } | 4408 | } |
4247 | *tagupper = (u32) (c->Header.tag >> 32); | 4409 | tag = le64_to_cpu(c->Header.tag); |
4248 | *taglower = (u32) (c->Header.tag & 0x0ffffffffULL); | 4410 | *tagupper = cpu_to_le32(tag >> 32); |
4411 | *taglower = cpu_to_le32(tag); | ||
4249 | } | 4412 | } |
4250 | 4413 | ||
4251 | static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | 4414 | static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, |
@@ -4254,11 +4417,11 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
4254 | int rc = IO_OK; | 4417 | int rc = IO_OK; |
4255 | struct CommandList *c; | 4418 | struct CommandList *c; |
4256 | struct ErrorInfo *ei; | 4419 | struct ErrorInfo *ei; |
4257 | u32 tagupper, taglower; | 4420 | __le32 tagupper, taglower; |
4258 | 4421 | ||
4259 | c = cmd_special_alloc(h); | 4422 | c = cmd_alloc(h); |
4260 | if (c == NULL) { /* trouble... */ | 4423 | if (c == NULL) { /* trouble... */ |
4261 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 4424 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
4262 | return -ENOMEM; | 4425 | return -ENOMEM; |
4263 | } | 4426 | } |
4264 | 4427 | ||
@@ -4287,62 +4450,12 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
4287 | rc = -1; | 4450 | rc = -1; |
4288 | break; | 4451 | break; |
4289 | } | 4452 | } |
4290 | cmd_special_free(h, c); | 4453 | cmd_free(h, c); |
4291 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", | 4454 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", |
4292 | __func__, tagupper, taglower); | 4455 | __func__, tagupper, taglower); |
4293 | return rc; | 4456 | return rc; |
4294 | } | 4457 | } |
4295 | 4458 | ||
4296 | /* | ||
4297 | * hpsa_find_cmd_in_queue | ||
4298 | * | ||
4299 | * Used to determine whether a command (find) is still present | ||
4300 | * in queue_head. Optionally excludes the last element of queue_head. | ||
4301 | * | ||
4302 | * This is used to avoid unnecessary aborts. Commands in h->reqQ have | ||
4303 | * not yet been submitted, and so can be aborted by the driver without | ||
4304 | * sending an abort to the hardware. | ||
4305 | * | ||
4306 | * Returns pointer to command if found in queue, NULL otherwise. | ||
4307 | */ | ||
4308 | static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, | ||
4309 | struct scsi_cmnd *find, struct list_head *queue_head) | ||
4310 | { | ||
4311 | unsigned long flags; | ||
4312 | struct CommandList *c = NULL; /* ptr into cmpQ */ | ||
4313 | |||
4314 | if (!find) | ||
4315 | return NULL; | ||
4316 | spin_lock_irqsave(&h->lock, flags); | ||
4317 | list_for_each_entry(c, queue_head, list) { | ||
4318 | if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ | ||
4319 | continue; | ||
4320 | if (c->scsi_cmd == find) { | ||
4321 | spin_unlock_irqrestore(&h->lock, flags); | ||
4322 | return c; | ||
4323 | } | ||
4324 | } | ||
4325 | spin_unlock_irqrestore(&h->lock, flags); | ||
4326 | return NULL; | ||
4327 | } | ||
4328 | |||
4329 | static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, | ||
4330 | u8 *tag, struct list_head *queue_head) | ||
4331 | { | ||
4332 | unsigned long flags; | ||
4333 | struct CommandList *c; | ||
4334 | |||
4335 | spin_lock_irqsave(&h->lock, flags); | ||
4336 | list_for_each_entry(c, queue_head, list) { | ||
4337 | if (memcmp(&c->Header.tag, tag, 8) != 0) | ||
4338 | continue; | ||
4339 | spin_unlock_irqrestore(&h->lock, flags); | ||
4340 | return c; | ||
4341 | } | ||
4342 | spin_unlock_irqrestore(&h->lock, flags); | ||
4343 | return NULL; | ||
4344 | } | ||
4345 | |||
4346 | /* ioaccel2 path firmware cannot handle abort task requests. | 4459 | /* ioaccel2 path firmware cannot handle abort task requests. |
4347 | * Change abort requests to physical target reset, and send to the | 4460 | * Change abort requests to physical target reset, and send to the |
4348 | * address of the physical disk used for the ioaccel 2 command. | 4461 | * address of the physical disk used for the ioaccel 2 command. |
@@ -4360,7 +4473,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, | |||
4360 | unsigned char *psa = &phys_scsi3addr[0]; | 4473 | unsigned char *psa = &phys_scsi3addr[0]; |
4361 | 4474 | ||
4362 | /* Get a pointer to the hpsa logical device. */ | 4475 | /* Get a pointer to the hpsa logical device. */ |
4363 | scmd = (struct scsi_cmnd *) abort->scsi_cmd; | 4476 | scmd = abort->scsi_cmd; |
4364 | dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); | 4477 | dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); |
4365 | if (dev == NULL) { | 4478 | if (dev == NULL) { |
4366 | dev_warn(&h->pdev->dev, | 4479 | dev_warn(&h->pdev->dev, |
@@ -4429,10 +4542,6 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, | |||
4429 | static int hpsa_send_abort_both_ways(struct ctlr_info *h, | 4542 | static int hpsa_send_abort_both_ways(struct ctlr_info *h, |
4430 | unsigned char *scsi3addr, struct CommandList *abort) | 4543 | unsigned char *scsi3addr, struct CommandList *abort) |
4431 | { | 4544 | { |
4432 | u8 swizzled_tag[8]; | ||
4433 | struct CommandList *c; | ||
4434 | int rc = 0, rc2 = 0; | ||
4435 | |||
4436 | /* ioccelerator mode 2 commands should be aborted via the | 4545 | /* ioccelerator mode 2 commands should be aborted via the |
4437 | * accelerated path, since RAID path is unaware of these commands, | 4546 | * accelerated path, since RAID path is unaware of these commands, |
4438 | * but underlying firmware can't handle abort TMF. | 4547 | * but underlying firmware can't handle abort TMF. |
@@ -4441,27 +4550,8 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, | |||
4441 | if (abort->cmd_type == CMD_IOACCEL2) | 4550 | if (abort->cmd_type == CMD_IOACCEL2) |
4442 | return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); | 4551 | return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); |
4443 | 4552 | ||
4444 | /* we do not expect to find the swizzled tag in our queue, but | 4553 | return hpsa_send_abort(h, scsi3addr, abort, 0) && |
4445 | * check anyway just to be sure the assumptions which make this | 4554 | hpsa_send_abort(h, scsi3addr, abort, 1); |
4446 | * the case haven't become wrong. | ||
4447 | */ | ||
4448 | memcpy(swizzled_tag, &abort->Request.CDB[4], 8); | ||
4449 | swizzle_abort_tag(swizzled_tag); | ||
4450 | c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); | ||
4451 | if (c != NULL) { | ||
4452 | dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); | ||
4453 | return hpsa_send_abort(h, scsi3addr, abort, 0); | ||
4454 | } | ||
4455 | rc = hpsa_send_abort(h, scsi3addr, abort, 0); | ||
4456 | |||
4457 | /* if the command is still in our queue, we can't conclude that it was | ||
4458 | * aborted (it might have just completed normally) but in any case | ||
4459 | * we don't need to try to abort it another way. | ||
4460 | */ | ||
4461 | c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); | ||
4462 | if (c) | ||
4463 | rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); | ||
4464 | return rc && rc2; | ||
4465 | } | 4555 | } |
4466 | 4556 | ||
4467 | /* Send an abort for the specified command. | 4557 | /* Send an abort for the specified command. |
@@ -4475,11 +4565,11 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4475 | struct ctlr_info *h; | 4565 | struct ctlr_info *h; |
4476 | struct hpsa_scsi_dev_t *dev; | 4566 | struct hpsa_scsi_dev_t *dev; |
4477 | struct CommandList *abort; /* pointer to command to be aborted */ | 4567 | struct CommandList *abort; /* pointer to command to be aborted */ |
4478 | struct CommandList *found; | ||
4479 | struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ | 4568 | struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ |
4480 | char msg[256]; /* For debug messaging. */ | 4569 | char msg[256]; /* For debug messaging. */ |
4481 | int ml = 0; | 4570 | int ml = 0; |
4482 | u32 tagupper, taglower; | 4571 | __le32 tagupper, taglower; |
4572 | int refcount; | ||
4483 | 4573 | ||
4484 | /* Find the controller of the command to be aborted */ | 4574 | /* Find the controller of the command to be aborted */ |
4485 | h = sdev_to_hba(sc->device); | 4575 | h = sdev_to_hba(sc->device); |
@@ -4487,6 +4577,9 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4487 | "ABORT REQUEST FAILED, Controller lookup failed.\n")) | 4577 | "ABORT REQUEST FAILED, Controller lookup failed.\n")) |
4488 | return FAILED; | 4578 | return FAILED; |
4489 | 4579 | ||
4580 | if (lockup_detected(h)) | ||
4581 | return FAILED; | ||
4582 | |||
4490 | /* Check that controller supports some kind of task abort */ | 4583 | /* Check that controller supports some kind of task abort */ |
4491 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && | 4584 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && |
4492 | !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) | 4585 | !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) |
@@ -4508,41 +4601,23 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4508 | /* Get SCSI command to be aborted */ | 4601 | /* Get SCSI command to be aborted */ |
4509 | abort = (struct CommandList *) sc->host_scribble; | 4602 | abort = (struct CommandList *) sc->host_scribble; |
4510 | if (abort == NULL) { | 4603 | if (abort == NULL) { |
4511 | dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", | 4604 | /* This can happen if the command already completed. */ |
4512 | msg); | 4605 | return SUCCESS; |
4513 | return FAILED; | 4606 | } |
4607 | refcount = atomic_inc_return(&abort->refcount); | ||
4608 | if (refcount == 1) { /* Command is done already. */ | ||
4609 | cmd_free(h, abort); | ||
4610 | return SUCCESS; | ||
4514 | } | 4611 | }
4515 | hpsa_get_tag(h, abort, &taglower, &tagupper); | 4612 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
4516 | ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); | 4613 | ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); |
4517 | as = (struct scsi_cmnd *) abort->scsi_cmd; | 4614 | as = abort->scsi_cmd; |
4518 | if (as != NULL) | 4615 | if (as != NULL) |
4519 | ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", | 4616 | ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", |
4520 | as->cmnd[0], as->serial_number); | 4617 | as->cmnd[0], as->serial_number); |
4521 | dev_dbg(&h->pdev->dev, "%s\n", msg); | 4618 | dev_dbg(&h->pdev->dev, "%s\n", msg); |
4522 | dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", | 4619 | dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", |
4523 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | 4620 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); |
4524 | |||
4525 | /* Search reqQ to See if command is queued but not submitted, | ||
4526 | * if so, complete the command with aborted status and remove | ||
4527 | * it from the reqQ. | ||
4528 | */ | ||
4529 | found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); | ||
4530 | if (found) { | ||
4531 | found->err_info->CommandStatus = CMD_ABORTED; | ||
4532 | finish_cmd(found); | ||
4533 | dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", | ||
4534 | msg); | ||
4535 | return SUCCESS; | ||
4536 | } | ||
4537 | |||
4538 | /* not in reqQ, if also not in cmpQ, must have already completed */ | ||
4539 | found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); | ||
4540 | if (!found) { | ||
4541 | dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", | ||
4542 | msg); | ||
4543 | return SUCCESS; | ||
4544 | } | ||
4545 | |||
4546 | /* | 4621 | /* |
4547 | * Command is in flight, or possibly already completed | 4622 | * Command is in flight, or possibly already completed |
4548 | * by the firmware (but not to the scsi mid layer) but we can't | 4623 | * by the firmware (but not to the scsi mid layer) but we can't |
@@ -4554,6 +4629,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4554 | dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", | 4629 | dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", |
4555 | h->scsi_host->host_no, | 4630 | h->scsi_host->host_no, |
4556 | dev->bus, dev->target, dev->lun); | 4631 | dev->bus, dev->target, dev->lun); |
4632 | cmd_free(h, abort); | ||
4557 | return FAILED; | 4633 | return FAILED; |
4558 | } | 4634 | } |
4559 | dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); | 4635 | dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); |
@@ -4565,32 +4641,38 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4565 | */ | 4641 | */ |
4566 | #define ABORT_COMPLETE_WAIT_SECS 30 | 4642 | #define ABORT_COMPLETE_WAIT_SECS 30 |
4567 | for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { | 4643 | for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { |
4568 | found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); | 4644 | refcount = atomic_read(&abort->refcount); |
4569 | if (!found) | 4645 | if (refcount < 2) { |
4646 | cmd_free(h, abort); | ||
4570 | return SUCCESS; | 4647 | return SUCCESS; |
4571 | msleep(100); | 4648 | } else { |
4649 | msleep(100); | ||
4650 | } | ||
4572 | } | 4651 | } |
4573 | dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", | 4652 | dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", |
4574 | msg, ABORT_COMPLETE_WAIT_SECS); | 4653 | msg, ABORT_COMPLETE_WAIT_SECS); |
4654 | cmd_free(h, abort); | ||
4575 | return FAILED; | 4655 | return FAILED; |
4576 | } | 4656 | } |
4577 | 4657 | ||
4578 | |||
4579 | /* | 4658 | /* |
4580 | * For operations that cannot sleep, a command block is allocated at init, | 4659 | * For operations that cannot sleep, a command block is allocated at init, |
4581 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track | 4660 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track |
4582 | * which ones are free or in use. Lock must be held when calling this. | 4661 | * which ones are free or in use. Lock must be held when calling this. |
4583 | * cmd_free() is the complement. | 4662 | * cmd_free() is the complement. |
4584 | */ | 4663 | */ |
4664 | |||
4585 | static struct CommandList *cmd_alloc(struct ctlr_info *h) | 4665 | static struct CommandList *cmd_alloc(struct ctlr_info *h) |
4586 | { | 4666 | { |
4587 | struct CommandList *c; | 4667 | struct CommandList *c; |
4588 | int i; | 4668 | int i; |
4589 | union u64bit temp64; | 4669 | union u64bit temp64; |
4590 | dma_addr_t cmd_dma_handle, err_dma_handle; | 4670 | dma_addr_t cmd_dma_handle, err_dma_handle; |
4591 | int loopcount; | 4671 | int refcount; |
4672 | unsigned long offset; | ||
4592 | 4673 | ||
4593 | /* There is some *extremely* small but non-zero chance that that | 4674 | /* |
4675 | * There is some *extremely* small but non-zero chance that that | ||
4594 | * multiple threads could get in here, and one thread could | 4676 | * multiple threads could get in here, and one thread could |
4595 | * be scanning through the list of bits looking for a free | 4677 | * be scanning through the list of bits looking for a free |
4596 | * one, but the free ones are always behind him, and other | 4678 | * one, but the free ones are always behind him, and other |
@@ -4601,24 +4683,30 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
4601 | * infrequently as to be indistinguishable from never. | 4683 | * infrequently as to be indistinguishable from never. |
4602 | */ | 4684 | */ |
4603 | 4685 | ||
4604 | loopcount = 0; | 4686 | offset = h->last_allocation; /* benignly racy */ |
4605 | do { | 4687 | for (;;) { |
4606 | i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); | 4688 | i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); |
4607 | if (i == h->nr_cmds) | 4689 | if (unlikely(i == h->nr_cmds)) { |
4608 | i = 0; | 4690 | offset = 0; |
4609 | loopcount++; | 4691 | continue; |
4610 | } while (test_and_set_bit(i & (BITS_PER_LONG - 1), | 4692 | } |
4611 | h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 && | 4693 | c = h->cmd_pool + i; |
4612 | loopcount < 10); | 4694 | refcount = atomic_inc_return(&c->refcount); |
4613 | 4695 | if (unlikely(refcount > 1)) { | |
4614 | /* Thread got starved? We do not expect this to ever happen. */ | 4696 | cmd_free(h, c); /* already in use */ |
4615 | if (loopcount >= 10) | 4697 | offset = (i + 1) % h->nr_cmds; |
4616 | return NULL; | 4698 | continue; |
4617 | 4699 | } | |
4618 | c = h->cmd_pool + i; | 4700 | set_bit(i & (BITS_PER_LONG - 1), |
4619 | memset(c, 0, sizeof(*c)); | 4701 | h->cmd_pool_bits + (i / BITS_PER_LONG)); |
4620 | cmd_dma_handle = h->cmd_pool_dhandle | 4702 | break; /* it's ours now. */ |
4621 | + i * sizeof(*c); | 4703 | } |
4704 | h->last_allocation = i; /* benignly racy */ | ||
4705 | |||
4706 | /* Zero out all of commandlist except the last field, refcount */ | ||
4707 | memset(c, 0, offsetof(struct CommandList, refcount)); | ||
4708 | c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT)); | ||
4709 | cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); | ||
4622 | c->err_info = h->errinfo_pool + i; | 4710 | c->err_info = h->errinfo_pool + i; |
4623 | memset(c->err_info, 0, sizeof(*c->err_info)); | 4711 | memset(c->err_info, 0, sizeof(*c->err_info)); |
4624 | err_dma_handle = h->errinfo_pool_dhandle | 4712 | err_dma_handle = h->errinfo_pool_dhandle |
@@ -4626,45 +4714,10 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
4626 | 4714 | ||
4627 | c->cmdindex = i; | 4715 | c->cmdindex = i; |
4628 | 4716 | ||
4629 | INIT_LIST_HEAD(&c->list); | ||
4630 | c->busaddr = (u32) cmd_dma_handle; | 4717 | c->busaddr = (u32) cmd_dma_handle; |
4631 | temp64.val = (u64) err_dma_handle; | 4718 | temp64.val = (u64) err_dma_handle; |
4632 | c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); | 4719 | c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); |
4633 | c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); | 4720 | c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); |
4634 | |||
4635 | c->h = h; | ||
4636 | return c; | ||
4637 | } | ||
4638 | |||
4639 | /* For operations that can wait for kmalloc to possibly sleep, | ||
4640 | * this routine can be called. Lock need not be held to call | ||
4641 | * cmd_special_alloc. cmd_special_free() is the complement. | ||
4642 | */ | ||
4643 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | ||
4644 | { | ||
4645 | struct CommandList *c; | ||
4646 | dma_addr_t cmd_dma_handle, err_dma_handle; | ||
4647 | |||
4648 | c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); | ||
4649 | if (c == NULL) | ||
4650 | return NULL; | ||
4651 | |||
4652 | c->cmd_type = CMD_SCSI; | ||
4653 | c->cmdindex = -1; | ||
4654 | |||
4655 | c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info), | ||
4656 | &err_dma_handle); | ||
4657 | |||
4658 | if (c->err_info == NULL) { | ||
4659 | pci_free_consistent(h->pdev, | ||
4660 | sizeof(*c), c, cmd_dma_handle); | ||
4661 | return NULL; | ||
4662 | } | ||
4663 | |||
4664 | INIT_LIST_HEAD(&c->list); | ||
4665 | c->busaddr = (u32) cmd_dma_handle; | ||
4666 | c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); | ||
4667 | c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); | ||
4668 | 4721 | ||
4669 | c->h = h; | 4722 | c->h = h; |
4670 | return c; | 4723 | return c; |
@@ -4672,20 +4725,13 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |||
4672 | 4725 | ||
4673 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) | 4726 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) |
4674 | { | 4727 | { |
4675 | int i; | 4728 | if (atomic_dec_and_test(&c->refcount)) { |
4676 | 4729 | int i; | |
4677 | i = c - h->cmd_pool; | ||
4678 | clear_bit(i & (BITS_PER_LONG - 1), | ||
4679 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | ||
4680 | } | ||
4681 | 4730 | ||
4682 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | 4731 | i = c - h->cmd_pool; |
4683 | { | 4732 | clear_bit(i & (BITS_PER_LONG - 1), |
4684 | pci_free_consistent(h->pdev, sizeof(*c->err_info), | 4733 | h->cmd_pool_bits + (i / BITS_PER_LONG)); |
4685 | c->err_info, | 4734 | } |
4686 | (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr)); | ||
4687 | pci_free_consistent(h->pdev, sizeof(*c), | ||
4688 | c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); | ||
4689 | } | 4735 | } |
4690 | 4736 | ||
4691 | #ifdef CONFIG_COMPAT | 4737 | #ifdef CONFIG_COMPAT |
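The hunks above fold cmd_special_alloc()/cmd_special_free() into a single pool allocator: every CommandList now carries a refcount, cmd_alloc() scans the bitmap starting from the benignly racy last_allocation hint and owns a slot when atomic_inc_return() yields 1, and cmd_free() releases the slot when the count drops back to zero; the abort handler uses the same refcount to pin a command that might complete concurrently. A simplified userspace model of that claim/release protocol (C11 atomics and hypothetical names, not the driver's structures):

    #include <stdatomic.h>
    #include <stddef.h>

    #define POOL_SIZE 16

    struct cmd {
        atomic_int refcount;    /* 0 = free, >= 1 = in use */
    };

    static struct cmd pool[POOL_SIZE];
    static size_t last_alloc;   /* benignly racy hint, as in the patch */

    static struct cmd *cmd_alloc(void)
    {
        size_t start = last_alloc;

        for (size_t i = 0; i < POOL_SIZE; i++) {
            size_t idx = (start + i) % POOL_SIZE;

            /* First taker sees the count go 0 -> 1 and owns the slot. */
            if (atomic_fetch_add(&pool[idx].refcount, 1) == 0) {
                last_alloc = idx;
                return &pool[idx];
            }
            /* Lost the race or slot busy: drop our reference and move on. */
            atomic_fetch_sub(&pool[idx].refcount, 1);
        }
        return NULL;    /* pool exhausted */
    }

    static void cmd_free(struct cmd *c)
    {
        /* Last reference returns the slot to the pool. */
        atomic_fetch_sub(&c->refcount, 1);
    }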
@@ -4866,7 +4912,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4866 | memset(buff, 0, iocommand.buf_size); | 4912 | memset(buff, 0, iocommand.buf_size); |
4867 | } | 4913 | } |
4868 | } | 4914 | } |
4869 | c = cmd_special_alloc(h); | 4915 | c = cmd_alloc(h); |
4870 | if (c == NULL) { | 4916 | if (c == NULL) { |
4871 | rc = -ENOMEM; | 4917 | rc = -ENOMEM; |
4872 | goto out_kfree; | 4918 | goto out_kfree; |
@@ -4883,8 +4929,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4883 | c->Header.SGTotal = cpu_to_le16(0); | 4929 | c->Header.SGTotal = cpu_to_le16(0); |
4884 | } | 4930 | } |
4885 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); | 4931 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); |
4886 | /* use the kernel address the cmd block for tag */ | ||
4887 | c->Header.tag = c->busaddr; | ||
4888 | 4932 | ||
4889 | /* Fill in Request block */ | 4933 | /* Fill in Request block */ |
4890 | memcpy(&c->Request, &iocommand.Request, | 4934 | memcpy(&c->Request, &iocommand.Request, |
@@ -4925,7 +4969,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4925 | } | 4969 | } |
4926 | } | 4970 | } |
4927 | out: | 4971 | out: |
4928 | cmd_special_free(h, c); | 4972 | cmd_free(h, c); |
4929 | out_kfree: | 4973 | out_kfree: |
4930 | kfree(buff); | 4974 | kfree(buff); |
4931 | return rc; | 4975 | return rc; |
@@ -4940,7 +4984,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4940 | u64 temp64; | 4984 | u64 temp64; |
4941 | BYTE sg_used = 0; | 4985 | BYTE sg_used = 0; |
4942 | int status = 0; | 4986 | int status = 0; |
4943 | int i; | ||
4944 | u32 left; | 4987 | u32 left; |
4945 | u32 sz; | 4988 | u32 sz; |
4946 | BYTE __user *data_ptr; | 4989 | BYTE __user *data_ptr; |
@@ -5004,7 +5047,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5004 | data_ptr += sz; | 5047 | data_ptr += sz; |
5005 | sg_used++; | 5048 | sg_used++; |
5006 | } | 5049 | } |
5007 | c = cmd_special_alloc(h); | 5050 | c = cmd_alloc(h); |
5008 | if (c == NULL) { | 5051 | if (c == NULL) { |
5009 | status = -ENOMEM; | 5052 | status = -ENOMEM; |
5010 | goto cleanup1; | 5053 | goto cleanup1; |
@@ -5014,7 +5057,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5014 | c->Header.SGList = (u8) sg_used; | 5057 | c->Header.SGList = (u8) sg_used; |
5015 | c->Header.SGTotal = cpu_to_le16(sg_used); | 5058 | c->Header.SGTotal = cpu_to_le16(sg_used); |
5016 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); | 5059 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
5017 | c->Header.tag = c->busaddr; | ||
5018 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); | 5060 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); |
5019 | if (ioc->buf_size > 0) { | 5061 | if (ioc->buf_size > 0) { |
5020 | int i; | 5062 | int i; |
@@ -5047,6 +5089,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5047 | goto cleanup0; | 5089 | goto cleanup0; |
5048 | } | 5090 | } |
5049 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { | 5091 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { |
5092 | int i; | ||
5093 | |||
5050 | /* Copy the data out of the buffer we created */ | 5094 | /* Copy the data out of the buffer we created */ |
5051 | BYTE __user *ptr = ioc->buf; | 5095 | BYTE __user *ptr = ioc->buf; |
5052 | for (i = 0; i < sg_used; i++) { | 5096 | for (i = 0; i < sg_used; i++) { |
@@ -5059,9 +5103,11 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5059 | } | 5103 | } |
5060 | status = 0; | 5104 | status = 0; |
5061 | cleanup0: | 5105 | cleanup0: |
5062 | cmd_special_free(h, c); | 5106 | cmd_free(h, c); |
5063 | cleanup1: | 5107 | cleanup1: |
5064 | if (buff) { | 5108 | if (buff) { |
5109 | int i; | ||
5110 | |||
5065 | for (i = 0; i < sg_used; i++) | 5111 | for (i = 0; i < sg_used; i++) |
5066 | kfree(buff[i]); | 5112 | kfree(buff[i]); |
5067 | kfree(buff); | 5113 | kfree(buff); |
@@ -5079,35 +5125,6 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, | |||
5079 | (void) check_for_unit_attention(h, c); | 5125 | (void) check_for_unit_attention(h, c); |
5080 | } | 5126 | } |
5081 | 5127 | ||
5082 | static int increment_passthru_count(struct ctlr_info *h) | ||
5083 | { | ||
5084 | unsigned long flags; | ||
5085 | |||
5086 | spin_lock_irqsave(&h->passthru_count_lock, flags); | ||
5087 | if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { | ||
5088 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5089 | return -1; | ||
5090 | } | ||
5091 | h->passthru_count++; | ||
5092 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5093 | return 0; | ||
5094 | } | ||
5095 | |||
5096 | static void decrement_passthru_count(struct ctlr_info *h) | ||
5097 | { | ||
5098 | unsigned long flags; | ||
5099 | |||
5100 | spin_lock_irqsave(&h->passthru_count_lock, flags); | ||
5101 | if (h->passthru_count <= 0) { | ||
5102 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5103 | /* not expecting to get here. */ | ||
5104 | dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); | ||
5105 | return; | ||
5106 | } | ||
5107 | h->passthru_count--; | ||
5108 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5109 | } | ||
5110 | |||
5111 | /* | 5128 | /* |
5112 | * ioctl | 5129 | * ioctl |
5113 | */ | 5130 | */ |
@@ -5130,16 +5147,16 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | |||
5130 | case CCISS_GETDRIVVER: | 5147 | case CCISS_GETDRIVVER: |
5131 | return hpsa_getdrivver_ioctl(h, argp); | 5148 | return hpsa_getdrivver_ioctl(h, argp); |
5132 | case CCISS_PASSTHRU: | 5149 | case CCISS_PASSTHRU: |
5133 | if (increment_passthru_count(h)) | 5150 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
5134 | return -EAGAIN; | 5151 | return -EAGAIN; |
5135 | rc = hpsa_passthru_ioctl(h, argp); | 5152 | rc = hpsa_passthru_ioctl(h, argp); |
5136 | decrement_passthru_count(h); | 5153 | atomic_inc(&h->passthru_cmds_avail); |
5137 | return rc; | 5154 | return rc; |
5138 | case CCISS_BIG_PASSTHRU: | 5155 | case CCISS_BIG_PASSTHRU: |
5139 | if (increment_passthru_count(h)) | 5156 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
5140 | return -EAGAIN; | 5157 | return -EAGAIN; |
5141 | rc = hpsa_big_passthru_ioctl(h, argp); | 5158 | rc = hpsa_big_passthru_ioctl(h, argp); |
5142 | decrement_passthru_count(h); | 5159 | atomic_inc(&h->passthru_cmds_avail); |
5143 | return rc; | 5160 | return rc; |
5144 | default: | 5161 | default: |
5145 | return -ENOTTY; | 5162 | return -ENOTTY; |
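The ioctl hunk above replaces the spinlock-protected passthru counter with a counting semaphore built from an atomic: passthru_cmds_avail starts at the concurrency limit, atomic_dec_if_positive() either takes a slot or the caller bails out with -EAGAIN, and atomic_inc() returns the slot afterwards. A small userspace sketch of the same pattern (C11 atomics; the kernel's atomic_dec_if_positive() helper is modeled by hand, and the limit value is only an assumption standing in for HPSA_MAX_CONCURRENT_PASSTHRUS):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MAX_CONCURRENT 3    /* assumed limit for the sketch */

    static atomic_int slots_avail = MAX_CONCURRENT;

    /* Decrement only if the result stays >= 0; return false when no slot is free. */
    static bool take_slot(void)
    {
        int old = atomic_load(&slots_avail);

        while (old > 0) {
            if (atomic_compare_exchange_weak(&slots_avail, &old, old - 1))
                return true;    /* got a slot */
            /* 'old' was reloaded by the failed CAS; loop and retry */
        }
        return false;           /* caller would return -EAGAIN */
    }

    static void put_slot(void)
    {
        atomic_fetch_add(&slots_avail, 1);
    }

take_slot()/put_slot() bracket the ioctl body exactly the way atomic_dec_if_positive()/atomic_inc() do in the hunk above.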
@@ -5173,7 +5190,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5173 | { | 5190 | { |
5174 | int pci_dir = XFER_NONE; | 5191 | int pci_dir = XFER_NONE; |
5175 | struct CommandList *a; /* for commands to be aborted */ | 5192 | struct CommandList *a; /* for commands to be aborted */ |
5176 | u32 tupper, tlower; | ||
5177 | 5193 | ||
5178 | c->cmd_type = CMD_IOCTL_PEND; | 5194 | c->cmd_type = CMD_IOCTL_PEND; |
5179 | c->Header.ReplyQueue = 0; | 5195 | c->Header.ReplyQueue = 0; |
@@ -5184,7 +5200,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5184 | c->Header.SGList = 0; | 5200 | c->Header.SGList = 0; |
5185 | c->Header.SGTotal = cpu_to_le16(0); | 5201 | c->Header.SGTotal = cpu_to_le16(0); |
5186 | } | 5202 | } |
5187 | c->Header.tag = c->busaddr; | ||
5188 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); | 5203 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); |
5189 | 5204 | ||
5190 | if (cmd_type == TYPE_CMD) { | 5205 | if (cmd_type == TYPE_CMD) { |
@@ -5256,6 +5271,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5256 | c->Request.CDB[7] = (size >> 16) & 0xFF; | 5271 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
5257 | c->Request.CDB[8] = (size >> 8) & 0xFF; | 5272 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
5258 | break; | 5273 | break; |
5274 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: | ||
5275 | c->Request.CDBLen = 10; | ||
5276 | c->Request.type_attr_dir = | ||
5277 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | ||
5278 | c->Request.Timeout = 0; | ||
5279 | c->Request.CDB[0] = BMIC_READ; | ||
5280 | c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; | ||
5281 | c->Request.CDB[7] = (size >> 16) & 0xFF; | ||
5282 | c->Request.CDB[8] = (size >> 8) & 0XFF; | ||
5283 | break; | ||
5259 | default: | 5284 | default: |
5260 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); | 5285 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); |
5261 | BUG(); | 5286 | BUG(); |
@@ -5281,10 +5306,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5281 | break; | 5306 | break; |
5282 | case HPSA_ABORT_MSG: | 5307 | case HPSA_ABORT_MSG: |
5283 | a = buff; /* point to command to be aborted */ | 5308 | a = buff; /* point to command to be aborted */ |
5284 | dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx", | 5309 | dev_dbg(&h->pdev->dev, |
5310 | "Abort Tag:0x%016llx request Tag:0x%016llx", | ||
5285 | a->Header.tag, c->Header.tag); | 5311 | a->Header.tag, c->Header.tag); |
5286 | tlower = (u32) (a->Header.tag >> 32); | ||
5287 | tupper = (u32) (a->Header.tag & 0x0ffffffffULL); | ||
5288 | c->Request.CDBLen = 16; | 5312 | c->Request.CDBLen = 16; |
5289 | c->Request.type_attr_dir = | 5313 | c->Request.type_attr_dir = |
5290 | TYPE_ATTR_DIR(cmd_type, | 5314 | TYPE_ATTR_DIR(cmd_type, |
@@ -5295,14 +5319,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5295 | c->Request.CDB[2] = 0x00; /* reserved */ | 5319 | c->Request.CDB[2] = 0x00; /* reserved */ |
5296 | c->Request.CDB[3] = 0x00; /* reserved */ | 5320 | c->Request.CDB[3] = 0x00; /* reserved */ |
5297 | /* Tag to abort goes in CDB[4]-CDB[11] */ | 5321 | /* Tag to abort goes in CDB[4]-CDB[11] */ |
5298 | c->Request.CDB[4] = tlower & 0xFF; | 5322 | memcpy(&c->Request.CDB[4], &a->Header.tag, |
5299 | c->Request.CDB[5] = (tlower >> 8) & 0xFF; | 5323 | sizeof(a->Header.tag)); |
5300 | c->Request.CDB[6] = (tlower >> 16) & 0xFF; | ||
5301 | c->Request.CDB[7] = (tlower >> 24) & 0xFF; | ||
5302 | c->Request.CDB[8] = tupper & 0xFF; | ||
5303 | c->Request.CDB[9] = (tupper >> 8) & 0xFF; | ||
5304 | c->Request.CDB[10] = (tupper >> 16) & 0xFF; | ||
5305 | c->Request.CDB[11] = (tupper >> 24) & 0xFF; | ||
5306 | c->Request.CDB[12] = 0x00; /* reserved */ | 5324 | c->Request.CDB[12] = 0x00; /* reserved */ |
5307 | c->Request.CDB[13] = 0x00; /* reserved */ | 5325 | c->Request.CDB[13] = 0x00; /* reserved */ |
5308 | c->Request.CDB[14] = 0x00; /* reserved */ | 5326 | c->Request.CDB[14] = 0x00; /* reserved */ |
@@ -5349,47 +5367,6 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) | |||
5349 | return page_remapped ? (page_remapped + page_offs) : NULL; | 5367 | return page_remapped ? (page_remapped + page_offs) : NULL; |
5350 | } | 5368 | } |
5351 | 5369 | ||
5352 | /* Takes cmds off the submission queue and sends them to the hardware, | ||
5353 | * then puts them on the queue of cmds waiting for completion. | ||
5354 | * Assumes h->lock is held | ||
5355 | */ | ||
5356 | static void start_io(struct ctlr_info *h, unsigned long *flags) | ||
5357 | { | ||
5358 | struct CommandList *c; | ||
5359 | |||
5360 | while (!list_empty(&h->reqQ)) { | ||
5361 | c = list_entry(h->reqQ.next, struct CommandList, list); | ||
5362 | /* can't do anything if fifo is full */ | ||
5363 | if ((h->access.fifo_full(h))) { | ||
5364 | h->fifo_recently_full = 1; | ||
5365 | dev_warn(&h->pdev->dev, "fifo full\n"); | ||
5366 | break; | ||
5367 | } | ||
5368 | h->fifo_recently_full = 0; | ||
5369 | |||
5370 | /* Get the first entry from the Request Q */ | ||
5371 | removeQ(c); | ||
5372 | h->Qdepth--; | ||
5373 | |||
5374 | /* Put job onto the completed Q */ | ||
5375 | addQ(&h->cmpQ, c); | ||
5376 | atomic_inc(&h->commands_outstanding); | ||
5377 | spin_unlock_irqrestore(&h->lock, *flags); | ||
5378 | /* Tell the controller execute command */ | ||
5379 | h->access.submit_command(h, c); | ||
5380 | spin_lock_irqsave(&h->lock, *flags); | ||
5381 | } | ||
5382 | } | ||
5383 | |||
5384 | static void lock_and_start_io(struct ctlr_info *h) | ||
5385 | { | ||
5386 | unsigned long flags; | ||
5387 | |||
5388 | spin_lock_irqsave(&h->lock, flags); | ||
5389 | start_io(h, &flags); | ||
5390 | spin_unlock_irqrestore(&h->lock, flags); | ||
5391 | } | ||
5392 | |||
5393 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) | 5370 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) |
5394 | { | 5371 | { |
5395 | return h->access.command_completed(h, q); | 5372 | return h->access.command_completed(h, q); |
@@ -5418,53 +5395,12 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index, | |||
5418 | 5395 | ||
5419 | static inline void finish_cmd(struct CommandList *c) | 5396 | static inline void finish_cmd(struct CommandList *c) |
5420 | { | 5397 | { |
5421 | unsigned long flags; | ||
5422 | int io_may_be_stalled = 0; | ||
5423 | struct ctlr_info *h = c->h; | ||
5424 | int count; | ||
5425 | |||
5426 | spin_lock_irqsave(&h->lock, flags); | ||
5427 | removeQ(c); | ||
5428 | |||
5429 | /* | ||
5430 | * Check for possibly stalled i/o. | ||
5431 | * | ||
5432 | * If a fifo_full condition is encountered, requests will back up | ||
5433 | * in h->reqQ. This queue is only emptied out by start_io which is | ||
5434 | * only called when a new i/o request comes in. If no i/o's are | ||
5435 | * forthcoming, the i/o's in h->reqQ can get stuck. So we call | ||
5436 | * start_io from here if we detect such a danger. | ||
5437 | * | ||
5438 | * Normally, we shouldn't hit this case, but pounding on the | ||
5439 | * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if | ||
5440 | * commands_outstanding is low. We want to avoid calling | ||
5441 | * start_io from in here as much as possible, and esp. don't | ||
5442 | * want to get in a cycle where we call start_io every time | ||
5443 | * through here. | ||
5444 | */ | ||
5445 | count = atomic_read(&h->commands_outstanding); | ||
5446 | spin_unlock_irqrestore(&h->lock, flags); | ||
5447 | if (unlikely(h->fifo_recently_full) && count < 5) | ||
5448 | io_may_be_stalled = 1; | ||
5449 | |||
5450 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); | 5398 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); |
5451 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI | 5399 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI |
5452 | || c->cmd_type == CMD_IOACCEL2)) | 5400 | || c->cmd_type == CMD_IOACCEL2)) |
5453 | complete_scsi_command(c); | 5401 | complete_scsi_command(c); |
5454 | else if (c->cmd_type == CMD_IOCTL_PEND) | 5402 | else if (c->cmd_type == CMD_IOCTL_PEND) |
5455 | complete(c->waiting); | 5403 | complete(c->waiting); |
5456 | if (unlikely(io_may_be_stalled)) | ||
5457 | lock_and_start_io(h); | ||
5458 | } | ||
5459 | |||
5460 | static inline u32 hpsa_tag_contains_index(u32 tag) | ||
5461 | { | ||
5462 | return tag & DIRECT_LOOKUP_BIT; | ||
5463 | } | ||
5464 | |||
5465 | static inline u32 hpsa_tag_to_index(u32 tag) | ||
5466 | { | ||
5467 | return tag >> DIRECT_LOOKUP_SHIFT; | ||
5468 | } | 5404 | } |
5469 | 5405 | ||
5470 | 5406 | ||
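With the reqQ/cmpQ walking removed, every completion is resolved by index: cmd_alloc() stamps the command's tag with its pool index shifted by DIRECT_LOOKUP_SHIFT, so process_indexed_cmd() can recover the CommandList with plain arithmetic instead of searching a list. A tiny model of that round trip (plain C; the shift value of 3 is only an assumption for illustration, the low bits being reserved for status/error flags):

    #include <assert.h>
    #include <stdint.h>

    #define DIRECT_LOOKUP_SHIFT 3   /* assumed value for the sketch */

    /* Encode a pool index into a completion tag. */
    static uint32_t index_to_tag(uint32_t index)
    {
        return index << DIRECT_LOOKUP_SHIFT;
    }

    /* Recover the pool index from the raw tag the controller hands back. */
    static uint32_t tag_to_index(uint32_t raw_tag)
    {
        return raw_tag >> DIRECT_LOOKUP_SHIFT;
    }

    int main(void)
    {
        for (uint32_t i = 0; i < 64; i++)
            assert(tag_to_index(index_to_tag(i)) == i);
        return 0;
    }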
@@ -5484,34 +5420,13 @@ static inline void process_indexed_cmd(struct ctlr_info *h, | |||
5484 | u32 tag_index; | 5420 | u32 tag_index; |
5485 | struct CommandList *c; | 5421 | struct CommandList *c; |
5486 | 5422 | ||
5487 | tag_index = hpsa_tag_to_index(raw_tag); | 5423 | tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; |
5488 | if (!bad_tag(h, tag_index, raw_tag)) { | 5424 | if (!bad_tag(h, tag_index, raw_tag)) { |
5489 | c = h->cmd_pool + tag_index; | 5425 | c = h->cmd_pool + tag_index; |
5490 | finish_cmd(c); | 5426 | finish_cmd(c); |
5491 | } | 5427 | } |
5492 | } | 5428 | } |
5493 | 5429 | ||
5494 | /* process completion of a non-indexed command */ | ||
5495 | static inline void process_nonindexed_cmd(struct ctlr_info *h, | ||
5496 | u32 raw_tag) | ||
5497 | { | ||
5498 | u32 tag; | ||
5499 | struct CommandList *c = NULL; | ||
5500 | unsigned long flags; | ||
5501 | |||
5502 | tag = hpsa_tag_discard_error_bits(h, raw_tag); | ||
5503 | spin_lock_irqsave(&h->lock, flags); | ||
5504 | list_for_each_entry(c, &h->cmpQ, list) { | ||
5505 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { | ||
5506 | spin_unlock_irqrestore(&h->lock, flags); | ||
5507 | finish_cmd(c); | ||
5508 | return; | ||
5509 | } | ||
5510 | } | ||
5511 | spin_unlock_irqrestore(&h->lock, flags); | ||
5512 | bad_tag(h, h->nr_cmds + 1, raw_tag); | ||
5513 | } | ||
5514 | |||
5515 | /* Some controllers, like p400, will give us one interrupt | 5430 | /* Some controllers, like p400, will give us one interrupt |
5516 | * after a soft reset, even if we turned interrupts off. | 5431 | * after a soft reset, even if we turned interrupts off. |
5517 | * Only need to check for this in the hpsa_xxx_discard_completions | 5432 | * Only need to check for this in the hpsa_xxx_discard_completions |
@@ -5589,10 +5504,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) | |||
5589 | while (interrupt_pending(h)) { | 5504 | while (interrupt_pending(h)) { |
5590 | raw_tag = get_next_completion(h, q); | 5505 | raw_tag = get_next_completion(h, q); |
5591 | while (raw_tag != FIFO_EMPTY) { | 5506 | while (raw_tag != FIFO_EMPTY) { |
5592 | if (likely(hpsa_tag_contains_index(raw_tag))) | 5507 | process_indexed_cmd(h, raw_tag); |
5593 | process_indexed_cmd(h, raw_tag); | ||
5594 | else | ||
5595 | process_nonindexed_cmd(h, raw_tag); | ||
5596 | raw_tag = next_command(h, q); | 5508 | raw_tag = next_command(h, q); |
5597 | } | 5509 | } |
5598 | } | 5510 | } |
@@ -5608,10 +5520,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) | |||
5608 | h->last_intr_timestamp = get_jiffies_64(); | 5520 | h->last_intr_timestamp = get_jiffies_64(); |
5609 | raw_tag = get_next_completion(h, q); | 5521 | raw_tag = get_next_completion(h, q); |
5610 | while (raw_tag != FIFO_EMPTY) { | 5522 | while (raw_tag != FIFO_EMPTY) { |
5611 | if (likely(hpsa_tag_contains_index(raw_tag))) | 5523 | process_indexed_cmd(h, raw_tag); |
5612 | process_indexed_cmd(h, raw_tag); | ||
5613 | else | ||
5614 | process_nonindexed_cmd(h, raw_tag); | ||
5615 | raw_tag = next_command(h, q); | 5524 | raw_tag = next_command(h, q); |
5616 | } | 5525 | } |
5617 | return IRQ_HANDLED; | 5526 | return IRQ_HANDLED; |
@@ -5633,7 +5542,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5633 | static const size_t cmd_sz = sizeof(*cmd) + | 5542 | static const size_t cmd_sz = sizeof(*cmd) + |
5634 | sizeof(cmd->ErrorDescriptor); | 5543 | sizeof(cmd->ErrorDescriptor); |
5635 | dma_addr_t paddr64; | 5544 | dma_addr_t paddr64; |
5636 | uint32_t paddr32, tag; | 5545 | __le32 paddr32; |
5546 | u32 tag; | ||
5637 | void __iomem *vaddr; | 5547 | void __iomem *vaddr; |
5638 | int i, err; | 5548 | int i, err; |
5639 | 5549 | ||
@@ -5648,7 +5558,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5648 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 5558 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
5649 | if (err) { | 5559 | if (err) { |
5650 | iounmap(vaddr); | 5560 | iounmap(vaddr); |
5651 | return -ENOMEM; | 5561 | return err; |
5652 | } | 5562 | } |
5653 | 5563 | ||
5654 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); | 5564 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); |
@@ -5661,12 +5571,12 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5661 | * although there's no guarantee, we assume that the address is at | 5571 | * although there's no guarantee, we assume that the address is at |
5662 | * least 4-byte aligned (most likely, it's page-aligned). | 5572 | * least 4-byte aligned (most likely, it's page-aligned). |
5663 | */ | 5573 | */ |
5664 | paddr32 = paddr64; | 5574 | paddr32 = cpu_to_le32(paddr64); |
5665 | 5575 | ||
5666 | cmd->CommandHeader.ReplyQueue = 0; | 5576 | cmd->CommandHeader.ReplyQueue = 0; |
5667 | cmd->CommandHeader.SGList = 0; | 5577 | cmd->CommandHeader.SGList = 0; |
5668 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); | 5578 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); |
5669 | cmd->CommandHeader.tag = paddr32; | 5579 | cmd->CommandHeader.tag = cpu_to_le64(paddr64); |
5670 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); | 5580 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); |
5671 | 5581 | ||
5672 | cmd->Request.CDBLen = 16; | 5582 | cmd->Request.CDBLen = 16; |
@@ -5677,14 +5587,14 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5677 | cmd->Request.CDB[1] = type; | 5587 | cmd->Request.CDB[1] = type; |
5678 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ | 5588 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ |
5679 | cmd->ErrorDescriptor.Addr = | 5589 | cmd->ErrorDescriptor.Addr = |
5680 | cpu_to_le64((paddr32 + sizeof(*cmd))); | 5590 | cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); |
5681 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); | 5591 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); |
5682 | 5592 | ||
5683 | writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); | 5593 | writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); |
5684 | 5594 | ||
5685 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | 5595 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
5686 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | 5596 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
5687 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) | 5597 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) |
5688 | break; | 5598 | break; |
5689 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | 5599 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
5690 | } | 5600 | } |
@@ -5718,8 +5628,6 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5718 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, | 5628 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
5719 | void __iomem *vaddr, u32 use_doorbell) | 5629 | void __iomem *vaddr, u32 use_doorbell) |
5720 | { | 5630 | { |
5721 | u16 pmcsr; | ||
5722 | int pos; | ||
5723 | 5631 | ||
5724 | if (use_doorbell) { | 5632 | if (use_doorbell) { |
5725 | /* For everything after the P600, the PCI power state method | 5633 | /* For everything after the P600, the PCI power state method |
@@ -5745,26 +5653,21 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, | |||
5745 | * this causes a secondary PCI reset which will reset the | 5653 | * this causes a secondary PCI reset which will reset the |
5746 | * controller." */ | 5654 | * controller." */ |
5747 | 5655 | ||
5748 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | 5656 | int rc = 0; |
5749 | if (pos == 0) { | 5657 | |
5750 | dev_err(&pdev->dev, | ||
5751 | "hpsa_reset_controller: " | ||
5752 | "PCI PM not supported\n"); | ||
5753 | return -ENODEV; | ||
5754 | } | ||
5755 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); | 5658 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); |
5659 | |||
5756 | /* enter the D3hot power management state */ | 5660 | /* enter the D3hot power management state */ |
5757 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | 5661 | rc = pci_set_power_state(pdev, PCI_D3hot); |
5758 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 5662 | if (rc) |
5759 | pmcsr |= PCI_D3hot; | 5663 | return rc; |
5760 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | ||
5761 | 5664 | ||
5762 | msleep(500); | 5665 | msleep(500); |
5763 | 5666 | ||
5764 | /* enter the D0 power management state */ | 5667 | /* enter the D0 power management state */ |
5765 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 5668 | rc = pci_set_power_state(pdev, PCI_D0); |
5766 | pmcsr |= PCI_D0; | 5669 | if (rc) |
5767 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | 5670 | return rc; |
5768 | 5671 | ||
5769 | /* | 5672 | /* |
5770 | * The P600 requires a small delay when changing states. | 5673 | * The P600 requires a small delay when changing states. |
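Read back-to-back, the hard-reset hunk above drops the hand-rolled PCI_PM_CTRL config-space writes and lets the PCI core perform the D3hot/D0 transition, propagating its error code instead of open-coding the capability lookup. A condensed view of the resulting flow (taken from the new side of the hunk, error handling abbreviated; not a standalone program):

    /* PCI-PM based controller reset, now via the PCI core helper. */
    int rc;

    rc = pci_set_power_state(pdev, PCI_D3hot);   /* enter D3hot */
    if (rc)
        return rc;
    msleep(500);
    rc = pci_set_power_state(pdev, PCI_D0);      /* back to D0 */
    if (rc)
        return rc;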
@@ -5858,8 +5761,12 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5858 | */ | 5761 | */ |
5859 | 5762 | ||
5860 | rc = hpsa_lookup_board_id(pdev, &board_id); | 5763 | rc = hpsa_lookup_board_id(pdev, &board_id); |
5861 | if (rc < 0 || !ctlr_is_resettable(board_id)) { | 5764 | if (rc < 0) { |
5862 | dev_warn(&pdev->dev, "Not resetting device.\n"); | 5765 | dev_warn(&pdev->dev, "Board ID not found\n"); |
5766 | return rc; | ||
5767 | } | ||
5768 | if (!ctlr_is_resettable(board_id)) { | ||
5769 | dev_warn(&pdev->dev, "Controller not resettable\n"); | ||
5863 | return -ENODEV; | 5770 | return -ENODEV; |
5864 | } | 5771 | } |
5865 | 5772 | ||
@@ -5892,7 +5799,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5892 | } | 5799 | } |
5893 | rc = write_driver_ver_to_cfgtable(cfgtable); | 5800 | rc = write_driver_ver_to_cfgtable(cfgtable); |
5894 | if (rc) | 5801 | if (rc) |
5895 | goto unmap_vaddr; | 5802 | goto unmap_cfgtable; |
5896 | 5803 | ||
5897 | /* If reset via doorbell register is supported, use that. | 5804 | /* If reset via doorbell register is supported, use that. |
5898 | * There are two such methods. Favor the newest method. | 5805 | * There are two such methods. Favor the newest method. |
@@ -5904,8 +5811,8 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5904 | } else { | 5811 | } else { |
5905 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 5812 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
5906 | if (use_doorbell) { | 5813 | if (use_doorbell) { |
5907 | dev_warn(&pdev->dev, "Soft reset not supported. " | 5814 | dev_warn(&pdev->dev, |
5908 | "Firmware update is required.\n"); | 5815 | "Soft reset not supported. Firmware update is required.\n"); |
5909 | rc = -ENOTSUPP; /* try soft reset */ | 5816 | rc = -ENOTSUPP; /* try soft reset */ |
5910 | goto unmap_cfgtable; | 5817 | goto unmap_cfgtable; |
5911 | } | 5818 | } |
@@ -5925,8 +5832,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5925 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); | 5832 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); |
5926 | if (rc) { | 5833 | if (rc) { |
5927 | dev_warn(&pdev->dev, | 5834 | dev_warn(&pdev->dev, |
5928 | "failed waiting for board to become ready " | 5835 | "Failed waiting for board to become ready after hard reset\n"); |
5929 | "after hard reset\n"); | ||
5930 | goto unmap_cfgtable; | 5836 | goto unmap_cfgtable; |
5931 | } | 5837 | } |
5932 | 5838 | ||
@@ -5977,7 +5883,7 @@ static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) | |||
5977 | readl(&(tb->HostWrite.CoalIntDelay))); | 5883 | readl(&(tb->HostWrite.CoalIntDelay))); |
5978 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", | 5884 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", |
5979 | readl(&(tb->HostWrite.CoalIntCount))); | 5885 | readl(&(tb->HostWrite.CoalIntCount))); |
5980 | dev_info(dev, " Max outstanding commands = 0x%d\n", | 5886 | dev_info(dev, " Max outstanding commands = %d\n", |
5981 | readl(&(tb->CmdsOutMax))); | 5887 | readl(&(tb->CmdsOutMax))); |
5982 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | 5888 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); |
5983 | for (i = 0; i < 16; i++) | 5889 | for (i = 0; i < 16; i++) |
@@ -6025,7 +5931,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
6025 | } | 5931 | } |
6026 | 5932 | ||
6027 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | 5933 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
6028 | * controllers that are capable. If not, we use IO-APIC mode. | 5934 | * controllers that are capable. If not, we use legacy INTx mode. |
6029 | */ | 5935 | */ |
6030 | 5936 | ||
6031 | static void hpsa_interrupt_mode(struct ctlr_info *h) | 5937 | static void hpsa_interrupt_mode(struct ctlr_info *h) |
@@ -6044,7 +5950,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) | |||
6044 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) | 5950 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) |
6045 | goto default_int_mode; | 5951 | goto default_int_mode; |
6046 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { | 5952 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
6047 | dev_info(&h->pdev->dev, "MSIX\n"); | 5953 | dev_info(&h->pdev->dev, "MSI-X capable controller\n"); |
6048 | h->msix_vector = MAX_REPLY_QUEUES; | 5954 | h->msix_vector = MAX_REPLY_QUEUES; |
6049 | if (h->msix_vector > num_online_cpus()) | 5955 | if (h->msix_vector > num_online_cpus()) |
6050 | h->msix_vector = num_online_cpus(); | 5956 | h->msix_vector = num_online_cpus(); |
@@ -6065,7 +5971,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) | |||
6065 | } | 5971 | } |
6066 | single_msi_mode: | 5972 | single_msi_mode: |
6067 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { | 5973 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
6068 | dev_info(&h->pdev->dev, "MSI\n"); | 5974 | dev_info(&h->pdev->dev, "MSI capable controller\n"); |
6069 | if (!pci_enable_msi(h->pdev)) | 5975 | if (!pci_enable_msi(h->pdev)) |
6070 | h->msi_vector = 1; | 5976 | h->msi_vector = 1; |
6071 | else | 5977 | else |
@@ -6172,8 +6078,10 @@ static int hpsa_find_cfgtables(struct ctlr_info *h) | |||
6172 | return rc; | 6078 | return rc; |
6173 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, | 6079 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
6174 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); | 6080 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
6175 | if (!h->cfgtable) | 6081 | if (!h->cfgtable) { |
6082 | dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); | ||
6176 | return -ENOMEM; | 6083 | return -ENOMEM; |
6084 | } | ||
6177 | rc = write_driver_ver_to_cfgtable(h->cfgtable); | 6085 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
6178 | if (rc) | 6086 | if (rc) |
6179 | return rc; | 6087 | return rc; |
@@ -6204,6 +6112,15 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) | |||
6204 | } | 6112 | } |
6205 | } | 6113 | } |
6206 | 6114 | ||
6115 | /* If the controller reports that the total max sg entries is greater than 512, | ||
6116 | * then we know that chained SG blocks work. (Original smart arrays did not | ||
6117 | * support chained SG blocks and would return zero for max sg entries.) | ||
6118 | */ | ||
6119 | static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) | ||
6120 | { | ||
6121 | return h->maxsgentries > 512; | ||
6122 | } | ||
6123 | |||
6207 | /* Interrogate the hardware for some limits: | 6124 | /* Interrogate the hardware for some limits: |
6208 | * max commands, max SG elements without chaining, and with chaining, | 6125 | * max commands, max SG elements without chaining, and with chaining, |
6209 | * SG chain block size, etc. | 6126 | * SG chain block size, etc. |
@@ -6211,21 +6128,23 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) | |||
6211 | static void hpsa_find_board_params(struct ctlr_info *h) | 6128 | static void hpsa_find_board_params(struct ctlr_info *h) |
6212 | { | 6129 | { |
6213 | hpsa_get_max_perf_mode_cmds(h); | 6130 | hpsa_get_max_perf_mode_cmds(h); |
6214 | h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ | 6131 | h->nr_cmds = h->max_commands; |
6215 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); | 6132 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); |
6216 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); | 6133 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); |
6217 | /* | 6134 | if (hpsa_supports_chained_sg_blocks(h)) { |
6218 | * Limit in-command s/g elements to 32 save dma'able memory. | 6135 | /* Limit in-command s/g elements to 32 save dma'able memory. */ |
6219 | * Howvever spec says if 0, use 31 | ||
6220 | */ | ||
6221 | h->max_cmd_sg_entries = 31; | ||
6222 | if (h->maxsgentries > 512) { | ||
6223 | h->max_cmd_sg_entries = 32; | 6136 | h->max_cmd_sg_entries = 32; |
6224 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; | 6137 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; |
6225 | h->maxsgentries--; /* save one for chain pointer */ | 6138 | h->maxsgentries--; /* save one for chain pointer */ |
6226 | } else { | 6139 | } else { |
6227 | h->chainsize = 0; | 6140 | /* |
6141 | * Original smart arrays supported at most 31 s/g entries | ||
6142 | * embedded inline in the command (trying to use more | ||
6143 | * would lock up the controller) | ||
6144 | */ | ||
6145 | h->max_cmd_sg_entries = 31; | ||
6228 | h->maxsgentries = 31; /* default to traditional values */ | 6146 | h->maxsgentries = 31; /* default to traditional values */ |
6147 | h->chainsize = 0; | ||
6229 | } | 6148 | } |
6230 | 6149 | ||
6231 | /* Find out what task management functions are supported and cache */ | 6150 | /* Find out what task management functions are supported and cache */ |
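hpsa_find_board_params() now keys the scatter-gather limits off the new hpsa_supports_chained_sg_blocks() helper: controllers reporting more than 512 SG entries get 32 inline entries plus a chain block, while older arrays are capped at 31 inline entries with no chaining. A small standalone model of that decision (plain C; the struct fields mirror the names used in the hunk but the struct itself is only for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct board_params {
        unsigned int maxsgentries;        /* reported by the controller */
        unsigned int max_cmd_sg_entries;  /* inline SG entries per command */
        unsigned int chainsize;           /* entries available in a chain block */
    };

    static bool supports_chained_sg_blocks(const struct board_params *p)
    {
        return p->maxsgentries > 512;
    }

    static void find_sg_limits(struct board_params *p)
    {
        if (supports_chained_sg_blocks(p)) {
            p->max_cmd_sg_entries = 32;
            p->chainsize = p->maxsgentries - p->max_cmd_sg_entries;
            p->maxsgentries--;            /* one entry reserved for the chain pointer */
        } else {
            p->max_cmd_sg_entries = 31;   /* older arrays lock up beyond 31 inline entries */
            p->maxsgentries = 31;
            p->chainsize = 0;
        }
    }

    int main(void)
    {
        struct board_params newer = { .maxsgentries = 1024 };
        struct board_params older = { .maxsgentries = 0 };

        find_sg_limits(&newer);
        find_sg_limits(&older);
        printf("newer: inline=%u chain=%u\n", newer.max_cmd_sg_entries, newer.chainsize);
        printf("older: inline=%u chain=%u\n", older.max_cmd_sg_entries, older.chainsize);
        return 0;
    }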
@@ -6239,7 +6158,7 @@ static void hpsa_find_board_params(struct ctlr_info *h) | |||
6239 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) | 6158 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
6240 | { | 6159 | { |
6241 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { | 6160 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { |
6242 | dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); | 6161 | dev_err(&h->pdev->dev, "not a valid CISS config table\n"); |
6243 | return false; | 6162 | return false; |
6244 | } | 6163 | } |
6245 | return true; | 6164 | return true; |
@@ -6272,24 +6191,27 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) | |||
6272 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | 6191 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
6273 | } | 6192 | } |
6274 | 6193 | ||
6275 | static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) | 6194 | static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) |
6276 | { | 6195 | { |
6277 | int i; | 6196 | int i; |
6278 | u32 doorbell_value; | 6197 | u32 doorbell_value; |
6279 | unsigned long flags; | 6198 | unsigned long flags; |
6280 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ | 6199 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ |
6281 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 6200 | for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { |
6282 | spin_lock_irqsave(&h->lock, flags); | 6201 | spin_lock_irqsave(&h->lock, flags); |
6283 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | 6202 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
6284 | spin_unlock_irqrestore(&h->lock, flags); | 6203 | spin_unlock_irqrestore(&h->lock, flags); |
6285 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) | 6204 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) |
6286 | break; | 6205 | goto done; |
6287 | /* delay and try again */ | 6206 | /* delay and try again */ |
6288 | msleep(20); | 6207 | msleep(CLEAR_EVENT_WAIT_INTERVAL); |
6289 | } | 6208 | } |
6209 | return -ENODEV; | ||
6210 | done: | ||
6211 | return 0; | ||
6290 | } | 6212 | } |
6291 | 6213 | ||
6292 | static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) | 6214 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
6293 | { | 6215 | { |
6294 | int i; | 6216 | int i; |
6295 | u32 doorbell_value; | 6217 | u32 doorbell_value; |
@@ -6299,17 +6221,21 @@ static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) | |||
6299 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | 6221 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right |
6300 | * as we enter this code.) | 6222 | * as we enter this code.) |
6301 | */ | 6223 | */ |
6302 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 6224 | for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { |
6303 | spin_lock_irqsave(&h->lock, flags); | 6225 | spin_lock_irqsave(&h->lock, flags); |
6304 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | 6226 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
6305 | spin_unlock_irqrestore(&h->lock, flags); | 6227 | spin_unlock_irqrestore(&h->lock, flags); |
6306 | if (!(doorbell_value & CFGTBL_ChangeReq)) | 6228 | if (!(doorbell_value & CFGTBL_ChangeReq)) |
6307 | break; | 6229 | goto done; |
6308 | /* delay and try again */ | 6230 | /* delay and try again */ |
6309 | usleep_range(10000, 20000); | 6231 | msleep(MODE_CHANGE_WAIT_INTERVAL); |
6310 | } | 6232 | } |
6233 | return -ENODEV; | ||
6234 | done: | ||
6235 | return 0; | ||
6311 | } | 6236 | } |
6312 | 6237 | ||
6238 | /* return -ENODEV or other reason on error, 0 on success */ | ||
6313 | static int hpsa_enter_simple_mode(struct ctlr_info *h) | 6239 | static int hpsa_enter_simple_mode(struct ctlr_info *h) |
6314 | { | 6240 | { |
6315 | u32 trans_support; | 6241 | u32 trans_support; |
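Both ack-wait helpers above are converted from void to int so callers can detect a controller that never clears the doorbell bit: poll the register at a fixed interval, return 0 as soon as the bit drops, and -ENODEV once the retry budget is exhausted. A generic userspace model of that bounded-poll pattern (plain C; the register read is faked, and the budget/interval values are assumptions standing in for MAX_MODE_CHANGE_WAIT and MODE_CHANGE_WAIT_INTERVAL):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_WAIT_ITERATIONS 20   /* assumed retry budget */
    #define WAIT_INTERVAL_MS    10   /* assumed poll interval */

    /* Fake doorbell register for the sketch: the bit clears after a few reads. */
    static uint32_t read_doorbell(void)
    {
        static int reads;
        return (++reads < 3) ? 0x4u : 0x0u;
    }

    /* Poll until 'bit' clears: 0 on success, -ENODEV if the controller never acks. */
    static int wait_for_bit_clear(uint32_t bit)
    {
        for (int i = 0; i < MAX_WAIT_ITERATIONS; i++) {
            if (!(read_doorbell() & bit))
                return 0;
            usleep(WAIT_INTERVAL_MS * 1000);   /* delay and try again */
        }
        return -ENODEV;
    }

    int main(void)
    {
        printf("wait returned %d\n", wait_for_bit_clear(0x4));
        return 0;
    }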
@@ -6324,14 +6250,15 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h) | |||
6324 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); | 6250 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); |
6325 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); | 6251 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); |
6326 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 6252 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
6327 | hpsa_wait_for_mode_change_ack(h); | 6253 | if (hpsa_wait_for_mode_change_ack(h)) |
6254 | goto error; | ||
6328 | print_cfg_table(&h->pdev->dev, h->cfgtable); | 6255 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
6329 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) | 6256 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) |
6330 | goto error; | 6257 | goto error; |
6331 | h->transMethod = CFGTBL_Trans_Simple; | 6258 | h->transMethod = CFGTBL_Trans_Simple; |
6332 | return 0; | 6259 | return 0; |
6333 | error: | 6260 | error: |
6334 | dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); | 6261 | dev_err(&h->pdev->dev, "failed to enter simple mode\n"); |
6335 | return -ENODEV; | 6262 | return -ENODEV; |
6336 | } | 6263 | } |
6337 | 6264 | ||
@@ -6341,7 +6268,7 @@ static int hpsa_pci_init(struct ctlr_info *h) | |||
6341 | 6268 | ||
6342 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); | 6269 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); |
6343 | if (prod_index < 0) | 6270 | if (prod_index < 0) |
6344 | return -ENODEV; | 6271 | return prod_index; |
6345 | h->product_name = products[prod_index].product_name; | 6272 | h->product_name = products[prod_index].product_name; |
6346 | h->access = *(products[prod_index].access); | 6273 | h->access = *(products[prod_index].access); |
6347 | 6274 | ||
@@ -6422,6 +6349,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h) | |||
6422 | static int hpsa_init_reset_devices(struct pci_dev *pdev) | 6349 | static int hpsa_init_reset_devices(struct pci_dev *pdev) |
6423 | { | 6350 | { |
6424 | int rc, i; | 6351 | int rc, i; |
6352 | void __iomem *vaddr; | ||
6425 | 6353 | ||
6426 | if (!reset_devices) | 6354 | if (!reset_devices) |
6427 | return 0; | 6355 | return 0; |
@@ -6445,6 +6373,14 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) | |||
6445 | 6373 | ||
6446 | pci_set_master(pdev); | 6374 | pci_set_master(pdev); |
6447 | 6375 | ||
6376 | vaddr = pci_ioremap_bar(pdev, 0); | ||
6377 | if (vaddr == NULL) { | ||
6378 | rc = -ENOMEM; | ||
6379 | goto out_disable; | ||
6380 | } | ||
6381 | writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
6382 | iounmap(vaddr); | ||
6383 | |||
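The added block maps BAR 0 just long enough to mask the controller's reply interrupts before the hard reset, presumably so a controller still raising interrupts from the crashed kernel is silenced while the kdump kernel reinitializes it. A hedged sketch of the map, poke, unmap idiom on its own (error handling reduced to the essentials):

	void __iomem *vaddr = pci_ioremap_bar(pdev, 0);	/* BAR 0 holds the SA5 registers */

	if (!vaddr)
		return -ENOMEM;				/* caller disables the device */
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);					/* mapping is only needed for this one write */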
6448 | /* Reset the controller with a PCI power-cycle or via doorbell */ | 6384 | /* Reset the controller with a PCI power-cycle or via doorbell */ |
6449 | rc = hpsa_kdump_hard_reset_controller(pdev); | 6385 | rc = hpsa_kdump_hard_reset_controller(pdev); |
6450 | 6386 | ||
@@ -6453,14 +6389,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) | |||
6453 | * "performant mode". Or, it might be 640x, which can't reset | 6389 | * "performant mode". Or, it might be 640x, which can't reset |
6454 | * due to concerns about shared bbwc between 6402/6404 pair. | 6390 | * due to concerns about shared bbwc between 6402/6404 pair. |
6455 | */ | 6391 | */ |
6456 | if (rc) { | 6392 | if (rc) |
6457 | if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */ | ||
6458 | rc = -ENODEV; | ||
6459 | goto out_disable; | 6393 | goto out_disable; |
6460 | } | ||
6461 | 6394 | ||
6462 | /* Now try to get the controller to respond to a no-op */ | 6395 | /* Now try to get the controller to respond to a no-op */ |
6463 | dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); | 6396 | dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); |
6464 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { | 6397 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { |
6465 | if (hpsa_noop(pdev) == 0) | 6398 | if (hpsa_noop(pdev) == 0) |
6466 | break; | 6399 | break; |
@@ -6490,9 +6423,12 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h) | |||
6490 | || (h->cmd_pool == NULL) | 6423 | || (h->cmd_pool == NULL) |
6491 | || (h->errinfo_pool == NULL)) { | 6424 | || (h->errinfo_pool == NULL)) { |
6492 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); | 6425 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); |
6493 | return -ENOMEM; | 6426 | goto clean_up; |
6494 | } | 6427 | } |
6495 | return 0; | 6428 | return 0; |
6429 | clean_up: | ||
6430 | hpsa_free_cmd_pool(h); | ||
6431 | return -ENOMEM; | ||
6496 | } | 6432 | } |
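Every allocation failure now funnels through a single clean_up label that calls hpsa_free_cmd_pool(), so a partially completed allocation is unwound in one place. That works only if the free routine tolerates members that were never allocated; a minimal kernel-context sketch of the idiom, with made-up names (example_pool and its fields are illustrations, not driver structures):

	struct example_pool {
		unsigned long *cmd_map;
		void *cmds;
	};

	static void example_free_pool(struct example_pool *p)
	{
		kfree(p->cmd_map);	/* kfree(NULL) is a no-op, so partial */
		kfree(p->cmds);		/* allocations unwind safely */
	}

	static int example_alloc_pool(struct example_pool *p, int nr)
	{
		p->cmd_map = kcalloc(BITS_TO_LONGS(nr), sizeof(long), GFP_KERNEL);
		p->cmds = kcalloc(nr, 64, GFP_KERNEL);
		if (!p->cmd_map || !p->cmds)
			goto clean_up;
		return 0;
	clean_up:
		example_free_pool(p);	/* single unwind point, as in the hunk above */
		return -ENOMEM;
	}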
6497 | 6433 | ||
6498 | static void hpsa_free_cmd_pool(struct ctlr_info *h) | 6434 | static void hpsa_free_cmd_pool(struct ctlr_info *h) |
@@ -6519,16 +6455,38 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) | |||
6519 | 6455 | ||
6520 | static void hpsa_irq_affinity_hints(struct ctlr_info *h) | 6456 | static void hpsa_irq_affinity_hints(struct ctlr_info *h) |
6521 | { | 6457 | { |
6522 | int i, cpu, rc; | 6458 | int i, cpu; |
6523 | 6459 | ||
6524 | cpu = cpumask_first(cpu_online_mask); | 6460 | cpu = cpumask_first(cpu_online_mask); |
6525 | for (i = 0; i < h->msix_vector; i++) { | 6461 | for (i = 0; i < h->msix_vector; i++) { |
6526 | rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); | 6462 | irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); |
6527 | cpu = cpumask_next(cpu, cpu_online_mask); | 6463 | cpu = cpumask_next(cpu, cpu_online_mask); |
6528 | } | 6464 | } |
6529 | } | 6465 | } |
6530 | 6466 | ||
6531 | static int hpsa_request_irq(struct ctlr_info *h, | 6467 | /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ |
6468 | static void hpsa_free_irqs(struct ctlr_info *h) | ||
6469 | { | ||
6470 | int i; | ||
6471 | |||
6472 | if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { | ||
6473 | /* Single reply queue, only one irq to free */ | ||
6474 | i = h->intr_mode; | ||
6475 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6476 | free_irq(h->intr[i], &h->q[i]); | ||
6477 | return; | ||
6478 | } | ||
6479 | |||
6480 | for (i = 0; i < h->msix_vector; i++) { | ||
6481 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6482 | free_irq(h->intr[i], &h->q[i]); | ||
6483 | } | ||
6484 | for (; i < MAX_REPLY_QUEUES; i++) | ||
6485 | h->q[i] = 0; | ||
6486 | } | ||
6487 | |||
6488 | /* returns 0 on success; cleans up and returns -Enn on error */ | ||
6489 | static int hpsa_request_irqs(struct ctlr_info *h, | ||
6532 | irqreturn_t (*msixhandler)(int, void *), | 6490 | irqreturn_t (*msixhandler)(int, void *), |
6533 | irqreturn_t (*intxhandler)(int, void *)) | 6491 | irqreturn_t (*intxhandler)(int, void *)) |
6534 | { | 6492 | { |
@@ -6543,10 +6501,25 @@ static int hpsa_request_irq(struct ctlr_info *h, | |||
6543 | 6501 | ||
6544 | if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { | 6502 | if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { |
6545 | /* If performant mode and MSI-X, use multiple reply queues */ | 6503 | /* If performant mode and MSI-X, use multiple reply queues */ |
6546 | for (i = 0; i < h->msix_vector; i++) | 6504 | for (i = 0; i < h->msix_vector; i++) { |
6547 | rc = request_irq(h->intr[i], msixhandler, | 6505 | rc = request_irq(h->intr[i], msixhandler, |
6548 | 0, h->devname, | 6506 | 0, h->devname, |
6549 | &h->q[i]); | 6507 | &h->q[i]); |
6508 | if (rc) { | ||
6509 | int j; | ||
6510 | |||
6511 | dev_err(&h->pdev->dev, | ||
6512 | "failed to get irq %d for %s\n", | ||
6513 | h->intr[i], h->devname); | ||
6514 | for (j = 0; j < i; j++) { | ||
6515 | free_irq(h->intr[j], &h->q[j]); | ||
6516 | h->q[j] = 0; | ||
6517 | } | ||
6518 | for (; j < MAX_REPLY_QUEUES; j++) | ||
6519 | h->q[j] = 0; | ||
6520 | return rc; | ||
6521 | } | ||
6522 | } | ||
6550 | hpsa_irq_affinity_hints(h); | 6523 | hpsa_irq_affinity_hints(h); |
6551 | } else { | 6524 | } else { |
6552 | /* Use single reply pool */ | 6525 | /* Use single reply pool */ |
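The MSI-X request loop above gains an unwind path: if request_irq() fails for vector i, every vector already requested is freed and the remaining h->q[] slots are zeroed before the error is returned, so hpsa_free_irqs() is never asked to release an IRQ that was never requested. A compact sketch of the same roll-back pattern, with placeholder names (nvec, irqs[] and devs[] are illustrative only):

	/* Sketch: request nvec IRQs, rolling back on the first failure. */
	static int request_all_irqs(int nvec, unsigned int *irqs, void **devs,
				    irq_handler_t handler, const char *name)
	{
		int i, rc;

		for (i = 0; i < nvec; i++) {
			rc = request_irq(irqs[i], handler, 0, name, devs[i]);
			if (rc) {
				while (--i >= 0)
					free_irq(irqs[i], devs[i]);	/* undo earlier requests */
				return rc;
			}
		}
		return 0;
	}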
@@ -6592,27 +6565,9 @@ static int hpsa_kdump_soft_reset(struct ctlr_info *h) | |||
6592 | return 0; | 6565 | return 0; |
6593 | } | 6566 | } |
6594 | 6567 | ||
6595 | static void free_irqs(struct ctlr_info *h) | ||
6596 | { | ||
6597 | int i; | ||
6598 | |||
6599 | if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { | ||
6600 | /* Single reply queue, only one irq to free */ | ||
6601 | i = h->intr_mode; | ||
6602 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6603 | free_irq(h->intr[i], &h->q[i]); | ||
6604 | return; | ||
6605 | } | ||
6606 | |||
6607 | for (i = 0; i < h->msix_vector; i++) { | ||
6608 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6609 | free_irq(h->intr[i], &h->q[i]); | ||
6610 | } | ||
6611 | } | ||
6612 | |||
6613 | static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) | 6568 | static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) |
6614 | { | 6569 | { |
6615 | free_irqs(h); | 6570 | hpsa_free_irqs(h); |
6616 | #ifdef CONFIG_PCI_MSI | 6571 | #ifdef CONFIG_PCI_MSI |
6617 | if (h->msix_vector) { | 6572 | if (h->msix_vector) { |
6618 | if (h->pdev->msix_enabled) | 6573 | if (h->pdev->msix_enabled) |
@@ -6658,16 +6613,20 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) | |||
6658 | } | 6613 | } |
6659 | 6614 | ||
6660 | /* Called when controller lockup detected. */ | 6615 | /* Called when controller lockup detected. */ |
6661 | static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) | 6616 | static void fail_all_outstanding_cmds(struct ctlr_info *h) |
6662 | { | 6617 | { |
6663 | struct CommandList *c = NULL; | 6618 | int i, refcount; |
6619 | struct CommandList *c; | ||
6664 | 6620 | ||
6665 | assert_spin_locked(&h->lock); | 6621 | flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ |
6666 | /* Mark all outstanding commands as failed and complete them. */ | 6622 | for (i = 0; i < h->nr_cmds; i++) { |
6667 | while (!list_empty(list)) { | 6623 | c = h->cmd_pool + i; |
6668 | c = list_entry(list->next, struct CommandList, list); | 6624 | refcount = atomic_inc_return(&c->refcount); |
6669 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | 6625 | if (refcount > 1) { |
6670 | finish_cmd(c); | 6626 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; |
6627 | finish_cmd(c); | ||
6628 | } | ||
6629 | cmd_free(h, c); | ||
6671 | } | 6630 | } |
6672 | } | 6631 | } |
6673 | 6632 | ||
@@ -6704,10 +6663,7 @@ static void controller_lockup_detected(struct ctlr_info *h) | |||
6704 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", | 6663 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", |
6705 | lockup_detected); | 6664 | lockup_detected); |
6706 | pci_disable_device(h->pdev); | 6665 | pci_disable_device(h->pdev); |
6707 | spin_lock_irqsave(&h->lock, flags); | 6666 | fail_all_outstanding_cmds(h); |
6708 | fail_all_cmds_on_list(h, &h->cmpQ); | ||
6709 | fail_all_cmds_on_list(h, &h->reqQ); | ||
6710 | spin_unlock_irqrestore(&h->lock, flags); | ||
6711 | } | 6667 | } |
6712 | 6668 | ||
6713 | static void detect_controller_lockup(struct ctlr_info *h) | 6669 | static void detect_controller_lockup(struct ctlr_info *h) |
@@ -6750,8 +6706,8 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) | |||
6750 | int i; | 6706 | int i; |
6751 | char *event_type; | 6707 | char *event_type; |
6752 | 6708 | ||
6753 | /* Clear the driver-requested rescan flag */ | 6709 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
6754 | h->drv_req_rescan = 0; | 6710 | return; |
6755 | 6711 | ||
6756 | /* Ask the controller to clear the events we're handling. */ | 6712 | /* Ask the controller to clear the events we're handling. */ |
6757 | if ((h->transMethod & (CFGTBL_Trans_io_accel1 | 6713 | if ((h->transMethod & (CFGTBL_Trans_io_accel1 |
@@ -6798,9 +6754,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) | |||
6798 | */ | 6754 | */ |
6799 | static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) | 6755 | static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) |
6800 | { | 6756 | { |
6801 | if (h->drv_req_rescan) | ||
6802 | return 1; | ||
6803 | |||
6804 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) | 6757 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
6805 | return 0; | 6758 | return 0; |
6806 | 6759 | ||
@@ -6834,34 +6787,58 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h) | |||
6834 | return 0; | 6787 | return 0; |
6835 | } | 6788 | } |
6836 | 6789 | ||
6837 | 6790 | static void hpsa_rescan_ctlr_worker(struct work_struct *work) | |
6838 | static void hpsa_monitor_ctlr_worker(struct work_struct *work) | ||
6839 | { | 6791 | { |
6840 | unsigned long flags; | 6792 | unsigned long flags; |
6841 | struct ctlr_info *h = container_of(to_delayed_work(work), | 6793 | struct ctlr_info *h = container_of(to_delayed_work(work), |
6842 | struct ctlr_info, monitor_ctlr_work); | 6794 | struct ctlr_info, rescan_ctlr_work); |
6843 | detect_controller_lockup(h); | 6795 | |
6844 | if (lockup_detected(h)) | 6796 | |
6797 | if (h->remove_in_progress) | ||
6845 | return; | 6798 | return; |
6846 | 6799 | ||
6847 | if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { | 6800 | if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { |
6848 | scsi_host_get(h->scsi_host); | 6801 | scsi_host_get(h->scsi_host); |
6849 | h->drv_req_rescan = 0; | ||
6850 | hpsa_ack_ctlr_events(h); | 6802 | hpsa_ack_ctlr_events(h); |
6851 | hpsa_scan_start(h->scsi_host); | 6803 | hpsa_scan_start(h->scsi_host); |
6852 | scsi_host_put(h->scsi_host); | 6804 | scsi_host_put(h->scsi_host); |
6853 | } | 6805 | } |
6854 | |||
6855 | spin_lock_irqsave(&h->lock, flags); | 6806 | spin_lock_irqsave(&h->lock, flags); |
6856 | if (h->remove_in_progress) { | 6807 | if (!h->remove_in_progress) |
6857 | spin_unlock_irqrestore(&h->lock, flags); | 6808 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, |
6809 | h->heartbeat_sample_interval); | ||
6810 | spin_unlock_irqrestore(&h->lock, flags); | ||
6811 | } | ||
6812 | |||
6813 | static void hpsa_monitor_ctlr_worker(struct work_struct *work) | ||
6814 | { | ||
6815 | unsigned long flags; | ||
6816 | struct ctlr_info *h = container_of(to_delayed_work(work), | ||
6817 | struct ctlr_info, monitor_ctlr_work); | ||
6818 | |||
6819 | detect_controller_lockup(h); | ||
6820 | if (lockup_detected(h)) | ||
6858 | return; | 6821 | return; |
6859 | } | 6822 | |
6860 | schedule_delayed_work(&h->monitor_ctlr_work, | 6823 | spin_lock_irqsave(&h->lock, flags); |
6824 | if (!h->remove_in_progress) | ||
6825 | schedule_delayed_work(&h->monitor_ctlr_work, | ||
6861 | h->heartbeat_sample_interval); | 6826 | h->heartbeat_sample_interval); |
6862 | spin_unlock_irqrestore(&h->lock, flags); | 6827 | spin_unlock_irqrestore(&h->lock, flags); |
6863 | } | 6828 | } |
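Both workers now re-arm themselves only while !h->remove_in_progress, checked under h->lock, which is what lets the remove path cancel them without racing a self-requeue. A minimal sketch of the re-arm pattern, hedged (struct my_ctlr, my_worker and do_periodic_checks are illustrative names, not driver code):

	static void my_worker(struct work_struct *work)
	{
		struct my_ctlr *h = container_of(to_delayed_work(work),
						 struct my_ctlr, work);
		unsigned long flags;

		do_periodic_checks(h);			/* hypothetical payload */

		spin_lock_irqsave(&h->lock, flags);
		if (!h->remove_in_progress)		/* never re-arm once teardown began */
			schedule_delayed_work(&h->work, h->interval);
		spin_unlock_irqrestore(&h->lock, flags);
	}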
6864 | 6829 | ||
6830 | static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, | ||
6831 | char *name) | ||
6832 | { | ||
6833 | struct workqueue_struct *wq = NULL; | ||
6834 | |||
6835 | wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); | ||
6836 | if (!wq) | ||
6837 | dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); | ||
6838 | |||
6839 | return wq; | ||
6840 | } | ||
6841 | |||
6865 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 6842 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
6866 | { | 6843 | { |
6867 | int dac, rc; | 6844 | int dac, rc; |
@@ -6898,13 +6875,23 @@ reinit_after_soft_reset: | |||
6898 | 6875 | ||
6899 | h->pdev = pdev; | 6876 | h->pdev = pdev; |
6900 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; | 6877 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; |
6901 | INIT_LIST_HEAD(&h->cmpQ); | ||
6902 | INIT_LIST_HEAD(&h->reqQ); | ||
6903 | INIT_LIST_HEAD(&h->offline_device_list); | 6878 | INIT_LIST_HEAD(&h->offline_device_list); |
6904 | spin_lock_init(&h->lock); | 6879 | spin_lock_init(&h->lock); |
6905 | spin_lock_init(&h->offline_device_lock); | 6880 | spin_lock_init(&h->offline_device_lock); |
6906 | spin_lock_init(&h->scan_lock); | 6881 | spin_lock_init(&h->scan_lock); |
6907 | spin_lock_init(&h->passthru_count_lock); | 6882 | atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); |
6883 | |||
6884 | h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); | ||
6885 | if (!h->rescan_ctlr_wq) { | ||
6886 | rc = -ENOMEM; | ||
6887 | goto clean1; | ||
6888 | } | ||
6889 | |||
6890 | h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); | ||
6891 | if (!h->resubmit_wq) { | ||
6892 | rc = -ENOMEM; | ||
6893 | goto clean1; | ||
6894 | } | ||
6908 | 6895 | ||
6909 | /* Allocate and clear per-cpu variable lockup_detected */ | 6896 | /* Allocate and clear per-cpu variable lockup_detected */ |
6910 | h->lockup_detected = alloc_percpu(u32); | 6897 | h->lockup_detected = alloc_percpu(u32); |
@@ -6939,13 +6926,14 @@ reinit_after_soft_reset: | |||
6939 | /* make sure the board interrupts are off */ | 6926 | /* make sure the board interrupts are off */ |
6940 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 6927 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
6941 | 6928 | ||
6942 | if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) | 6929 | if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) |
6943 | goto clean2; | 6930 | goto clean2; |
6944 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", | 6931 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
6945 | h->devname, pdev->device, | 6932 | h->devname, pdev->device, |
6946 | h->intr[h->intr_mode], dac ? "" : " not"); | 6933 | h->intr[h->intr_mode], dac ? "" : " not"); |
6947 | if (hpsa_allocate_cmd_pool(h)) | 6934 | rc = hpsa_allocate_cmd_pool(h); |
6948 | goto clean4; | 6935 | if (rc) |
6936 | goto clean2_and_free_irqs; | ||
6949 | if (hpsa_allocate_sg_chain_blocks(h)) | 6937 | if (hpsa_allocate_sg_chain_blocks(h)) |
6950 | goto clean4; | 6938 | goto clean4; |
6951 | init_waitqueue_head(&h->scan_wait_queue); | 6939 | init_waitqueue_head(&h->scan_wait_queue); |
@@ -6974,12 +6962,12 @@ reinit_after_soft_reset: | |||
6974 | spin_lock_irqsave(&h->lock, flags); | 6962 | spin_lock_irqsave(&h->lock, flags); |
6975 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 6963 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
6976 | spin_unlock_irqrestore(&h->lock, flags); | 6964 | spin_unlock_irqrestore(&h->lock, flags); |
6977 | free_irqs(h); | 6965 | hpsa_free_irqs(h); |
6978 | rc = hpsa_request_irq(h, hpsa_msix_discard_completions, | 6966 | rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, |
6979 | hpsa_intx_discard_completions); | 6967 | hpsa_intx_discard_completions); |
6980 | if (rc) { | 6968 | if (rc) { |
6981 | dev_warn(&h->pdev->dev, "Failed to request_irq after " | 6969 | dev_warn(&h->pdev->dev, |
6982 | "soft reset.\n"); | 6970 | "Failed to request_irq after soft reset.\n"); |
6983 | goto clean4; | 6971 | goto clean4; |
6984 | } | 6972 | } |
6985 | 6973 | ||
@@ -7016,7 +7004,6 @@ reinit_after_soft_reset: | |||
7016 | /* Enable Accelerated IO path at driver layer */ | 7004 | /* Enable Accelerated IO path at driver layer */ |
7017 | h->acciopath_status = 1; | 7005 | h->acciopath_status = 1; |
7018 | 7006 | ||
7019 | h->drv_req_rescan = 0; | ||
7020 | 7007 | ||
7021 | /* Turn the interrupts on so we can service requests */ | 7008 | /* Turn the interrupts on so we can service requests */ |
7022 | h->access.set_intr_mask(h, HPSA_INTR_ON); | 7009 | h->access.set_intr_mask(h, HPSA_INTR_ON); |
@@ -7029,14 +7016,22 @@ reinit_after_soft_reset: | |||
7029 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); | 7016 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); |
7030 | schedule_delayed_work(&h->monitor_ctlr_work, | 7017 | schedule_delayed_work(&h->monitor_ctlr_work, |
7031 | h->heartbeat_sample_interval); | 7018 | h->heartbeat_sample_interval); |
7019 | INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); | ||
7020 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, | ||
7021 | h->heartbeat_sample_interval); | ||
7032 | return 0; | 7022 | return 0; |
7033 | 7023 | ||
7034 | clean4: | 7024 | clean4: |
7035 | hpsa_free_sg_chain_blocks(h); | 7025 | hpsa_free_sg_chain_blocks(h); |
7036 | hpsa_free_cmd_pool(h); | 7026 | hpsa_free_cmd_pool(h); |
7037 | free_irqs(h); | 7027 | clean2_and_free_irqs: |
7028 | hpsa_free_irqs(h); | ||
7038 | clean2: | 7029 | clean2: |
7039 | clean1: | 7030 | clean1: |
7031 | if (h->resubmit_wq) | ||
7032 | destroy_workqueue(h->resubmit_wq); | ||
7033 | if (h->rescan_ctlr_wq) | ||
7034 | destroy_workqueue(h->rescan_ctlr_wq); | ||
7040 | if (h->lockup_detected) | 7035 | if (h->lockup_detected) |
7041 | free_percpu(h->lockup_detected); | 7036 | free_percpu(h->lockup_detected); |
7042 | kfree(h); | 7037 | kfree(h); |
@@ -7055,9 +7050,9 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
7055 | if (!flush_buf) | 7050 | if (!flush_buf) |
7056 | return; | 7051 | return; |
7057 | 7052 | ||
7058 | c = cmd_special_alloc(h); | 7053 | c = cmd_alloc(h); |
7059 | if (!c) { | 7054 | if (!c) { |
7060 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 7055 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
7061 | goto out_of_memory; | 7056 | goto out_of_memory; |
7062 | } | 7057 | } |
7063 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, | 7058 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, |
@@ -7069,7 +7064,7 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
7069 | out: | 7064 | out: |
7070 | dev_warn(&h->pdev->dev, | 7065 | dev_warn(&h->pdev->dev, |
7071 | "error flushing cache on controller\n"); | 7066 | "error flushing cache on controller\n"); |
7072 | cmd_special_free(h, c); | 7067 | cmd_free(h, c); |
7073 | out_of_memory: | 7068 | out_of_memory: |
7074 | kfree(flush_buf); | 7069 | kfree(flush_buf); |
7075 | } | 7070 | } |
@@ -7110,9 +7105,11 @@ static void hpsa_remove_one(struct pci_dev *pdev) | |||
7110 | /* Get rid of any controller monitoring work items */ | 7105 | /* Get rid of any controller monitoring work items */ |
7111 | spin_lock_irqsave(&h->lock, flags); | 7106 | spin_lock_irqsave(&h->lock, flags); |
7112 | h->remove_in_progress = 1; | 7107 | h->remove_in_progress = 1; |
7113 | cancel_delayed_work(&h->monitor_ctlr_work); | ||
7114 | spin_unlock_irqrestore(&h->lock, flags); | 7108 | spin_unlock_irqrestore(&h->lock, flags); |
7115 | 7109 | cancel_delayed_work_sync(&h->monitor_ctlr_work); | |
7110 | cancel_delayed_work_sync(&h->rescan_ctlr_work); | ||
7111 | destroy_workqueue(h->rescan_ctlr_wq); | ||
7112 | destroy_workqueue(h->resubmit_wq); | ||
7116 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ | 7113 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
7117 | hpsa_shutdown(pdev); | 7114 | hpsa_shutdown(pdev); |
7118 | iounmap(h->vaddr); | 7115 | iounmap(h->vaddr); |
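The remove path now sets remove_in_progress under h->lock, drops the lock, and only then calls cancel_delayed_work_sync() on both workers before destroying their workqueues. The _sync variant can sleep and the workers themselves take h->lock, so the cancel must happen outside the spinlock; the flag guarantees a worker that is already running will not re-queue itself afterwards. The ordering, annotated:

	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;			/* stop workers from re-arming */
	spin_unlock_irqrestore(&h->lock, flags);

	cancel_delayed_work_sync(&h->monitor_ctlr_work);	/* may sleep: must not hold h->lock */
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);		/* safe: nothing can queue to them any more */
	destroy_workqueue(h->resubmit_wq);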
@@ -7172,7 +7169,7 @@ static struct pci_driver hpsa_pci_driver = { | |||
7172 | * bits of the command address. | 7169 | * bits of the command address. |
7173 | */ | 7170 | */ |
7174 | static void calc_bucket_map(int bucket[], int num_buckets, | 7171 | static void calc_bucket_map(int bucket[], int num_buckets, |
7175 | int nsgs, int min_blocks, int *bucket_map) | 7172 | int nsgs, int min_blocks, u32 *bucket_map) |
7176 | { | 7173 | { |
7177 | int i, j, b, size; | 7174 | int i, j, b, size; |
7178 | 7175 | ||
@@ -7193,7 +7190,8 @@ static void calc_bucket_map(int bucket[], int num_buckets, | |||
7193 | } | 7190 | } |
7194 | } | 7191 | } |
7195 | 7192 | ||
7196 | static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | 7193 | /* return -ENODEV or other reason on error, 0 on success */ |
7194 | static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | ||
7197 | { | 7195 | { |
7198 | int i; | 7196 | int i; |
7199 | unsigned long register_value; | 7197 | unsigned long register_value; |
@@ -7285,12 +7283,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7285 | } | 7283 | } |
7286 | } | 7284 | } |
7287 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 7285 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
7288 | hpsa_wait_for_mode_change_ack(h); | 7286 | if (hpsa_wait_for_mode_change_ack(h)) { |
7287 | dev_err(&h->pdev->dev, | ||
7288 | "performant mode problem - doorbell timeout\n"); | ||
7289 | return -ENODEV; | ||
7290 | } | ||
7289 | register_value = readl(&(h->cfgtable->TransportActive)); | 7291 | register_value = readl(&(h->cfgtable->TransportActive)); |
7290 | if (!(register_value & CFGTBL_Trans_Performant)) { | 7292 | if (!(register_value & CFGTBL_Trans_Performant)) { |
7291 | dev_warn(&h->pdev->dev, "unable to get board into" | 7293 | dev_err(&h->pdev->dev, |
7292 | " performant mode\n"); | 7294 | "performant mode problem - transport not active\n"); |
7293 | return; | 7295 | return -ENODEV; |
7294 | } | 7296 | } |
7295 | /* Change the access methods to the performant access methods */ | 7297 | /* Change the access methods to the performant access methods */ |
7296 | h->access = access; | 7298 | h->access = access; |
@@ -7298,7 +7300,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7298 | 7300 | ||
7299 | if (!((trans_support & CFGTBL_Trans_io_accel1) || | 7301 | if (!((trans_support & CFGTBL_Trans_io_accel1) || |
7300 | (trans_support & CFGTBL_Trans_io_accel2))) | 7302 | (trans_support & CFGTBL_Trans_io_accel2))) |
7301 | return; | 7303 | return 0; |
7302 | 7304 | ||
7303 | if (trans_support & CFGTBL_Trans_io_accel1) { | 7305 | if (trans_support & CFGTBL_Trans_io_accel1) { |
7304 | /* Set up I/O accelerator mode */ | 7306 | /* Set up I/O accelerator mode */ |
@@ -7328,12 +7330,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7328 | (i * sizeof(struct ErrorInfo))); | 7330 | (i * sizeof(struct ErrorInfo))); |
7329 | cp->err_info_len = sizeof(struct ErrorInfo); | 7331 | cp->err_info_len = sizeof(struct ErrorInfo); |
7330 | cp->sgl_offset = IOACCEL1_SGLOFFSET; | 7332 | cp->sgl_offset = IOACCEL1_SGLOFFSET; |
7331 | cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; | 7333 | cp->host_context_flags = |
7334 | cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); | ||
7332 | cp->timeout_sec = 0; | 7335 | cp->timeout_sec = 0; |
7333 | cp->ReplyQueue = 0; | 7336 | cp->ReplyQueue = 0; |
7334 | cp->tag = | 7337 | cp->tag = |
7335 | cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) | | 7338 | cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); |
7336 | DIRECT_LOOKUP_BIT); | ||
7337 | cp->host_addr = | 7339 | cp->host_addr = |
7338 | cpu_to_le64(h->ioaccel_cmd_pool_dhandle + | 7340 | cpu_to_le64(h->ioaccel_cmd_pool_dhandle + |
7339 | (i * sizeof(struct io_accel1_cmd))); | 7341 | (i * sizeof(struct io_accel1_cmd))); |
@@ -7362,7 +7364,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7362 | writel(bft2[i], &h->ioaccel2_bft2_regs[i]); | 7364 | writel(bft2[i], &h->ioaccel2_bft2_regs[i]); |
7363 | } | 7365 | } |
7364 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 7366 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
7365 | hpsa_wait_for_mode_change_ack(h); | 7367 | if (hpsa_wait_for_mode_change_ack(h)) { |
7368 | dev_err(&h->pdev->dev, | ||
7369 | "performant mode problem - enabling ioaccel mode\n"); | ||
7370 | return -ENODEV; | ||
7371 | } | ||
7372 | return 0; | ||
7366 | } | 7373 | } |
7367 | 7374 | ||
7368 | static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) | 7375 | static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) |
@@ -7508,17 +7515,18 @@ static int is_accelerated_cmd(struct CommandList *c) | |||
7508 | static void hpsa_drain_accel_commands(struct ctlr_info *h) | 7515 | static void hpsa_drain_accel_commands(struct ctlr_info *h) |
7509 | { | 7516 | { |
7510 | struct CommandList *c = NULL; | 7517 | struct CommandList *c = NULL; |
7511 | unsigned long flags; | 7518 | int i, accel_cmds_out; |
7512 | int accel_cmds_out; | 7519 | int refcount; |
7513 | 7520 | ||
7514 | do { /* wait for all outstanding commands to drain out */ | 7521 | do { /* wait for all outstanding ioaccel commands to drain out */ |
7515 | accel_cmds_out = 0; | 7522 | accel_cmds_out = 0; |
7516 | spin_lock_irqsave(&h->lock, flags); | 7523 | for (i = 0; i < h->nr_cmds; i++) { |
7517 | list_for_each_entry(c, &h->cmpQ, list) | 7524 | c = h->cmd_pool + i; |
7518 | accel_cmds_out += is_accelerated_cmd(c); | 7525 | refcount = atomic_inc_return(&c->refcount); |
7519 | list_for_each_entry(c, &h->reqQ, list) | 7526 | if (refcount > 1) /* Command is allocated */ |
7520 | accel_cmds_out += is_accelerated_cmd(c); | 7527 | accel_cmds_out += is_accelerated_cmd(c); |
7521 | spin_unlock_irqrestore(&h->lock, flags); | 7528 | cmd_free(h, c); |
7529 | } | ||
7522 | if (accel_cmds_out <= 0) | 7530 | if (accel_cmds_out <= 0) |
7523 | break; | 7531 | break; |
7524 | msleep(100); | 7532 | msleep(100); |
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 8e06d9e280ec..657713050349 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -32,7 +32,6 @@ struct access_method { | |||
32 | void (*submit_command)(struct ctlr_info *h, | 32 | void (*submit_command)(struct ctlr_info *h, |
33 | struct CommandList *c); | 33 | struct CommandList *c); |
34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); | 34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); |
35 | unsigned long (*fifo_full)(struct ctlr_info *h); | ||
36 | bool (*intr_pending)(struct ctlr_info *h); | 35 | bool (*intr_pending)(struct ctlr_info *h); |
37 | unsigned long (*command_completed)(struct ctlr_info *h, u8 q); | 36 | unsigned long (*command_completed)(struct ctlr_info *h, u8 q); |
38 | }; | 37 | }; |
@@ -47,6 +46,11 @@ struct hpsa_scsi_dev_t { | |||
47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ | 46 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ |
48 | unsigned char raid_level; /* from inquiry page 0xC1 */ | 47 | unsigned char raid_level; /* from inquiry page 0xC1 */ |
49 | unsigned char volume_offline; /* discovered via TUR or VPD */ | 48 | unsigned char volume_offline; /* discovered via TUR or VPD */ |
49 | u16 queue_depth; /* max queue_depth for this device */ | ||
50 | atomic_t ioaccel_cmds_out; /* Only used for physical devices | ||
51 | * counts commands sent to physical | ||
52 | * device via "ioaccel" path. | ||
53 | */ | ||
50 | u32 ioaccel_handle; | 54 | u32 ioaccel_handle; |
51 | int offload_config; /* I/O accel RAID offload configured */ | 55 | int offload_config; /* I/O accel RAID offload configured */ |
52 | int offload_enabled; /* I/O accel RAID offload enabled */ | 56 | int offload_enabled; /* I/O accel RAID offload enabled */ |
@@ -55,6 +59,15 @@ struct hpsa_scsi_dev_t { | |||
55 | */ | 59 | */ |
56 | struct raid_map_data raid_map; /* I/O accelerator RAID map */ | 60 | struct raid_map_data raid_map; /* I/O accelerator RAID map */ |
57 | 61 | ||
62 | /* | ||
63 | * Pointers from logical drive map indices to the phys drives that | ||
64 | * make those logical drives. Note, multiple logical drives may | ||
65 | * share physical drives. You can have for instance 5 physical | ||
66 | * drives with 3 logical drives each using those same 5 physical | ||
67 | * disks. We need these pointers for counting i/o's out to physical | ||
68 | * devices in order to honor physical device queue depth limits. | ||
69 | */ | ||
70 | struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES]; | ||
58 | }; | 71 | }; |
59 | 72 | ||
60 | struct reply_queue_buffer { | 73 | struct reply_queue_buffer { |
@@ -115,9 +128,12 @@ struct ctlr_info { | |||
115 | void __iomem *vaddr; | 128 | void __iomem *vaddr; |
116 | unsigned long paddr; | 129 | unsigned long paddr; |
117 | int nr_cmds; /* Number of commands allowed on this controller */ | 130 | int nr_cmds; /* Number of commands allowed on this controller */ |
131 | #define HPSA_CMDS_RESERVED_FOR_ABORTS 2 | ||
132 | #define HPSA_CMDS_RESERVED_FOR_DRIVER 1 | ||
118 | struct CfgTable __iomem *cfgtable; | 133 | struct CfgTable __iomem *cfgtable; |
119 | int interrupts_enabled; | 134 | int interrupts_enabled; |
120 | int max_commands; | 135 | int max_commands; |
136 | int last_allocation; | ||
121 | atomic_t commands_outstanding; | 137 | atomic_t commands_outstanding; |
122 | # define PERF_MODE_INT 0 | 138 | # define PERF_MODE_INT 0 |
123 | # define DOORBELL_INT 1 | 139 | # define DOORBELL_INT 1 |
@@ -131,8 +147,6 @@ struct ctlr_info { | |||
131 | char hba_mode_enabled; | 147 | char hba_mode_enabled; |
132 | 148 | ||
133 | /* queue and queue Info */ | 149 | /* queue and queue Info */ |
134 | struct list_head reqQ; | ||
135 | struct list_head cmpQ; | ||
136 | unsigned int Qdepth; | 150 | unsigned int Qdepth; |
137 | unsigned int maxSG; | 151 | unsigned int maxSG; |
138 | spinlock_t lock; | 152 | spinlock_t lock; |
@@ -168,9 +182,8 @@ struct ctlr_info { | |||
168 | unsigned long transMethod; | 182 | unsigned long transMethod; |
169 | 183 | ||
170 | /* cap concurrent passthrus at some reasonable maximum */ | 184 | /* cap concurrent passthrus at some reasonable maximum */ |
171 | #define HPSA_MAX_CONCURRENT_PASSTHRUS (20) | 185 | #define HPSA_MAX_CONCURRENT_PASSTHRUS (10) |
172 | spinlock_t passthru_count_lock; /* protects passthru_count */ | 186 | atomic_t passthru_cmds_avail; |
173 | int passthru_count; | ||
174 | 187 | ||
175 | /* | 188 | /* |
176 | * Performant mode completion buffers | 189 | * Performant mode completion buffers |
@@ -194,8 +207,8 @@ struct ctlr_info { | |||
194 | atomic_t firmware_flash_in_progress; | 207 | atomic_t firmware_flash_in_progress; |
195 | u32 __percpu *lockup_detected; | 208 | u32 __percpu *lockup_detected; |
196 | struct delayed_work monitor_ctlr_work; | 209 | struct delayed_work monitor_ctlr_work; |
210 | struct delayed_work rescan_ctlr_work; | ||
197 | int remove_in_progress; | 211 | int remove_in_progress; |
198 | u32 fifo_recently_full; | ||
199 | /* Address of h->q[x] is passed to intr handler to know which queue */ | 212 | /* Address of h->q[x] is passed to intr handler to know which queue */ |
200 | u8 q[MAX_REPLY_QUEUES]; | 213 | u8 q[MAX_REPLY_QUEUES]; |
201 | u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ | 214 | u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ |
@@ -237,8 +250,9 @@ struct ctlr_info { | |||
237 | spinlock_t offline_device_lock; | 250 | spinlock_t offline_device_lock; |
238 | struct list_head offline_device_list; | 251 | struct list_head offline_device_list; |
239 | int acciopath_status; | 252 | int acciopath_status; |
240 | int drv_req_rescan; /* flag for driver to request rescan event */ | ||
241 | int raid_offload_debug; | 253 | int raid_offload_debug; |
254 | struct workqueue_struct *resubmit_wq; | ||
255 | struct workqueue_struct *rescan_ctlr_wq; | ||
242 | }; | 256 | }; |
243 | 257 | ||
244 | struct offline_device_entry { | 258 | struct offline_device_entry { |
@@ -297,6 +311,8 @@ struct offline_device_entry { | |||
297 | */ | 311 | */ |
298 | #define SA5_DOORBELL 0x20 | 312 | #define SA5_DOORBELL 0x20 |
299 | #define SA5_REQUEST_PORT_OFFSET 0x40 | 313 | #define SA5_REQUEST_PORT_OFFSET 0x40 |
314 | #define SA5_REQUEST_PORT64_LO_OFFSET 0xC0 | ||
315 | #define SA5_REQUEST_PORT64_HI_OFFSET 0xC4 | ||
300 | #define SA5_REPLY_INTR_MASK_OFFSET 0x34 | 316 | #define SA5_REPLY_INTR_MASK_OFFSET 0x34 |
301 | #define SA5_REPLY_PORT_OFFSET 0x44 | 317 | #define SA5_REPLY_PORT_OFFSET 0x44 |
302 | #define SA5_INTR_STATUS 0x30 | 318 | #define SA5_INTR_STATUS 0x30 |
@@ -353,10 +369,7 @@ static void SA5_submit_command_no_read(struct ctlr_info *h, | |||
353 | static void SA5_submit_command_ioaccel2(struct ctlr_info *h, | 369 | static void SA5_submit_command_ioaccel2(struct ctlr_info *h, |
354 | struct CommandList *c) | 370 | struct CommandList *c) |
355 | { | 371 | { |
356 | if (c->cmd_type == CMD_IOACCEL2) | 372 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
357 | writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); | ||
358 | else | ||
359 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | ||
360 | } | 373 | } |
361 | 374 | ||
362 | /* | 375 | /* |
@@ -398,19 +411,19 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) | |||
398 | unsigned long register_value = FIFO_EMPTY; | 411 | unsigned long register_value = FIFO_EMPTY; |
399 | 412 | ||
400 | /* msi auto clears the interrupt pending bit. */ | 413 | /* msi auto clears the interrupt pending bit. */ |
401 | if (!(h->msi_vector || h->msix_vector)) { | 414 | if (unlikely(!(h->msi_vector || h->msix_vector))) { |
402 | /* flush the controller write of the reply queue by reading | 415 | /* flush the controller write of the reply queue by reading |
403 | * outbound doorbell status register. | 416 | * outbound doorbell status register. |
404 | */ | 417 | */ |
405 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 418 | (void) readl(h->vaddr + SA5_OUTDB_STATUS); |
406 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); | 419 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); |
407 | /* Do a read in order to flush the write to the controller | 420 | /* Do a read in order to flush the write to the controller |
408 | * (as per spec.) | 421 | * (as per spec.) |
409 | */ | 422 | */ |
410 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 423 | (void) readl(h->vaddr + SA5_OUTDB_STATUS); |
411 | } | 424 | } |
412 | 425 | ||
413 | if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { | 426 | if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { |
414 | register_value = rq->head[rq->current_entry]; | 427 | register_value = rq->head[rq->current_entry]; |
415 | rq->current_entry++; | 428 | rq->current_entry++; |
416 | atomic_dec(&h->commands_outstanding); | 429 | atomic_dec(&h->commands_outstanding); |
@@ -426,14 +439,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) | |||
426 | } | 439 | } |
427 | 440 | ||
428 | /* | 441 | /* |
429 | * Returns true if fifo is full. | ||
430 | * | ||
431 | */ | ||
432 | static unsigned long SA5_fifo_full(struct ctlr_info *h) | ||
433 | { | ||
434 | return atomic_read(&h->commands_outstanding) >= h->max_commands; | ||
435 | } | ||
436 | /* | ||
437 | * returns value read from hardware. | 442 | * returns value read from hardware. |
438 | * returns FIFO_EMPTY if there is nothing to read | 443 | * returns FIFO_EMPTY if there is nothing to read |
439 | */ | 444 | */ |
@@ -473,9 +478,6 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h) | |||
473 | if (!register_value) | 478 | if (!register_value) |
474 | return false; | 479 | return false; |
475 | 480 | ||
476 | if (h->msi_vector || h->msix_vector) | ||
477 | return true; | ||
478 | |||
479 | /* Read outbound doorbell to flush */ | 481 | /* Read outbound doorbell to flush */ |
480 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 482 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); |
481 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; | 483 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; |
@@ -525,7 +527,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) | |||
525 | static struct access_method SA5_access = { | 527 | static struct access_method SA5_access = { |
526 | SA5_submit_command, | 528 | SA5_submit_command, |
527 | SA5_intr_mask, | 529 | SA5_intr_mask, |
528 | SA5_fifo_full, | ||
529 | SA5_intr_pending, | 530 | SA5_intr_pending, |
530 | SA5_completed, | 531 | SA5_completed, |
531 | }; | 532 | }; |
@@ -533,7 +534,6 @@ static struct access_method SA5_access = { | |||
533 | static struct access_method SA5_ioaccel_mode1_access = { | 534 | static struct access_method SA5_ioaccel_mode1_access = { |
534 | SA5_submit_command, | 535 | SA5_submit_command, |
535 | SA5_performant_intr_mask, | 536 | SA5_performant_intr_mask, |
536 | SA5_fifo_full, | ||
537 | SA5_ioaccel_mode1_intr_pending, | 537 | SA5_ioaccel_mode1_intr_pending, |
538 | SA5_ioaccel_mode1_completed, | 538 | SA5_ioaccel_mode1_completed, |
539 | }; | 539 | }; |
@@ -541,7 +541,6 @@ static struct access_method SA5_ioaccel_mode1_access = { | |||
541 | static struct access_method SA5_ioaccel_mode2_access = { | 541 | static struct access_method SA5_ioaccel_mode2_access = { |
542 | SA5_submit_command_ioaccel2, | 542 | SA5_submit_command_ioaccel2, |
543 | SA5_performant_intr_mask, | 543 | SA5_performant_intr_mask, |
544 | SA5_fifo_full, | ||
545 | SA5_performant_intr_pending, | 544 | SA5_performant_intr_pending, |
546 | SA5_performant_completed, | 545 | SA5_performant_completed, |
547 | }; | 546 | }; |
@@ -549,7 +548,6 @@ static struct access_method SA5_ioaccel_mode2_access = { | |||
549 | static struct access_method SA5_performant_access = { | 548 | static struct access_method SA5_performant_access = { |
550 | SA5_submit_command, | 549 | SA5_submit_command, |
551 | SA5_performant_intr_mask, | 550 | SA5_performant_intr_mask, |
552 | SA5_fifo_full, | ||
553 | SA5_performant_intr_pending, | 551 | SA5_performant_intr_pending, |
554 | SA5_performant_completed, | 552 | SA5_performant_completed, |
555 | }; | 553 | }; |
@@ -557,7 +555,6 @@ static struct access_method SA5_performant_access = { | |||
557 | static struct access_method SA5_performant_access_no_read = { | 555 | static struct access_method SA5_performant_access_no_read = { |
558 | SA5_submit_command_no_read, | 556 | SA5_submit_command_no_read, |
559 | SA5_performant_intr_mask, | 557 | SA5_performant_intr_mask, |
560 | SA5_fifo_full, | ||
561 | SA5_performant_intr_pending, | 558 | SA5_performant_intr_pending, |
562 | SA5_performant_completed, | 559 | SA5_performant_completed, |
563 | }; | 560 | }; |
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index cb988c41cad9..3a621c74b76f 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h | |||
@@ -206,27 +206,27 @@ struct raid_map_disk_data { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | struct raid_map_data { | 208 | struct raid_map_data { |
209 | u32 structure_size; /* Size of entire structure in bytes */ | 209 | __le32 structure_size; /* Size of entire structure in bytes */ |
210 | u32 volume_blk_size; /* bytes / block in the volume */ | 210 | __le32 volume_blk_size; /* bytes / block in the volume */ |
211 | u64 volume_blk_cnt; /* logical blocks on the volume */ | 211 | __le64 volume_blk_cnt; /* logical blocks on the volume */ |
212 | u8 phys_blk_shift; /* Shift factor to convert between | 212 | u8 phys_blk_shift; /* Shift factor to convert between |
213 | * units of logical blocks and physical | 213 | * units of logical blocks and physical |
214 | * disk blocks */ | 214 | * disk blocks */ |
215 | u8 parity_rotation_shift; /* Shift factor to convert between units | 215 | u8 parity_rotation_shift; /* Shift factor to convert between units |
216 | * of logical stripes and physical | 216 | * of logical stripes and physical |
217 | * stripes */ | 217 | * stripes */ |
218 | u16 strip_size; /* blocks used on each disk / stripe */ | 218 | __le16 strip_size; /* blocks used on each disk / stripe */ |
219 | u64 disk_starting_blk; /* First disk block used in volume */ | 219 | __le64 disk_starting_blk; /* First disk block used in volume */ |
220 | u64 disk_blk_cnt; /* disk blocks used by volume / disk */ | 220 | __le64 disk_blk_cnt; /* disk blocks used by volume / disk */ |
221 | u16 data_disks_per_row; /* data disk entries / row in the map */ | 221 | __le16 data_disks_per_row; /* data disk entries / row in the map */ |
222 | u16 metadata_disks_per_row; /* mirror/parity disk entries / row | 222 | __le16 metadata_disks_per_row;/* mirror/parity disk entries / row |
223 | * in the map */ | 223 | * in the map */ |
224 | u16 row_cnt; /* rows in each layout map */ | 224 | __le16 row_cnt; /* rows in each layout map */ |
225 | u16 layout_map_count; /* layout maps (1 map per mirror/parity | 225 | __le16 layout_map_count; /* layout maps (1 map per mirror/parity |
226 | * group) */ | 226 | * group) */ |
227 | u16 flags; /* Bit 0 set if encryption enabled */ | 227 | __le16 flags; /* Bit 0 set if encryption enabled */ |
228 | #define RAID_MAP_FLAG_ENCRYPT_ON 0x01 | 228 | #define RAID_MAP_FLAG_ENCRYPT_ON 0x01 |
229 | u16 dekindex; /* Data encryption key index. */ | 229 | __le16 dekindex; /* Data encryption key index. */ |
230 | u8 reserved[16]; | 230 | u8 reserved[16]; |
231 | struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; | 231 | struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; |
232 | }; | 232 | }; |
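The controller-visible structures are re-declared with __le16/__le32/__le64 instead of plain integer types. The layout does not change; the sparse annotations force every access through the byte-order helpers, so a missing swap on a big-endian host shows up in a sparse build (make C=1) instead of as silent data corruption. A hedged illustration of how such fields are then read and written, assuming kernel context and a map pointer to a struct raid_map_data:

	u32 blk_size   = le32_to_cpu(map->volume_blk_size);	/* read: wire -> CPU order */
	u16 strip_size = le16_to_cpu(map->strip_size);

	map->layout_map_count = cpu_to_le16(1);			/* write: CPU -> wire order */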
@@ -240,6 +240,10 @@ struct ReportLUNdata { | |||
240 | 240 | ||
241 | struct ext_report_lun_entry { | 241 | struct ext_report_lun_entry { |
242 | u8 lunid[8]; | 242 | u8 lunid[8]; |
243 | #define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F) | ||
244 | #define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) | ||
245 | #define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \ | ||
246 | GET_BMIC_LEVEL_TWO_TARGET((lunid))) | ||
243 | u8 wwid[8]; | 247 | u8 wwid[8]; |
244 | u8 device_type; | 248 | u8 device_type; |
245 | u8 device_flags; | 249 | u8 device_flags; |
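The new GET_BMIC_* macros decode the BMIC addressing packed into the 8-byte lunid of an extended report-LUNs entry: the bus number sits in the low six bits of byte 7, the level-two target in byte 6, and the drive number combines the two. A small worked example (the lunid bytes are made up purely for illustration):

	u8 lunid[8] = { 0, 0, 0, 0, 0, 0, 0x05, 0x02 };	/* hypothetical entry */

	u32 bus    = GET_BMIC_BUS(lunid);		/* 0x02 & 0x3F = 2 */
	u32 target = GET_BMIC_LEVEL_TWO_TARGET(lunid);	/* lunid[6] = 5 */
	u32 drive  = GET_BMIC_DRIVE_NUMBER(lunid);	/* ((2 - 1) << 8) + 5 = 261 */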
@@ -268,6 +272,7 @@ struct SenseSubsystem_info { | |||
268 | #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ | 272 | #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ |
269 | #define BMIC_FLASH_FIRMWARE 0xF7 | 273 | #define BMIC_FLASH_FIRMWARE 0xF7 |
270 | #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 | 274 | #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 |
275 | #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 | ||
271 | 276 | ||
272 | /* Command List Structure */ | 277 | /* Command List Structure */ |
273 | union SCSI3Addr { | 278 | union SCSI3Addr { |
@@ -313,8 +318,8 @@ union LUNAddr { | |||
313 | struct CommandListHeader { | 318 | struct CommandListHeader { |
314 | u8 ReplyQueue; | 319 | u8 ReplyQueue; |
315 | u8 SGList; | 320 | u8 SGList; |
316 | u16 SGTotal; | 321 | __le16 SGTotal; |
317 | u64 tag; | 322 | __le64 tag; |
318 | union LUNAddr LUN; | 323 | union LUNAddr LUN; |
319 | }; | 324 | }; |
320 | 325 | ||
@@ -338,14 +343,14 @@ struct RequestBlock { | |||
338 | }; | 343 | }; |
339 | 344 | ||
340 | struct ErrDescriptor { | 345 | struct ErrDescriptor { |
341 | u64 Addr; | 346 | __le64 Addr; |
342 | u32 Len; | 347 | __le32 Len; |
343 | }; | 348 | }; |
344 | 349 | ||
345 | struct SGDescriptor { | 350 | struct SGDescriptor { |
346 | u64 Addr; | 351 | __le64 Addr; |
347 | u32 Len; | 352 | __le32 Len; |
348 | u32 Ext; | 353 | __le32 Ext; |
349 | }; | 354 | }; |
350 | 355 | ||
351 | union MoreErrInfo { | 356 | union MoreErrInfo { |
@@ -375,22 +380,19 @@ struct ErrorInfo { | |||
375 | #define CMD_IOACCEL1 0x04 | 380 | #define CMD_IOACCEL1 0x04 |
376 | #define CMD_IOACCEL2 0x05 | 381 | #define CMD_IOACCEL2 0x05 |
377 | 382 | ||
378 | #define DIRECT_LOOKUP_SHIFT 5 | 383 | #define DIRECT_LOOKUP_SHIFT 4 |
379 | #define DIRECT_LOOKUP_BIT 0x10 | ||
380 | #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) | 384 | #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) |
381 | 385 | ||
382 | #define HPSA_ERROR_BIT 0x02 | 386 | #define HPSA_ERROR_BIT 0x02 |
383 | struct ctlr_info; /* defined in hpsa.h */ | 387 | struct ctlr_info; /* defined in hpsa.h */ |
384 | /* The size of this structure needs to be divisible by 32 | 388 | /* The size of this structure needs to be divisible by 128 |
385 | * on all architectures because low 5 bits of the addresses | 389 | * on all architectures. The low 4 bits of the addresses |
386 | * are used as follows: | 390 | * are used as follows: |
387 | * | 391 | * |
388 | * bit 0: to device, used to indicate "performant mode" command | 392 | * bit 0: to device, used to indicate "performant mode" command |
389 | * from device, indicates error status. | 393 | * from device, indicates error status. |
390 | * bit 1-3: to device, indicates block fetch table entry for | 394 | * bit 1-3: to device, indicates block fetch table entry for |
391 | * reducing DMA in fetching commands from host memory. | 395 | * reducing DMA in fetching commands from host memory. |
392 | * bit 4: used to indicate whether tag is "direct lookup" (index), | ||
393 | * or a bus address. | ||
394 | */ | 396 | */ |
395 | 397 | ||
396 | #define COMMANDLIST_ALIGNMENT 128 | 398 | #define COMMANDLIST_ALIGNMENT 128 |
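With DIRECT_LOOKUP_BIT removed, every tag is an index: the command's pool slot is shifted left by DIRECT_LOOKUP_SHIFT (now 4), leaving the low four bits free for the performant-mode flag and block-fetch-table entry described in the comment above, which is exactly how the tag is built in hpsa_enter_performant_mode(). A small, self-contained worked example of the encode/decode arithmetic (the index value is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	#define DIRECT_LOOKUP_SHIFT 4
	#define DIRECT_LOOKUP_MASK  (~((1 << DIRECT_LOOKUP_SHIFT) - 1))

	int main(void)
	{
		uint64_t index = 37;				/* arbitrary command slot */
		uint64_t tag   = index << DIRECT_LOOKUP_SHIFT;	/* 0x250: low 4 bits stay clear */
		uint64_t back  = (tag & DIRECT_LOOKUP_MASK) >> DIRECT_LOOKUP_SHIFT;

		printf("tag=0x%llx index=%llu\n",
		       (unsigned long long)tag, (unsigned long long)back);
		return 0;					/* prints tag=0x250 index=37 */
	}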
@@ -405,9 +407,21 @@ struct CommandList { | |||
405 | struct ctlr_info *h; | 407 | struct ctlr_info *h; |
406 | int cmd_type; | 408 | int cmd_type; |
407 | long cmdindex; | 409 | long cmdindex; |
408 | struct list_head list; | ||
409 | struct completion *waiting; | 410 | struct completion *waiting; |
410 | void *scsi_cmd; | 411 | struct scsi_cmnd *scsi_cmd; |
412 | struct work_struct work; | ||
413 | |||
414 | /* | ||
415 | * For commands using either of the two "ioaccel" paths to | ||
416 | * bypass the RAID stack and go directly to the physical disk | ||
417 | * phys_disk is a pointer to the hpsa_scsi_dev_t to which the | ||
418 | * i/o is destined. We need to store that here because the command | ||
419 | * may potentially encounter TASK SET FULL and need to be resubmitted | ||
420 | * For "normal" i/o's not using the "ioaccel" paths, phys_disk is | ||
421 | * not used. | ||
422 | */ | ||
423 | struct hpsa_scsi_dev_t *phys_disk; | ||
424 | atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */ | ||
411 | } __aligned(COMMANDLIST_ALIGNMENT); | 425 | } __aligned(COMMANDLIST_ALIGNMENT); |
412 | 426 | ||
413 | /* Max S/G elements in I/O accelerator command */ | 427 | /* Max S/G elements in I/O accelerator command */ |
@@ -420,7 +434,7 @@ struct CommandList { | |||
420 | */ | 434 | */ |
421 | #define IOACCEL1_COMMANDLIST_ALIGNMENT 128 | 435 | #define IOACCEL1_COMMANDLIST_ALIGNMENT 128 |
422 | struct io_accel1_cmd { | 436 | struct io_accel1_cmd { |
423 | u16 dev_handle; /* 0x00 - 0x01 */ | 437 | __le16 dev_handle; /* 0x00 - 0x01 */ |
424 | u8 reserved1; /* 0x02 */ | 438 | u8 reserved1; /* 0x02 */ |
425 | u8 function; /* 0x03 */ | 439 | u8 function; /* 0x03 */ |
426 | u8 reserved2[8]; /* 0x04 - 0x0B */ | 440 | u8 reserved2[8]; /* 0x04 - 0x0B */ |
@@ -430,20 +444,20 @@ struct io_accel1_cmd { | |||
430 | u8 reserved4; /* 0x13 */ | 444 | u8 reserved4; /* 0x13 */ |
431 | u8 sgl_offset; /* 0x14 */ | 445 | u8 sgl_offset; /* 0x14 */ |
432 | u8 reserved5[7]; /* 0x15 - 0x1B */ | 446 | u8 reserved5[7]; /* 0x15 - 0x1B */ |
433 | u32 transfer_len; /* 0x1C - 0x1F */ | 447 | __le32 transfer_len; /* 0x1C - 0x1F */ |
434 | u8 reserved6[4]; /* 0x20 - 0x23 */ | 448 | u8 reserved6[4]; /* 0x20 - 0x23 */ |
435 | u16 io_flags; /* 0x24 - 0x25 */ | 449 | __le16 io_flags; /* 0x24 - 0x25 */ |
436 | u8 reserved7[14]; /* 0x26 - 0x33 */ | 450 | u8 reserved7[14]; /* 0x26 - 0x33 */ |
437 | u8 LUN[8]; /* 0x34 - 0x3B */ | 451 | u8 LUN[8]; /* 0x34 - 0x3B */ |
438 | u32 control; /* 0x3C - 0x3F */ | 452 | __le32 control; /* 0x3C - 0x3F */ |
439 | u8 CDB[16]; /* 0x40 - 0x4F */ | 453 | u8 CDB[16]; /* 0x40 - 0x4F */ |
440 | u8 reserved8[16]; /* 0x50 - 0x5F */ | 454 | u8 reserved8[16]; /* 0x50 - 0x5F */ |
441 | u16 host_context_flags; /* 0x60 - 0x61 */ | 455 | __le16 host_context_flags; /* 0x60 - 0x61 */ |
442 | u16 timeout_sec; /* 0x62 - 0x63 */ | 456 | __le16 timeout_sec; /* 0x62 - 0x63 */ |
443 | u8 ReplyQueue; /* 0x64 */ | 457 | u8 ReplyQueue; /* 0x64 */ |
444 | u8 reserved9[3]; /* 0x65 - 0x67 */ | 458 | u8 reserved9[3]; /* 0x65 - 0x67 */ |
445 | u64 tag; /* 0x68 - 0x6F */ | 459 | __le64 tag; /* 0x68 - 0x6F */ |
446 | u64 host_addr; /* 0x70 - 0x77 */ | 460 | __le64 host_addr; /* 0x70 - 0x77 */ |
447 | u8 CISS_LUN[8]; /* 0x78 - 0x7F */ | 461 | u8 CISS_LUN[8]; /* 0x78 - 0x7F */ |
448 | struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; | 462 | struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; |
449 | } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); | 463 | } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); |
@@ -470,8 +484,8 @@ struct io_accel1_cmd { | |||
470 | #define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 | 484 | #define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 |
471 | 485 | ||
472 | struct ioaccel2_sg_element { | 486 | struct ioaccel2_sg_element { |
473 | u64 address; | 487 | __le64 address; |
474 | u32 length; | 488 | __le32 length; |
475 | u8 reserved[3]; | 489 | u8 reserved[3]; |
476 | u8 chain_indicator; | 490 | u8 chain_indicator; |
477 | #define IOACCEL2_CHAIN 0x80 | 491 | #define IOACCEL2_CHAIN 0x80 |
@@ -526,20 +540,20 @@ struct io_accel2_cmd { | |||
526 | /* 0=off, 1=on */ | 540 | /* 0=off, 1=on */ |
527 | u8 reply_queue; /* Reply Queue ID */ | 541 | u8 reply_queue; /* Reply Queue ID */ |
528 | u8 reserved1; /* Reserved */ | 542 | u8 reserved1; /* Reserved */ |
529 | u32 scsi_nexus; /* Device Handle */ | 543 | __le32 scsi_nexus; /* Device Handle */ |
530 | u32 Tag; /* cciss tag, lower 4 bytes only */ | 544 | __le32 Tag; /* cciss tag, lower 4 bytes only */ |
531 | u32 tweak_lower; /* Encryption tweak, lower 4 bytes */ | 545 | __le32 tweak_lower; /* Encryption tweak, lower 4 bytes */ |
532 | u8 cdb[16]; /* SCSI Command Descriptor Block */ | 546 | u8 cdb[16]; /* SCSI Command Descriptor Block */ |
533 | u8 cciss_lun[8]; /* 8 byte SCSI address */ | 547 | u8 cciss_lun[8]; /* 8 byte SCSI address */ |
534 | u32 data_len; /* Total bytes to transfer */ | 548 | __le32 data_len; /* Total bytes to transfer */ |
535 | u8 cmd_priority_task_attr; /* priority and task attrs */ | 549 | u8 cmd_priority_task_attr; /* priority and task attrs */ |
536 | #define IOACCEL2_PRIORITY_MASK 0x78 | 550 | #define IOACCEL2_PRIORITY_MASK 0x78 |
537 | #define IOACCEL2_ATTR_MASK 0x07 | 551 | #define IOACCEL2_ATTR_MASK 0x07 |
538 | u8 sg_count; /* Number of sg elements */ | 552 | u8 sg_count; /* Number of sg elements */ |
539 | u16 dekindex; /* Data encryption key index */ | 553 | __le16 dekindex; /* Data encryption key index */ |
540 | u64 err_ptr; /* Error Pointer */ | 554 | __le64 err_ptr; /* Error Pointer */ |
541 | u32 err_len; /* Error Length*/ | 555 | __le32 err_len; /* Error Length*/ |
542 | u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ | 556 | __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ |
543 | struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; | 557 | struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; |
544 | struct io_accel2_scsi_response error_data; | 558 | struct io_accel2_scsi_response error_data; |
545 | } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); | 559 | } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); |
@@ -563,18 +577,18 @@ struct hpsa_tmf_struct { | |||
563 | u8 reserved1; /* byte 3 Reserved */ | 577 | u8 reserved1; /* byte 3 Reserved */ |
564 | u32 it_nexus; /* SCSI I-T Nexus */ | 578 | u32 it_nexus; /* SCSI I-T Nexus */ |
565 | u8 lun_id[8]; /* LUN ID for TMF request */ | 579 | u8 lun_id[8]; /* LUN ID for TMF request */ |
566 | u64 tag; /* cciss tag associated w/ request */ | 580 | __le64 tag; /* cciss tag associated w/ request */ |
567 | u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */ | 581 | __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ |
568 | u64 error_ptr; /* Error Pointer */ | 582 | __le64 error_ptr; /* Error Pointer */ |
569 | u32 error_len; /* Error Length */ | 583 | __le32 error_len; /* Error Length */ |
570 | }; | 584 | }; |
571 | 585 | ||
572 | /* Configuration Table Structure */ | 586 | /* Configuration Table Structure */ |
573 | struct HostWrite { | 587 | struct HostWrite { |
574 | u32 TransportRequest; | 588 | __le32 TransportRequest; |
575 | u32 command_pool_addr_hi; | 589 | __le32 command_pool_addr_hi; |
576 | u32 CoalIntDelay; | 590 | __le32 CoalIntDelay; |
577 | u32 CoalIntCount; | 591 | __le32 CoalIntCount; |
578 | }; | 592 | }; |
579 | 593 | ||
580 | #define SIMPLE_MODE 0x02 | 594 | #define SIMPLE_MODE 0x02 |
@@ -585,54 +599,54 @@ struct HostWrite { | |||
585 | #define DRIVER_SUPPORT_UA_ENABLE 0x00000001 | 599 | #define DRIVER_SUPPORT_UA_ENABLE 0x00000001 |
586 | 600 | ||
587 | struct CfgTable { | 601 | struct CfgTable { |
588 | u8 Signature[4]; | 602 | u8 Signature[4]; |
589 | u32 SpecValence; | 603 | __le32 SpecValence; |
590 | u32 TransportSupport; | 604 | __le32 TransportSupport; |
591 | u32 TransportActive; | 605 | __le32 TransportActive; |
592 | struct HostWrite HostWrite; | 606 | struct HostWrite HostWrite; |
593 | u32 CmdsOutMax; | 607 | __le32 CmdsOutMax; |
594 | u32 BusTypes; | 608 | __le32 BusTypes; |
595 | u32 TransMethodOffset; | 609 | __le32 TransMethodOffset; |
596 | u8 ServerName[16]; | 610 | u8 ServerName[16]; |
597 | u32 HeartBeat; | 611 | __le32 HeartBeat; |
598 | u32 driver_support; | 612 | __le32 driver_support; |
599 | #define ENABLE_SCSI_PREFETCH 0x100 | 613 | #define ENABLE_SCSI_PREFETCH 0x100 |
600 | #define ENABLE_UNIT_ATTN 0x01 | 614 | #define ENABLE_UNIT_ATTN 0x01 |
601 | u32 MaxScatterGatherElements; | 615 | __le32 MaxScatterGatherElements; |
602 | u32 MaxLogicalUnits; | 616 | __le32 MaxLogicalUnits; |
603 | u32 MaxPhysicalDevices; | 617 | __le32 MaxPhysicalDevices; |
604 | u32 MaxPhysicalDrivesPerLogicalUnit; | 618 | __le32 MaxPhysicalDrivesPerLogicalUnit; |
605 | u32 MaxPerformantModeCommands; | 619 | __le32 MaxPerformantModeCommands; |
606 | u32 MaxBlockFetch; | 620 | __le32 MaxBlockFetch; |
607 | u32 PowerConservationSupport; | 621 | __le32 PowerConservationSupport; |
608 | u32 PowerConservationEnable; | 622 | __le32 PowerConservationEnable; |
609 | u32 TMFSupportFlags; | 623 | __le32 TMFSupportFlags; |
610 | u8 TMFTagMask[8]; | 624 | u8 TMFTagMask[8]; |
611 | u8 reserved[0x78 - 0x70]; | 625 | u8 reserved[0x78 - 0x70]; |
612 | u32 misc_fw_support; /* offset 0x78 */ | 626 | __le32 misc_fw_support; /* offset 0x78 */ |
613 | #define MISC_FW_DOORBELL_RESET (0x02) | 627 | #define MISC_FW_DOORBELL_RESET 0x02 |
614 | #define MISC_FW_DOORBELL_RESET2 (0x010) | 628 | #define MISC_FW_DOORBELL_RESET2 0x010 |
615 | #define MISC_FW_RAID_OFFLOAD_BASIC (0x020) | 629 | #define MISC_FW_RAID_OFFLOAD_BASIC 0x020 |
616 | #define MISC_FW_EVENT_NOTIFY (0x080) | 630 | #define MISC_FW_EVENT_NOTIFY 0x080 |
617 | u8 driver_version[32]; | 631 | u8 driver_version[32]; |
618 | u32 max_cached_write_size; | 632 | __le32 max_cached_write_size; |
619 | u8 driver_scratchpad[16]; | 633 | u8 driver_scratchpad[16]; |
620 | u32 max_error_info_length; | 634 | __le32 max_error_info_length; |
621 | u32 io_accel_max_embedded_sg_count; | 635 | __le32 io_accel_max_embedded_sg_count; |
622 | u32 io_accel_request_size_offset; | 636 | __le32 io_accel_request_size_offset; |
623 | u32 event_notify; | 637 | __le32 event_notify; |
624 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) | 638 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) |
625 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) | 639 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) |
626 | u32 clear_event_notify; | 640 | __le32 clear_event_notify; |
627 | }; | 641 | }; |
628 | 642 | ||
629 | #define NUM_BLOCKFETCH_ENTRIES 8 | 643 | #define NUM_BLOCKFETCH_ENTRIES 8 |
630 | struct TransTable_struct { | 644 | struct TransTable_struct { |
631 | u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; | 645 | __le32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; |
632 | u32 RepQSize; | 646 | __le32 RepQSize; |
633 | u32 RepQCount; | 647 | __le32 RepQCount; |
634 | u32 RepQCtrAddrLow32; | 648 | __le32 RepQCtrAddrLow32; |
635 | u32 RepQCtrAddrHigh32; | 649 | __le32 RepQCtrAddrHigh32; |
636 | #define MAX_REPLY_QUEUES 64 | 650 | #define MAX_REPLY_QUEUES 64 |
637 | struct vals32 RepQAddr[MAX_REPLY_QUEUES]; | 651 | struct vals32 RepQAddr[MAX_REPLY_QUEUES]; |
638 | }; | 652 | }; |
@@ -644,5 +658,137 @@ struct hpsa_pci_info { | |||
644 | u32 board_id; | 658 | u32 board_id; |
645 | }; | 659 | }; |
646 | 660 | ||
661 | struct bmic_identify_physical_device { | ||
662 | u8 scsi_bus; /* SCSI Bus number on controller */ | ||
663 | u8 scsi_id; /* SCSI ID on this bus */ | ||
664 | __le16 block_size; /* sector size in bytes */ | ||
665 | __le32 total_blocks; /* number of sectors on drive */ | ||
666 | __le32 reserved_blocks; /* controller reserved (RIS) */ | ||
667 | u8 model[40]; /* Physical Drive Model */ | ||
668 | u8 serial_number[40]; /* Drive Serial Number */ | ||
669 | u8 firmware_revision[8]; /* drive firmware revision */ | ||
670 | u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ | ||
671 | u8 compaq_drive_stamp; /* 0 means drive not stamped */ | ||
672 | u8 last_failure_reason; | ||
673 | #define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG 0x01 | ||
674 | #define BMIC_LAST_FAILURE_ERROR_ERASING_RIS 0x02 | ||
675 | #define BMIC_LAST_FAILURE_ERROR_SAVING_RIS 0x03 | ||
676 | #define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND 0x04 | ||
677 | #define BMIC_LAST_FAILURE_MARK_BAD_FAILED 0x05 | ||
678 | #define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP 0x06 | ||
679 | #define BMIC_LAST_FAILURE_TIMEOUT 0x07 | ||
680 | #define BMIC_LAST_FAILURE_AUTOSENSE_FAILED 0x08 | ||
681 | #define BMIC_LAST_FAILURE_MEDIUM_ERROR_1 0x09 | ||
682 | #define BMIC_LAST_FAILURE_MEDIUM_ERROR_2 0x0a | ||
683 | #define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE 0x0b | ||
684 | #define BMIC_LAST_FAILURE_NOT_READY 0x0c | ||
685 | #define BMIC_LAST_FAILURE_HARDWARE_ERROR 0x0d | ||
686 | #define BMIC_LAST_FAILURE_ABORTED_COMMAND 0x0e | ||
687 | #define BMIC_LAST_FAILURE_WRITE_PROTECTED 0x0f | ||
688 | #define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER 0x10 | ||
689 | #define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR 0x11 | ||
690 | #define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG 0x12 | ||
691 | #define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED 0x13 | ||
692 | #define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG 0x14 | ||
693 | #define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED 0x15 | ||
694 | #define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED 0x16 | ||
695 | #define BMIC_LAST_FAILURE_INQUIRY_FAILED 0x17 | ||
696 | #define BMIC_LAST_FAILURE_NON_DISK_DEVICE 0x18 | ||
697 | #define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED 0x19 | ||
698 | #define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE 0x1a | ||
699 | #define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED 0x1b | ||
700 | #define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED 0x1c | ||
701 | #define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP 0x1d | ||
702 | #define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED 0x1e | ||
703 | #define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR 0x1f | ||
704 | #define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS 0x20 | ||
705 | #define BMIC_LAST_FAILURE_WRONG_REPLACE 0x21 | ||
706 | #define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED 0x22 | ||
707 | #define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED 0x23 | ||
708 | #define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE 0x24 | ||
709 | #define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG 0x25 | ||
710 | #define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG 0x26 | ||
711 | #define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED 0x27 | ||
712 | #define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY 0x28 | ||
713 | #define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED 0x29 | ||
714 | #define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY 0x2a | ||
715 | #define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED 0x2b | ||
716 | |||
717 | #define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED 0x37 | ||
718 | #define BMIC_LAST_FAILURE_PHY_RESET_FAILED 0x38 | ||
719 | #define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE 0x40 | ||
720 | #define BMIC_LAST_FAILURE_KC_VOLUME_FAILED 0x41 | ||
721 | #define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT 0x42 | ||
722 | #define BMIC_LAST_FAILURE_OFFLINE_ERASE 0x80 | ||
723 | #define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL 0x81 | ||
724 | #define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX 0x82 | ||
725 | #define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE 0x83 | ||
726 | |||
727 | u8 flags; | ||
728 | u8 more_flags; | ||
729 | u8 scsi_lun; /* SCSI LUN for phys drive */ | ||
730 | u8 yet_more_flags; | ||
731 | u8 even_more_flags; | ||
732 | __le32 spi_speed_rules; /* SPI Speed data: Ultra disable diagnose */ | ||
733 | u8 phys_connector[2]; /* connector number on controller */ | ||
734 | u8 phys_box_on_bus; /* phys enclosure this drive resides in */ | ||
735 | u8 phys_bay_in_box; /* phys drv bay this drive resides in */ | ||
736 | __le32 rpm; /* Drive rotational speed in rpm */ | ||
737 | u8 device_type; /* type of drive */ | ||
738 | u8 sata_version; /* only valid when drive_type is SATA */ | ||
739 | __le64 big_total_block_count; | ||
740 | __le64 ris_starting_lba; | ||
741 | __le32 ris_size; | ||
742 | u8 wwid[20]; | ||
743 | u8 controller_phy_map[32]; | ||
744 | __le16 phy_count; | ||
745 | u8 phy_connected_dev_type[256]; | ||
746 | u8 phy_to_drive_bay_num[256]; | ||
747 | __le16 phy_to_attached_dev_index[256]; | ||
748 | u8 box_index; | ||
749 | u8 reserved; | ||
750 | __le16 extra_physical_drive_flags; | ||
751 | #define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \ | ||
752 | (idphydrv->extra_physical_drive_flags & (1 << 10)) | ||
753 | u8 negotiated_link_rate[256]; | ||
754 | u8 phy_to_phy_map[256]; | ||
755 | u8 redundant_path_present_map; | ||
756 | u8 redundant_path_failure_map; | ||
757 | u8 active_path_number; | ||
758 | __le16 alternate_paths_phys_connector[8]; | ||
759 | u8 alternate_paths_phys_box_on_port[8]; | ||
760 | u8 multi_lun_device_lun_count; | ||
761 | u8 minimum_good_fw_revision[8]; | ||
762 | u8 unique_inquiry_bytes[20]; | ||
763 | u8 current_temperature_degreesC; | ||
764 | u8 temperature_threshold_degreesC; | ||
765 | u8 max_temperature_degreesC; | ||
766 | u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */ | ||
767 | __le16 current_queue_depth_limit; | ||
768 | u8 switch_name[10]; | ||
769 | __le16 switch_port; | ||
770 | u8 alternate_paths_switch_name[40]; | ||
771 | u8 alternate_paths_switch_port[8]; | ||
772 | __le16 power_on_hours; /* valid only if gas gauge supported */ | ||
773 | __le16 percent_endurance_used; /* valid only if gas gauge supported. */ | ||
774 | #define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \ | ||
775 | ((idphydrv->percent_endurance_used & 0x80) || \ | ||
776 | (idphydrv->percent_endurance_used > 10000)) | ||
777 | u8 drive_authentication; | ||
778 | #define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \ | ||
779 | (idphydrv->drive_authentication == 0x80) | ||
780 | u8 smart_carrier_authentication; | ||
781 | #define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \ | ||
782 | (idphydrv->smart_carrier_authentication != 0x0) | ||
783 | #define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \ | ||
784 | (idphydrv->smart_carrier_authentication == 0x01) | ||
785 | u8 smart_carrier_app_fw_version; | ||
786 | u8 smart_carrier_bootloader_fw_version; | ||
787 | u8 encryption_key_name[64]; | ||
788 | __le32 misc_drive_flags; | ||
789 | __le16 dek_index; | ||
790 | u8 padding[112]; | ||
791 | }; | ||
792 | |||
647 | #pragma pack() | 793 | #pragma pack() |
648 | #endif /* HPSA_CMD_H */ | 794 | #endif /* HPSA_CMD_H */ |
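The identify-physical-device structure added above also carries a few accessor macros (gas-gauge support, SSD wear-out, drive authentication). A minimal sketch of how a caller might use them, assuming a hypothetical id_phys buffer already filled in by a BMIC identify command; real code would also need le16_to_cpu()/le32_to_cpu() before doing arithmetic on the __le fields:

    /* report_drive_health() is an invented helper, not part of hpsa. */
    static void report_drive_health(struct bmic_identify_physical_device *id_phys)
    {
            if (BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(id_phys) &&
                BMIC_PHYS_DRIVE_SSD_WEAROUT(id_phys))
                    pr_warn("SSD endurance nearly exhausted\n");
            if (!BMIC_PHYS_DRIVE_AUTHENTICATED(id_phys))
                    pr_info("physical drive is not authenticated\n");
    }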
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index ddf0694d87f0..3882d9f519c8 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
@@ -2226,36 +2226,36 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2226 | 2226 | ||
2227 | if (hd->proc & PR_INFO) { | 2227 | if (hd->proc & PR_INFO) { |
2228 | seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No"); | 2228 | seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No"); |
2229 | seq_printf(m, "\nsync_xfer[] = "); | 2229 | seq_puts(m, "\nsync_xfer[] = "); |
2230 | for (x = 0; x < 7; x++) | 2230 | for (x = 0; x < 7; x++) |
2231 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); | 2231 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); |
2232 | seq_printf(m, "\nsync_stat[] = "); | 2232 | seq_puts(m, "\nsync_stat[] = "); |
2233 | for (x = 0; x < 7; x++) | 2233 | for (x = 0; x < 7; x++) |
2234 | seq_printf(m, "\t%02x", hd->sync_stat[x]); | 2234 | seq_printf(m, "\t%02x", hd->sync_stat[x]); |
2235 | } | 2235 | } |
2236 | #ifdef PROC_STATISTICS | 2236 | #ifdef PROC_STATISTICS |
2237 | if (hd->proc & PR_STATISTICS) { | 2237 | if (hd->proc & PR_STATISTICS) { |
2238 | seq_printf(m, "\ncommands issued: "); | 2238 | seq_puts(m, "\ncommands issued: "); |
2239 | for (x = 0; x < 7; x++) | 2239 | for (x = 0; x < 7; x++) |
2240 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); | 2240 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); |
2241 | seq_printf(m, "\ndisconnects allowed:"); | 2241 | seq_puts(m, "\ndisconnects allowed:"); |
2242 | for (x = 0; x < 7; x++) | 2242 | for (x = 0; x < 7; x++) |
2243 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); | 2243 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); |
2244 | seq_printf(m, "\ndisconnects done: "); | 2244 | seq_puts(m, "\ndisconnects done: "); |
2245 | for (x = 0; x < 7; x++) | 2245 | for (x = 0; x < 7; x++) |
2246 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); | 2246 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); |
2247 | seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); | 2247 | seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); |
2248 | } | 2248 | } |
2249 | #endif | 2249 | #endif |
2250 | if (hd->proc & PR_CONNECTED) { | 2250 | if (hd->proc & PR_CONNECTED) { |
2251 | seq_printf(m, "\nconnected: "); | 2251 | seq_puts(m, "\nconnected: "); |
2252 | if (hd->connected) { | 2252 | if (hd->connected) { |
2253 | cmd = (Scsi_Cmnd *) hd->connected; | 2253 | cmd = (Scsi_Cmnd *) hd->connected; |
2254 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2254 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
2255 | } | 2255 | } |
2256 | } | 2256 | } |
2257 | if (hd->proc & PR_INPUTQ) { | 2257 | if (hd->proc & PR_INPUTQ) { |
2258 | seq_printf(m, "\ninput_Q: "); | 2258 | seq_puts(m, "\ninput_Q: "); |
2259 | cmd = (Scsi_Cmnd *) hd->input_Q; | 2259 | cmd = (Scsi_Cmnd *) hd->input_Q; |
2260 | while (cmd) { | 2260 | while (cmd) { |
2261 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2261 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
@@ -2263,7 +2263,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2263 | } | 2263 | } |
2264 | } | 2264 | } |
2265 | if (hd->proc & PR_DISCQ) { | 2265 | if (hd->proc & PR_DISCQ) { |
2266 | seq_printf(m, "\ndisconnected_Q:"); | 2266 | seq_puts(m, "\ndisconnected_Q:"); |
2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; | 2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; |
2268 | while (cmd) { | 2268 | while (cmd) { |
2269 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2269 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
@@ -2273,7 +2273,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2273 | if (hd->proc & PR_TEST) { | 2273 | if (hd->proc & PR_TEST) { |
2274 | ; /* insert your own custom function here */ | 2274 | ; /* insert your own custom function here */ |
2275 | } | 2275 | } |
2276 | seq_printf(m, "\n"); | 2276 | seq_putc(m, '\n'); |
2277 | spin_unlock_irqrestore(instance->host_lock, flags); | 2277 | spin_unlock_irqrestore(instance->host_lock, flags); |
2278 | #endif /* PROC_INTERFACE */ | 2278 | #endif /* PROC_INTERFACE */ |
2279 | return 0; | 2279 | return 0; |
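The in2000 conversions above (and the ips/megaraid ones further down) all follow the same pattern: seq_printf() is kept only where a format string is actually interpolated, while constant strings and single characters move to seq_puts()/seq_putc(), which skip format parsing. An illustrative fragment, not taken from any of these drivers:

    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "\nconnected:");   /* constant string: no format parsing */
            seq_printf(m, " %d", 42);      /* formatted value still needs seq_printf */
            seq_putc(m, '\n');             /* single trailing character */
            return 0;
    }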
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9219953ee949..d9afc51af7d3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = { | |||
6815 | }; | 6815 | }; |
6816 | 6816 | ||
6817 | static struct ata_port_info sata_port_info = { | 6817 | static struct ata_port_info sata_port_info = { |
6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | 6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
6819 | ATA_FLAG_SAS_HOST, | ||
6819 | .pio_mask = ATA_PIO4_ONLY, | 6820 | .pio_mask = ATA_PIO4_ONLY, |
6820 | .mwdma_mask = ATA_MWDMA2, | 6821 | .mwdma_mask = ATA_MWDMA2, |
6821 | .udma_mask = ATA_UDMA6, | 6822 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index e5c28435d768..7542f11d3fcd 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -2038,15 +2038,14 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m) | |||
2038 | { | 2038 | { |
2039 | METHOD_TRACE("ips_host_info", 1); | 2039 | METHOD_TRACE("ips_host_info", 1); |
2040 | 2040 | ||
2041 | seq_printf(m, "\nIBM ServeRAID General Information:\n\n"); | 2041 | seq_puts(m, "\nIBM ServeRAID General Information:\n\n"); |
2042 | 2042 | ||
2043 | if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && | 2043 | if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && |
2044 | (le16_to_cpu(ha->nvram->adapter_type) != 0)) | 2044 | (le16_to_cpu(ha->nvram->adapter_type) != 0)) |
2045 | seq_printf(m, "\tController Type : %s\n", | 2045 | seq_printf(m, "\tController Type : %s\n", |
2046 | ips_adapter_name[ha->ad_type - 1]); | 2046 | ips_adapter_name[ha->ad_type - 1]); |
2047 | else | 2047 | else |
2048 | seq_printf(m, | 2048 | seq_puts(m, "\tController Type : Unknown\n"); |
2049 | "\tController Type : Unknown\n"); | ||
2050 | 2049 | ||
2051 | if (ha->io_addr) | 2050 | if (ha->io_addr) |
2052 | seq_printf(m, | 2051 | seq_printf(m, |
@@ -2138,7 +2137,7 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m) | |||
2138 | seq_printf(m, "\tCurrent Active PT Commands : %d\n", | 2137 | seq_printf(m, "\tCurrent Active PT Commands : %d\n", |
2139 | ha->num_ioctl); | 2138 | ha->num_ioctl); |
2140 | 2139 | ||
2141 | seq_printf(m, "\n"); | 2140 | seq_putc(m, '\n'); |
2142 | 2141 | ||
2143 | return 0; | 2142 | return 0; |
2144 | } | 2143 | } |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 932d9cc98d2f..9c706d8c1441 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = { | |||
547 | }; | 547 | }; |
548 | 548 | ||
549 | static struct ata_port_info sata_port_info = { | 549 | static struct ata_port_info sata_port_info = { |
550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, | 550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | |
551 | ATA_FLAG_SAS_HOST, | ||
551 | .pio_mask = ATA_PIO4, | 552 | .pio_mask = ATA_PIO4, |
552 | .mwdma_mask = ATA_MWDMA2, | 553 | .mwdma_mask = ATA_MWDMA2, |
553 | .udma_mask = ATA_UDMA6, | 554 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); | 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
501 | struct asd_sas_port *port = ev->port; | 501 | struct asd_sas_port *port = ev->port; |
502 | struct sas_ha_struct *ha = port->ha; | 502 | struct sas_ha_struct *ha = port->ha; |
503 | struct domain_device *ddev = port->port_dev; | ||
503 | 504 | ||
504 | /* prevent revalidation from finding sata links in recovery */ | 505 | /* prevent revalidation from finding sata links in recovery */ |
505 | mutex_lock(&ha->disco_mutex); | 506 | mutex_lock(&ha->disco_mutex); |
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
514 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, | 515 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, |
515 | task_pid_nr(current)); | 516 | task_pid_nr(current)); |
516 | 517 | ||
517 | if (port->port_dev) | 518 | if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || |
518 | res = sas_ex_revalidate_domain(port->port_dev); | 519 | ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) |
520 | res = sas_ex_revalidate_domain(ddev); | ||
519 | 521 | ||
520 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", | 522 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", |
521 | port->id, task_pid_nr(current), res); | 523 | port->id, task_pid_nr(current), res); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4c25485aa934..c66088d0fd2a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -2225,6 +2225,15 @@ lpfc_adisc_done(struct lpfc_vport *vport) | |||
2225 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2225 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
2226 | !(vport->fc_flag & FC_RSCN_MODE) && | 2226 | !(vport->fc_flag & FC_RSCN_MODE) && |
2227 | (phba->sli_rev < LPFC_SLI_REV4)) { | 2227 | (phba->sli_rev < LPFC_SLI_REV4)) { |
2228 | /* The ADISCs are complete. Doesn't matter if they | ||
2229 | * succeeded or failed because the ADISC completion | ||
2230 | * routine guarantees to call the state machine and | ||
2231 | * the RPI is either unregistered (failed ADISC response) | ||
2232 | * or the RPI is still valid and the node is marked | ||
2233 | * mapped for a target. The exchanges should be in the | ||
2234 | * correct state. This code is specific to SLI3. | ||
2235 | */ | ||
2236 | lpfc_issue_clear_la(phba, vport); | ||
2228 | lpfc_issue_reg_vpi(phba, vport); | 2237 | lpfc_issue_reg_vpi(phba, vport); |
2229 | return; | 2238 | return; |
2230 | } | 2239 | } |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 2485255f3414..bc7b34c02723 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -2240,7 +2240,7 @@ proc_show_battery(struct seq_file *m, void *v) | |||
2240 | goto free_pdev; | 2240 | goto free_pdev; |
2241 | 2241 | ||
2242 | if( mega_adapinq(adapter, dma_handle) != 0 ) { | 2242 | if( mega_adapinq(adapter, dma_handle) != 0 ) { |
2243 | seq_printf(m, "Adapter inquiry failed.\n"); | 2243 | seq_puts(m, "Adapter inquiry failed.\n"); |
2244 | printk(KERN_WARNING "megaraid: inquiry failed.\n"); | 2244 | printk(KERN_WARNING "megaraid: inquiry failed.\n"); |
2245 | goto free_inquiry; | 2245 | goto free_inquiry; |
2246 | } | 2246 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0d44d91c2fce..14e5c7cea929 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -35,7 +35,7 @@ | |||
35 | /* | 35 | /* |
36 | * MegaRAID SAS Driver meta data | 36 | * MegaRAID SAS Driver meta data |
37 | */ | 37 | */ |
38 | #define MEGASAS_VERSION "06.805.06.01-rc1" | 38 | #define MEGASAS_VERSION "06.806.08.00-rc1" |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Device IDs | 41 | * Device IDs |
@@ -969,7 +969,20 @@ struct megasas_ctrl_info { | |||
969 | 969 | ||
970 | struct { | 970 | struct { |
971 | #if defined(__BIG_ENDIAN_BITFIELD) | 971 | #if defined(__BIG_ENDIAN_BITFIELD) |
972 | u32 reserved:25; | 972 | u32 reserved:12; |
973 | u32 discardCacheDuringLDDelete:1; | ||
974 | u32 supportSecurityonJBOD:1; | ||
975 | u32 supportCacheBypassModes:1; | ||
976 | u32 supportDisableSESMonitoring:1; | ||
977 | u32 supportForceFlash:1; | ||
978 | u32 supportNVDRAM:1; | ||
979 | u32 supportDrvActivityLEDSetting:1; | ||
980 | u32 supportAllowedOpsforDrvRemoval:1; | ||
981 | u32 supportHOQRebuild:1; | ||
982 | u32 supportForceTo512e:1; | ||
983 | u32 supportNVCacheErase:1; | ||
984 | u32 supportDebugQueue:1; | ||
985 | u32 supportSwZone:1; | ||
973 | u32 supportCrashDump:1; | 986 | u32 supportCrashDump:1; |
974 | u32 supportMaxExtLDs:1; | 987 | u32 supportMaxExtLDs:1; |
975 | u32 supportT10RebuildAssist:1; | 988 | u32 supportT10RebuildAssist:1; |
@@ -981,9 +994,22 @@ struct megasas_ctrl_info { | |||
981 | u32 supportThermalPollInterval:1; | 994 | u32 supportThermalPollInterval:1; |
982 | u32 supportDisableImmediateIO:1; | 995 | u32 supportDisableImmediateIO:1; |
983 | u32 supportT10RebuildAssist:1; | 996 | u32 supportT10RebuildAssist:1; |
984 | u32 supportMaxExtLDs:1; | 997 | u32 supportMaxExtLDs:1; |
985 | u32 supportCrashDump:1; | 998 | u32 supportCrashDump:1; |
986 | u32 reserved:25; | 999 | u32 supportSwZone:1; |
1000 | u32 supportDebugQueue:1; | ||
1001 | u32 supportNVCacheErase:1; | ||
1002 | u32 supportForceTo512e:1; | ||
1003 | u32 supportHOQRebuild:1; | ||
1004 | u32 supportAllowedOpsforDrvRemoval:1; | ||
1005 | u32 supportDrvActivityLEDSetting:1; | ||
1006 | u32 supportNVDRAM:1; | ||
1007 | u32 supportForceFlash:1; | ||
1008 | u32 supportDisableSESMonitoring:1; | ||
1009 | u32 supportCacheBypassModes:1; | ||
1010 | u32 supportSecurityonJBOD:1; | ||
1011 | u32 discardCacheDuringLDDelete:1; | ||
1012 | u32 reserved:12; | ||
987 | #endif | 1013 | #endif |
988 | } adapterOperations3; | 1014 | } adapterOperations3; |
989 | 1015 | ||
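The reordered adapterOperations3 bitfields follow the usual mirrored-declaration idiom: the firmware defines these flags as bit positions in a little-endian 32-bit word, so under __BIG_ENDIAN_BITFIELD the members must be listed in reverse to land on the same bits. A made-up, minimal illustration of the pattern:

    struct example_caps {
    #if defined(__BIG_ENDIAN_BITFIELD)
            u32 reserved:30;
            u32 feature_b:1;
            u32 feature_a:1;        /* still maps to bit 0 of the LE word */
    #else
            u32 feature_a:1;        /* bit 0 */
            u32 feature_b:1;        /* bit 1 */
            u32 reserved:30;
    #endif
    };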
@@ -1022,6 +1048,13 @@ enum MR_MFI_MPT_PTHR_FLAGS { | |||
1022 | MFI_MPT_ATTACHED = 2, | 1048 | MFI_MPT_ATTACHED = 2, |
1023 | }; | 1049 | }; |
1024 | 1050 | ||
1051 | enum MR_SCSI_CMD_TYPE { | ||
1052 | READ_WRITE_LDIO = 0, | ||
1053 | NON_READ_WRITE_LDIO = 1, | ||
1054 | READ_WRITE_SYSPDIO = 2, | ||
1055 | NON_READ_WRITE_SYSPDIO = 3, | ||
1056 | }; | ||
1057 | |||
1025 | /* Frame Type */ | 1058 | /* Frame Type */ |
1026 | #define IO_FRAME 0 | 1059 | #define IO_FRAME 0 |
1027 | #define PTHRU_FRAME 1 | 1060 | #define PTHRU_FRAME 1 |
@@ -1049,6 +1082,8 @@ enum MR_MFI_MPT_PTHR_FLAGS { | |||
1049 | */ | 1082 | */ |
1050 | #define MEGASAS_INT_CMDS 32 | 1083 | #define MEGASAS_INT_CMDS 32 |
1051 | #define MEGASAS_SKINNY_INT_CMDS 5 | 1084 | #define MEGASAS_SKINNY_INT_CMDS 5 |
1085 | #define MEGASAS_FUSION_INTERNAL_CMDS 5 | ||
1086 | #define MEGASAS_FUSION_IOCTL_CMDS 3 | ||
1052 | 1087 | ||
1053 | #define MEGASAS_MAX_MSIX_QUEUES 128 | 1088 | #define MEGASAS_MAX_MSIX_QUEUES 128 |
1054 | /* | 1089 | /* |
@@ -1194,19 +1229,23 @@ union megasas_sgl_frame { | |||
1194 | typedef union _MFI_CAPABILITIES { | 1229 | typedef union _MFI_CAPABILITIES { |
1195 | struct { | 1230 | struct { |
1196 | #if defined(__BIG_ENDIAN_BITFIELD) | 1231 | #if defined(__BIG_ENDIAN_BITFIELD) |
1197 | u32 reserved:27; | 1232 | u32 reserved:25; |
1233 | u32 security_protocol_cmds_fw:1; | ||
1234 | u32 support_core_affinity:1; | ||
1198 | u32 support_ndrive_r1_lb:1; | 1235 | u32 support_ndrive_r1_lb:1; |
1199 | u32 support_max_255lds:1; | 1236 | u32 support_max_255lds:1; |
1200 | u32 reserved1:1; | 1237 | u32 support_fastpath_wb:1; |
1201 | u32 support_additional_msix:1; | 1238 | u32 support_additional_msix:1; |
1202 | u32 support_fp_remote_lun:1; | 1239 | u32 support_fp_remote_lun:1; |
1203 | #else | 1240 | #else |
1204 | u32 support_fp_remote_lun:1; | 1241 | u32 support_fp_remote_lun:1; |
1205 | u32 support_additional_msix:1; | 1242 | u32 support_additional_msix:1; |
1206 | u32 reserved1:1; | 1243 | u32 support_fastpath_wb:1; |
1207 | u32 support_max_255lds:1; | 1244 | u32 support_max_255lds:1; |
1208 | u32 support_ndrive_r1_lb:1; | 1245 | u32 support_ndrive_r1_lb:1; |
1209 | u32 reserved:27; | 1246 | u32 support_core_affinity:1; |
1247 | u32 security_protocol_cmds_fw:1; | ||
1248 | u32 reserved:25; | ||
1210 | #endif | 1249 | #endif |
1211 | } mfi_capabilities; | 1250 | } mfi_capabilities; |
1212 | u32 reg; | 1251 | u32 reg; |
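With the new capability bits in place, the union is still filled the way the fusion init path does it: set the individual bitfields, then endian-convert the aggregate word once before handing it to firmware. A hedged sketch (helper name invented):

    static u32 example_capability_word(void)
    {
            MFI_CAPABILITIES caps = { .reg = 0 };

            caps.mfi_capabilities.support_max_255lds = 1;
            caps.mfi_capabilities.security_protocol_cmds_fw = 1;
            cpu_to_le32s(&caps.reg);        /* convert the whole word once */
            return caps.reg;
    }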
@@ -1638,20 +1677,20 @@ struct megasas_instance { | |||
1638 | u32 crash_dump_fw_support; | 1677 | u32 crash_dump_fw_support; |
1639 | u32 crash_dump_drv_support; | 1678 | u32 crash_dump_drv_support; |
1640 | u32 crash_dump_app_support; | 1679 | u32 crash_dump_app_support; |
1680 | u32 secure_jbod_support; | ||
1641 | spinlock_t crashdump_lock; | 1681 | spinlock_t crashdump_lock; |
1642 | 1682 | ||
1643 | struct megasas_register_set __iomem *reg_set; | 1683 | struct megasas_register_set __iomem *reg_set; |
1644 | u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; | 1684 | u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; |
1645 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; | 1685 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; |
1646 | struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; | 1686 | struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; |
1647 | u8 ld_ids[MEGASAS_MAX_LD_IDS]; | 1687 | u8 ld_ids[MEGASAS_MAX_LD_IDS]; |
1648 | s8 init_id; | 1688 | s8 init_id; |
1649 | 1689 | ||
1650 | u16 max_num_sge; | 1690 | u16 max_num_sge; |
1651 | u16 max_fw_cmds; | 1691 | u16 max_fw_cmds; |
1652 | /* For Fusion its num IOCTL cmds, for others MFI based its | ||
1653 | max_fw_cmds */ | ||
1654 | u16 max_mfi_cmds; | 1692 | u16 max_mfi_cmds; |
1693 | u16 max_scsi_cmds; | ||
1655 | u32 max_sectors_per_req; | 1694 | u32 max_sectors_per_req; |
1656 | struct megasas_aen_event *ev; | 1695 | struct megasas_aen_event *ev; |
1657 | 1696 | ||
@@ -1727,7 +1766,7 @@ struct megasas_instance { | |||
1727 | u8 requestorId; | 1766 | u8 requestorId; |
1728 | char PlasmaFW111; | 1767 | char PlasmaFW111; |
1729 | char mpio; | 1768 | char mpio; |
1730 | int throttlequeuedepth; | 1769 | u16 throttlequeuedepth; |
1731 | u8 mask_interrupts; | 1770 | u8 mask_interrupts; |
1732 | u8 is_imr; | 1771 | u8 is_imr; |
1733 | }; | 1772 | }; |
@@ -1946,5 +1985,6 @@ void __megasas_return_cmd(struct megasas_instance *instance, | |||
1946 | 1985 | ||
1947 | void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, | 1986 | void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, |
1948 | struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); | 1987 | struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); |
1988 | int megasas_cmd_type(struct scsi_cmnd *cmd); | ||
1949 | 1989 | ||
1950 | #endif /*LSI_MEGARAID_SAS_H */ | 1990 | #endif /*LSI_MEGARAID_SAS_H */ |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index ff283d23788a..890637fdd61e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -78,7 +78,7 @@ static int allow_vf_ioctls; | |||
78 | module_param(allow_vf_ioctls, int, S_IRUGO); | 78 | module_param(allow_vf_ioctls, int, S_IRUGO); |
79 | MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); | 79 | MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); |
80 | 80 | ||
81 | static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; | 81 | static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; |
82 | module_param(throttlequeuedepth, int, S_IRUGO); | 82 | module_param(throttlequeuedepth, int, S_IRUGO); |
83 | MODULE_PARM_DESC(throttlequeuedepth, | 83 | MODULE_PARM_DESC(throttlequeuedepth, |
84 | "Adapter queue depth when throttled due to I/O timeout. Default: 16"); | 84 | "Adapter queue depth when throttled due to I/O timeout. Default: 16"); |
@@ -1417,16 +1417,15 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1417 | } | 1417 | } |
1418 | 1418 | ||
1419 | /** | 1419 | /** |
1420 | * megasas_is_ldio - Checks if the cmd is for logical drive | 1420 | * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD |
1421 | * and whether it's RW or non RW | ||
1421 | * @scmd: SCSI command | 1422 | * @scmd: SCSI command |
1422 | * | 1423 | * |
1423 | * Called by megasas_queue_command to find out if the command to be queued | ||
1424 | * is a logical drive command | ||
1425 | */ | 1424 | */ |
1426 | inline int megasas_is_ldio(struct scsi_cmnd *cmd) | 1425 | inline int megasas_cmd_type(struct scsi_cmnd *cmd) |
1427 | { | 1426 | { |
1428 | if (!MEGASAS_IS_LOGICAL(cmd)) | 1427 | int ret; |
1429 | return 0; | 1428 | |
1430 | switch (cmd->cmnd[0]) { | 1429 | switch (cmd->cmnd[0]) { |
1431 | case READ_10: | 1430 | case READ_10: |
1432 | case WRITE_10: | 1431 | case WRITE_10: |
@@ -1436,10 +1435,14 @@ inline int megasas_is_ldio(struct scsi_cmnd *cmd) | |||
1436 | case WRITE_6: | 1435 | case WRITE_6: |
1437 | case READ_16: | 1436 | case READ_16: |
1438 | case WRITE_16: | 1437 | case WRITE_16: |
1439 | return 1; | 1438 | ret = (MEGASAS_IS_LOGICAL(cmd)) ? |
1439 | READ_WRITE_LDIO : READ_WRITE_SYSPDIO; | ||
1440 | break; | ||
1440 | default: | 1441 | default: |
1441 | return 0; | 1442 | ret = (MEGASAS_IS_LOGICAL(cmd)) ? |
1443 | NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; | ||
1442 | } | 1444 | } |
1445 | return ret; | ||
1443 | } | 1446 | } |
1444 | 1447 | ||
1445 | /** | 1448 | /** |
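megasas_cmd_type() now folds the old logical-drive check and the read/write check into a single classification, returning one of the MR_SCSI_CMD_TYPE values added earlier. A hedged example of the kind of decision it enables (illustrative helper, not driver code):

    static bool is_fastpath_candidate(struct scsi_cmnd *scmd)
    {
            /* Only read/write I/O to a logical drive can use the RAID-map
             * fast path; everything else is sent to firmware unchanged. */
            return megasas_cmd_type(scmd) == READ_WRITE_LDIO;
    }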
@@ -1471,7 +1474,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1471 | if(!cmd->scmd) | 1474 | if(!cmd->scmd) |
1472 | continue; | 1475 | continue; |
1473 | printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); | 1476 | printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); |
1474 | if (megasas_is_ldio(cmd->scmd)){ | 1477 | if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { |
1475 | ldio = (struct megasas_io_frame *)cmd->frame; | 1478 | ldio = (struct megasas_io_frame *)cmd->frame; |
1476 | mfi_sgl = &ldio->sgl; | 1479 | mfi_sgl = &ldio->sgl; |
1477 | sgcount = ldio->sge_count; | 1480 | sgcount = ldio->sge_count; |
@@ -1531,7 +1534,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance, | |||
1531 | /* | 1534 | /* |
1532 | * Logical drive command | 1535 | * Logical drive command |
1533 | */ | 1536 | */ |
1534 | if (megasas_is_ldio(scmd)) | 1537 | if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) |
1535 | frame_count = megasas_build_ldio(instance, scmd, cmd); | 1538 | frame_count = megasas_build_ldio(instance, scmd, cmd); |
1536 | else | 1539 | else |
1537 | frame_count = megasas_build_dcdb(instance, scmd, cmd); | 1540 | frame_count = megasas_build_dcdb(instance, scmd, cmd); |
@@ -1689,22 +1692,66 @@ static int megasas_slave_alloc(struct scsi_device *sdev) | |||
1689 | return 0; | 1692 | return 0; |
1690 | } | 1693 | } |
1691 | 1694 | ||
1695 | /* | ||
1696 | * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after | ||
1697 | * the adapter has been killed | ||
1698 | * @instance: Adapter soft state | ||
1699 | * | ||
1700 | */ | ||
1701 | void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) | ||
1702 | { | ||
1703 | int i; | ||
1704 | struct megasas_cmd *cmd_mfi; | ||
1705 | struct megasas_cmd_fusion *cmd_fusion; | ||
1706 | struct fusion_context *fusion = instance->ctrl_context; | ||
1707 | |||
1708 | /* Find all outstanding ioctls */ | ||
1709 | if (fusion) { | ||
1710 | for (i = 0; i < instance->max_fw_cmds; i++) { | ||
1711 | cmd_fusion = fusion->cmd_list[i]; | ||
1712 | if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { | ||
1713 | cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; | ||
1714 | if (cmd_mfi->sync_cmd && | ||
1715 | cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) | ||
1716 | megasas_complete_cmd(instance, | ||
1717 | cmd_mfi, DID_OK); | ||
1718 | } | ||
1719 | } | ||
1720 | } else { | ||
1721 | for (i = 0; i < instance->max_fw_cmds; i++) { | ||
1722 | cmd_mfi = instance->cmd_list[i]; | ||
1723 | if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != | ||
1724 | MFI_CMD_ABORT) | ||
1725 | megasas_complete_cmd(instance, cmd_mfi, DID_OK); | ||
1726 | } | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | |||
1692 | void megaraid_sas_kill_hba(struct megasas_instance *instance) | 1731 | void megaraid_sas_kill_hba(struct megasas_instance *instance) |
1693 | { | 1732 | { |
1733 | /* Set critical error to block I/O & ioctls in case caller didn't */ | ||
1734 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
1735 | /* Wait 1 second to ensure IO or ioctls in build have posted */ | ||
1736 | msleep(1000); | ||
1694 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 1737 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
1695 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || | 1738 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
1696 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || | 1739 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || |
1697 | (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || | 1740 | (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || |
1698 | (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 1741 | (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || |
1699 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { | 1742 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { |
1700 | writel(MFI_STOP_ADP, &instance->reg_set->doorbell); | 1743 | writel(MFI_STOP_ADP, |
1744 | &instance->reg_set->doorbell); | ||
1701 | /* Flush */ | 1745 | /* Flush */ |
1702 | readl(&instance->reg_set->doorbell); | 1746 | readl(&instance->reg_set->doorbell); |
1703 | if (instance->mpio && instance->requestorId) | 1747 | if (instance->mpio && instance->requestorId) |
1704 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | 1748 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); |
1705 | } else { | 1749 | } else { |
1706 | writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); | 1750 | writel(MFI_STOP_ADP, |
1751 | &instance->reg_set->inbound_doorbell); | ||
1707 | } | 1752 | } |
1753 | /* Complete outstanding ioctls when adapter is killed */ | ||
1754 | megasas_complete_outstanding_ioctls(instance); | ||
1708 | } | 1755 | } |
1709 | 1756 | ||
1710 | /** | 1757 | /** |
@@ -1717,6 +1764,7 @@ void | |||
1717 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance) | 1764 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance) |
1718 | { | 1765 | { |
1719 | unsigned long flags; | 1766 | unsigned long flags; |
1767 | |||
1720 | if (instance->flag & MEGASAS_FW_BUSY | 1768 | if (instance->flag & MEGASAS_FW_BUSY |
1721 | && time_after(jiffies, instance->last_time + 5 * HZ) | 1769 | && time_after(jiffies, instance->last_time + 5 * HZ) |
1722 | && atomic_read(&instance->fw_outstanding) < | 1770 | && atomic_read(&instance->fw_outstanding) < |
@@ -1724,13 +1772,8 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance) | |||
1724 | 1772 | ||
1725 | spin_lock_irqsave(instance->host->host_lock, flags); | 1773 | spin_lock_irqsave(instance->host->host_lock, flags); |
1726 | instance->flag &= ~MEGASAS_FW_BUSY; | 1774 | instance->flag &= ~MEGASAS_FW_BUSY; |
1727 | if (instance->is_imr) { | ||
1728 | instance->host->can_queue = | ||
1729 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
1730 | } else | ||
1731 | instance->host->can_queue = | ||
1732 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
1733 | 1775 | ||
1776 | instance->host->can_queue = instance->max_scsi_cmds; | ||
1734 | spin_unlock_irqrestore(instance->host->host_lock, flags); | 1777 | spin_unlock_irqrestore(instance->host->host_lock, flags); |
1735 | } | 1778 | } |
1736 | } | 1779 | } |
@@ -3028,10 +3071,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) | |||
3028 | "was tried multiple times during reset." | 3071 | "was tried multiple times during reset." |
3029 | "Shutting down the HBA\n", | 3072 | "Shutting down the HBA\n", |
3030 | cmd, cmd->scmd, cmd->sync_cmd); | 3073 | cmd, cmd->scmd, cmd->sync_cmd); |
3074 | instance->instancet->disable_intr(instance); | ||
3075 | atomic_set(&instance->fw_reset_no_pci_access, 1); | ||
3031 | megaraid_sas_kill_hba(instance); | 3076 | megaraid_sas_kill_hba(instance); |
3032 | |||
3033 | instance->adprecovery = | ||
3034 | MEGASAS_HW_CRITICAL_ERROR; | ||
3035 | return; | 3077 | return; |
3036 | } | 3078 | } |
3037 | } | 3079 | } |
@@ -3165,8 +3207,8 @@ process_fw_state_change_wq(struct work_struct *work) | |||
3165 | if (megasas_transition_to_ready(instance, 1)) { | 3207 | if (megasas_transition_to_ready(instance, 1)) { |
3166 | printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); | 3208 | printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); |
3167 | 3209 | ||
3210 | atomic_set(&instance->fw_reset_no_pci_access, 1); | ||
3168 | megaraid_sas_kill_hba(instance); | 3211 | megaraid_sas_kill_hba(instance); |
3169 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
3170 | return ; | 3212 | return ; |
3171 | } | 3213 | } |
3172 | 3214 | ||
@@ -3547,7 +3589,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
3547 | int i; | 3589 | int i; |
3548 | u32 max_cmd; | 3590 | u32 max_cmd; |
3549 | u32 sge_sz; | 3591 | u32 sge_sz; |
3550 | u32 sgl_sz; | ||
3551 | u32 total_sz; | 3592 | u32 total_sz; |
3552 | u32 frame_count; | 3593 | u32 frame_count; |
3553 | struct megasas_cmd *cmd; | 3594 | struct megasas_cmd *cmd; |
@@ -3566,24 +3607,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
3566 | } | 3607 | } |
3567 | 3608 | ||
3568 | /* | 3609 | /* |
3569 | * Calculated the number of 64byte frames required for SGL | 3610 | * For MFI controllers. |
3570 | */ | 3611 | * max_num_sge = 60 |
3571 | sgl_sz = sge_sz * instance->max_num_sge; | 3612 | * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) |
3572 | frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; | 3613 | * Total 960 byte (15 MFI frame of 64 byte) |
3573 | frame_count = 15; | 3614 | * |
3574 | 3615 | * Fusion adapters require only 3 extra frames. | |
3575 | /* | 3616 | * max_num_sge = 16 (defined as MAX_IOCTL_SGE) |
3576 | * We need one extra frame for the MFI command | 3617 | * max_sge_sz = 12 byte (sizeof megasas_sge64) |
3618 | * Total 192 byte (3 MFI frame of 64 byte) | ||
3577 | */ | 3619 | */ |
3578 | frame_count++; | 3620 | frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); |
3579 | |||
3580 | total_sz = MEGAMFI_FRAME_SIZE * frame_count; | 3621 | total_sz = MEGAMFI_FRAME_SIZE * frame_count; |
3581 | /* | 3622 | /* |
3582 | * Use DMA pool facility provided by PCI layer | 3623 | * Use DMA pool facility provided by PCI layer |
3583 | */ | 3624 | */ |
3584 | instance->frame_dma_pool = pci_pool_create("megasas frame pool", | 3625 | instance->frame_dma_pool = pci_pool_create("megasas frame pool", |
3585 | instance->pdev, total_sz, 64, | 3626 | instance->pdev, total_sz, 256, 0); |
3586 | 0); | ||
3587 | 3627 | ||
3588 | if (!instance->frame_dma_pool) { | 3628 | if (!instance->frame_dma_pool) { |
3589 | printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); | 3629 | printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); |
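The new comment's sizing arithmetic works out as follows (a sketch with a made-up helper name; MEGAMFI_FRAME_SIZE is the 64-byte MFI frame):

    /* MFI:    60 SGEs * 16 B = 960 B -> 15 frames, +1 for the MFI frame = 16
     * Fusion: 16 SGEs * 12 B = 192 B ->  3 frames, +1 for the MFI frame =  4 */
    static u32 mfi_frames_needed(u32 max_num_sge, u32 sge_sz)
    {
            return DIV_ROUND_UP(max_num_sge * sge_sz, MEGAMFI_FRAME_SIZE) + 1;
    }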
@@ -4631,28 +4671,48 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
4631 | instance->crash_dump_h); | 4671 | instance->crash_dump_h); |
4632 | instance->crash_dump_buf = NULL; | 4672 | instance->crash_dump_buf = NULL; |
4633 | } | 4673 | } |
4674 | |||
4675 | instance->secure_jbod_support = | ||
4676 | ctrl_info->adapterOperations3.supportSecurityonJBOD; | ||
4677 | if (instance->secure_jbod_support) | ||
4678 | dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n"); | ||
4634 | instance->max_sectors_per_req = instance->max_num_sge * | 4679 | instance->max_sectors_per_req = instance->max_num_sge * |
4635 | PAGE_SIZE / 512; | 4680 | PAGE_SIZE / 512; |
4636 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) | 4681 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) |
4637 | instance->max_sectors_per_req = tmp_sectors; | 4682 | instance->max_sectors_per_req = tmp_sectors; |
4638 | 4683 | ||
4639 | /* Check for valid throttlequeuedepth module parameter */ | 4684 | /* |
4640 | if (instance->is_imr) { | 4685 | * 1. For fusion adapters, 3 commands for IOCTL and 5 commands |
4641 | if (throttlequeuedepth > (instance->max_fw_cmds - | 4686 | * for driver's internal DCMDs. |
4642 | MEGASAS_SKINNY_INT_CMDS)) | 4687 | * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's |
4643 | instance->throttlequeuedepth = | 4688 | * internal DCMDs. |
4644 | MEGASAS_THROTTLE_QUEUE_DEPTH; | 4689 | * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs |
4645 | else | 4690 | * and 5 commands for driver's internal DCMDs. |
4646 | instance->throttlequeuedepth = throttlequeuedepth; | 4691 | */ |
4692 | if (instance->ctrl_context) { | ||
4693 | instance->max_scsi_cmds = instance->max_fw_cmds - | ||
4694 | (MEGASAS_FUSION_INTERNAL_CMDS + | ||
4695 | MEGASAS_FUSION_IOCTL_CMDS); | ||
4696 | sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); | ||
4697 | } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
4698 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
4699 | instance->max_scsi_cmds = instance->max_fw_cmds - | ||
4700 | MEGASAS_SKINNY_INT_CMDS; | ||
4701 | sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); | ||
4647 | } else { | 4702 | } else { |
4648 | if (throttlequeuedepth > (instance->max_fw_cmds - | 4703 | instance->max_scsi_cmds = instance->max_fw_cmds - |
4649 | MEGASAS_INT_CMDS)) | 4704 | MEGASAS_INT_CMDS; |
4650 | instance->throttlequeuedepth = | 4705 | sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); |
4651 | MEGASAS_THROTTLE_QUEUE_DEPTH; | ||
4652 | else | ||
4653 | instance->throttlequeuedepth = throttlequeuedepth; | ||
4654 | } | 4706 | } |
4655 | 4707 | ||
4708 | /* Check for valid throttlequeuedepth module parameter */ | ||
4709 | if (throttlequeuedepth && | ||
4710 | throttlequeuedepth <= instance->max_scsi_cmds) | ||
4711 | instance->throttlequeuedepth = throttlequeuedepth; | ||
4712 | else | ||
4713 | instance->throttlequeuedepth = | ||
4714 | MEGASAS_THROTTLE_QUEUE_DEPTH; | ||
4715 | |||
4656 | /* | 4716 | /* |
4657 | * Setup tasklet for cmd completion | 4717 | * Setup tasklet for cmd completion |
4658 | */ | 4718 | */ |
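A worked example of the new reservation scheme, using hypothetical numbers: a Fusion controller reporting max_fw_cmds = 1008 ends up with max_scsi_cmds = 1008 - (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS) = 1008 - (5 + 3) = 1000, and its ioctl semaphore is initialised to 3. The throttlequeuedepth module parameter is then honoured only if it is non-zero and no larger than max_scsi_cmds; anything else falls back to MEGASAS_THROTTLE_QUEUE_DEPTH (16).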
@@ -4947,12 +5007,7 @@ static int megasas_io_attach(struct megasas_instance *instance) | |||
4947 | */ | 5007 | */ |
4948 | host->irq = instance->pdev->irq; | 5008 | host->irq = instance->pdev->irq; |
4949 | host->unique_id = instance->unique_id; | 5009 | host->unique_id = instance->unique_id; |
4950 | if (instance->is_imr) { | 5010 | host->can_queue = instance->max_scsi_cmds; |
4951 | host->can_queue = | ||
4952 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
4953 | } else | ||
4954 | host->can_queue = | ||
4955 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
4956 | host->this_id = instance->init_id; | 5011 | host->this_id = instance->init_id; |
4957 | host->sg_tablesize = instance->max_num_sge; | 5012 | host->sg_tablesize = instance->max_num_sge; |
4958 | 5013 | ||
@@ -5130,8 +5185,6 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5130 | ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); | 5185 | ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); |
5131 | INIT_LIST_HEAD(&fusion->cmd_pool); | 5186 | INIT_LIST_HEAD(&fusion->cmd_pool); |
5132 | spin_lock_init(&fusion->mpt_pool_lock); | 5187 | spin_lock_init(&fusion->mpt_pool_lock); |
5133 | memset(fusion->load_balance_info, 0, | ||
5134 | sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); | ||
5135 | } | 5188 | } |
5136 | break; | 5189 | break; |
5137 | default: /* For all other supported controllers */ | 5190 | default: /* For all other supported controllers */ |
@@ -5215,12 +5268,10 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5215 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; | 5268 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; |
5216 | instance->ctrl_info = NULL; | 5269 | instance->ctrl_info = NULL; |
5217 | 5270 | ||
5271 | |||
5218 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 5272 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
5219 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 5273 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) |
5220 | instance->flag_ieee = 1; | 5274 | instance->flag_ieee = 1; |
5221 | sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); | ||
5222 | } else | ||
5223 | sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); | ||
5224 | 5275 | ||
5225 | megasas_dbg_lvl = 0; | 5276 | megasas_dbg_lvl = 0; |
5226 | instance->flag = 0; | 5277 | instance->flag = 0; |
@@ -6215,9 +6266,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) | |||
6215 | goto out_kfree_ioc; | 6266 | goto out_kfree_ioc; |
6216 | } | 6267 | } |
6217 | 6268 | ||
6218 | /* | ||
6219 | * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds | ||
6220 | */ | ||
6221 | if (down_interruptible(&instance->ioctl_sem)) { | 6269 | if (down_interruptible(&instance->ioctl_sem)) { |
6222 | error = -ERESTARTSYS; | 6270 | error = -ERESTARTSYS; |
6223 | goto out_kfree_ioc; | 6271 | goto out_kfree_ioc; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 460c6a3d4ade..4f72287860ee 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c | |||
@@ -172,6 +172,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
172 | struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; | 172 | struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; |
173 | struct MR_FW_RAID_MAP *pFwRaidMap = NULL; | 173 | struct MR_FW_RAID_MAP *pFwRaidMap = NULL; |
174 | int i; | 174 | int i; |
175 | u16 ld_count; | ||
175 | 176 | ||
176 | 177 | ||
177 | struct MR_DRV_RAID_MAP_ALL *drv_map = | 178 | struct MR_DRV_RAID_MAP_ALL *drv_map = |
@@ -191,9 +192,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
191 | fw_map_old = (struct MR_FW_RAID_MAP_ALL *) | 192 | fw_map_old = (struct MR_FW_RAID_MAP_ALL *) |
192 | fusion->ld_map[(instance->map_id & 1)]; | 193 | fusion->ld_map[(instance->map_id & 1)]; |
193 | pFwRaidMap = &fw_map_old->raidMap; | 194 | pFwRaidMap = &fw_map_old->raidMap; |
195 | ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); | ||
194 | 196 | ||
195 | #if VD_EXT_DEBUG | 197 | #if VD_EXT_DEBUG |
196 | for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) { | 198 | for (i = 0; i < ld_count; i++) { |
197 | dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " | 199 | dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " |
198 | "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", | 200 | "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", |
199 | instance->unique_id, i, | 201 | instance->unique_id, i, |
@@ -205,12 +207,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
205 | 207 | ||
206 | memset(drv_map, 0, fusion->drv_map_sz); | 208 | memset(drv_map, 0, fusion->drv_map_sz); |
207 | pDrvRaidMap->totalSize = pFwRaidMap->totalSize; | 209 | pDrvRaidMap->totalSize = pFwRaidMap->totalSize; |
208 | pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount; | 210 | pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); |
209 | pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; | 211 | pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; |
210 | for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) | 212 | for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) |
211 | pDrvRaidMap->ldTgtIdToLd[i] = | 213 | pDrvRaidMap->ldTgtIdToLd[i] = |
212 | (u8)pFwRaidMap->ldTgtIdToLd[i]; | 214 | (u8)pFwRaidMap->ldTgtIdToLd[i]; |
213 | for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) { | 215 | for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS); |
216 | i < MAX_LOGICAL_DRIVES_EXT; i++) | ||
217 | pDrvRaidMap->ldTgtIdToLd[i] = 0xff; | ||
218 | for (i = 0; i < ld_count; i++) { | ||
214 | pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; | 219 | pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; |
215 | #if VD_EXT_DEBUG | 220 | #if VD_EXT_DEBUG |
216 | dev_dbg(&instance->pdev->dev, | 221 | dev_dbg(&instance->pdev->dev, |
@@ -252,7 +257,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) | |||
252 | struct LD_LOAD_BALANCE_INFO *lbInfo; | 257 | struct LD_LOAD_BALANCE_INFO *lbInfo; |
253 | PLD_SPAN_INFO ldSpanInfo; | 258 | PLD_SPAN_INFO ldSpanInfo; |
254 | struct MR_LD_RAID *raid; | 259 | struct MR_LD_RAID *raid; |
255 | int ldCount, num_lds; | 260 | u16 ldCount, num_lds; |
256 | u16 ld; | 261 | u16 ld; |
257 | u32 expected_size; | 262 | u32 expected_size; |
258 | 263 | ||
@@ -356,7 +361,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map, | |||
356 | 361 | ||
357 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { | 362 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { |
358 | ld = MR_TargetIdToLdGet(ldCount, map); | 363 | ld = MR_TargetIdToLdGet(ldCount, map); |
359 | if (ld >= MAX_LOGICAL_DRIVES_EXT) | 364 | if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) |
360 | continue; | 365 | continue; |
361 | raid = MR_LdRaidGet(ld, map); | 366 | raid = MR_LdRaidGet(ld, map); |
362 | dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", | 367 | dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", |
@@ -1157,7 +1162,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, | |||
1157 | 1162 | ||
1158 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { | 1163 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { |
1159 | ld = MR_TargetIdToLdGet(ldCount, map); | 1164 | ld = MR_TargetIdToLdGet(ldCount, map); |
1160 | if (ld >= MAX_LOGICAL_DRIVES_EXT) | 1165 | if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) |
1161 | continue; | 1166 | continue; |
1162 | raid = MR_LdRaidGet(ld, map); | 1167 | raid = MR_LdRaidGet(ld, map); |
1163 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { | 1168 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { |
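The raid-map change relies on a sentinel: MR_PopulateDrvRaidMap() now fills unused ldTgtIdToLd[] slots with 0xff, which equals MAX_LOGICAL_DRIVES_EXT - 1 when the limit is 256, and that is why the span/LD walkers above skip ld >= (MAX_LOGICAL_DRIVES_EXT - 1) instead of ld >= MAX_LOGICAL_DRIVES_EXT. A tiny hedged helper expressing the check (not part of the driver):

    static bool ld_id_valid(u16 ld)
    {
            return ld < (MAX_LOGICAL_DRIVES_EXT - 1);       /* 0xff means "no LD" */
    }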
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 71557f64bb5e..675b5e7aba94 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -63,7 +63,6 @@ extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance | |||
63 | extern void | 63 | extern void |
64 | megasas_complete_cmd(struct megasas_instance *instance, | 64 | megasas_complete_cmd(struct megasas_instance *instance, |
65 | struct megasas_cmd *cmd, u8 alt_status); | 65 | struct megasas_cmd *cmd, u8 alt_status); |
66 | int megasas_is_ldio(struct scsi_cmnd *cmd); | ||
67 | int | 66 | int |
68 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, | 67 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, |
69 | int seconds); | 68 | int seconds); |
@@ -103,6 +102,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) | |||
103 | { | 102 | { |
104 | struct megasas_register_set __iomem *regs; | 103 | struct megasas_register_set __iomem *regs; |
105 | regs = instance->reg_set; | 104 | regs = instance->reg_set; |
105 | |||
106 | instance->mask_interrupts = 0; | ||
106 | /* For Thunderbolt/Invader also clear intr on enable */ | 107 | /* For Thunderbolt/Invader also clear intr on enable */ |
107 | writel(~0, ®s->outbound_intr_status); | 108 | writel(~0, ®s->outbound_intr_status); |
108 | readl(®s->outbound_intr_status); | 109 | readl(®s->outbound_intr_status); |
@@ -111,7 +112,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) | |||
111 | 112 | ||
112 | /* Dummy readl to force pci flush */ | 113 | /* Dummy readl to force pci flush */ |
113 | readl(®s->outbound_intr_mask); | 114 | readl(®s->outbound_intr_mask); |
114 | instance->mask_interrupts = 0; | ||
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
@@ -196,6 +196,7 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, | |||
196 | 196 | ||
197 | cmd->scmd = NULL; | 197 | cmd->scmd = NULL; |
198 | cmd->sync_cmd_idx = (u32)ULONG_MAX; | 198 | cmd->sync_cmd_idx = (u32)ULONG_MAX; |
199 | memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); | ||
199 | list_add(&cmd->list, (&fusion->cmd_pool)->next); | 200 | list_add(&cmd->list, (&fusion->cmd_pool)->next); |
200 | 201 | ||
201 | spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); | 202 | spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); |
@@ -689,6 +690,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
689 | = 1; | 690 | = 1; |
690 | init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb | 691 | init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb |
691 | = 1; | 692 | = 1; |
693 | init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw | ||
694 | = 1; | ||
692 | /* Convert capability to LE32 */ | 695 | /* Convert capability to LE32 */ |
693 | cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); | 696 | cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); |
694 | 697 | ||
@@ -698,12 +701,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
698 | cpu_to_le32(lower_32_bits(ioc_init_handle)); | 701 | cpu_to_le32(lower_32_bits(ioc_init_handle)); |
699 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); | 702 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); |
700 | 703 | ||
701 | req_desc.Words = 0; | 704 | req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr)); |
705 | req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr)); | ||
702 | req_desc.MFAIo.RequestFlags = | 706 | req_desc.MFAIo.RequestFlags = |
703 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << | 707 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << |
704 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 708 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
705 | cpu_to_le32s((u32 *)&req_desc.MFAIo); | ||
706 | req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr); | ||
707 | 709 | ||
708 | /* | 710 | /* |
709 | * disable the intr before firing the init frame | 711 | * disable the intr before firing the init frame |
@@ -1017,8 +1019,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) | |||
1017 | * does not exceed max cmds that the FW can support | 1019 | * does not exceed max cmds that the FW can support |
1018 | */ | 1020 | */ |
1019 | instance->max_fw_cmds = instance->max_fw_cmds-1; | 1021 | instance->max_fw_cmds = instance->max_fw_cmds-1; |
1020 | /* Only internal cmds (DCMD) need to have MFI frames */ | 1022 | |
1021 | instance->max_mfi_cmds = MEGASAS_INT_CMDS; | 1023 | /* |
1024 | * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames | ||
1025 | */ | ||
1026 | instance->max_mfi_cmds = | ||
1027 | MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; | ||
1022 | 1028 | ||
1023 | max_cmd = instance->max_fw_cmds; | 1029 | max_cmd = instance->max_fw_cmds; |
1024 | 1030 | ||
@@ -1285,6 +1291,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, | |||
1285 | 1291 | ||
1286 | sgl_ptr = | 1292 | sgl_ptr = |
1287 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; | 1293 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; |
1294 | memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME); | ||
1288 | } | 1295 | } |
1289 | } | 1296 | } |
1290 | 1297 | ||
@@ -1658,6 +1665,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1658 | u32 device_id; | 1665 | u32 device_id; |
1659 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | 1666 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; |
1660 | u16 pd_index = 0; | 1667 | u16 pd_index = 0; |
1668 | u16 os_timeout_value; | ||
1669 | u16 timeout_limit; | ||
1661 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr; | 1670 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr; |
1662 | struct fusion_context *fusion = instance->ctrl_context; | 1671 | struct fusion_context *fusion = instance->ctrl_context; |
1663 | u8 span, physArm; | 1672 | u8 span, physArm; |
@@ -1674,52 +1683,66 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1674 | 1683 | ||
1675 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); | 1684 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); |
1676 | 1685 | ||
1677 | |||
1678 | /* Check if this is a system PD I/O */ | ||
1679 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && | 1686 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && |
1680 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | 1687 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { |
1681 | io_request->Function = 0; | ||
1682 | if (fusion->fast_path_io) | 1688 | if (fusion->fast_path_io) |
1683 | io_request->DevHandle = | 1689 | io_request->DevHandle = |
1684 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | 1690 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; |
1685 | io_request->RaidContext.timeoutValue = | ||
1686 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | ||
1687 | io_request->RaidContext.regLockFlags = 0; | ||
1688 | io_request->RaidContext.regLockRowLBA = 0; | ||
1689 | io_request->RaidContext.regLockLength = 0; | ||
1690 | io_request->RaidContext.RAIDFlags = | 1691 | io_request->RaidContext.RAIDFlags = |
1691 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << | 1692 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD |
1692 | MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; | 1693 | << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; |
1693 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 1694 | cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; |
1694 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | ||
1695 | io_request->IoFlags |= cpu_to_le16( | ||
1696 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); | ||
1697 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1698 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1699 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1700 | cmd->request_desc->SCSIIO.DevHandle = | ||
1701 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | ||
1702 | cmd->request_desc->SCSIIO.MSIxIndex = | 1695 | cmd->request_desc->SCSIIO.MSIxIndex = |
1703 | instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; | 1696 | instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; |
1704 | /* | 1697 | os_timeout_value = scmd->request->timeout / HZ; |
1705 | * If the command is for the tape device, set the | 1698 | |
1706 | * FP timeout to the os layer timeout value. | 1699 | if (instance->secure_jbod_support && |
1707 | */ | 1700 | (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) { |
1708 | if (scmd->device->type == TYPE_TAPE) { | 1701 | /* system pd firmware path */ |
1709 | if ((scmd->request->timeout / HZ) > 0xFFFF) | 1702 | io_request->Function = |
1710 | io_request->RaidContext.timeoutValue = | 1703 | MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; |
1711 | 0xFFFF; | 1704 | cmd->request_desc->SCSIIO.RequestFlags = |
1712 | else | 1705 | (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
1713 | io_request->RaidContext.timeoutValue = | 1706 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
1714 | scmd->request->timeout / HZ; | 1707 | io_request->RaidContext.timeoutValue = |
1708 | cpu_to_le16(os_timeout_value); | ||
1709 | } else { | ||
1710 | /* system pd Fast Path */ | ||
1711 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | ||
1712 | io_request->RaidContext.regLockFlags = 0; | ||
1713 | io_request->RaidContext.regLockRowLBA = 0; | ||
1714 | io_request->RaidContext.regLockLength = 0; | ||
1715 | timeout_limit = (scmd->device->type == TYPE_DISK) ? | ||
1716 | 255 : 0xFFFF; | ||
1717 | io_request->RaidContext.timeoutValue = | ||
1718 | cpu_to_le16((os_timeout_value > timeout_limit) ? | ||
1719 | timeout_limit : os_timeout_value); | ||
1720 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | ||
1721 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | ||
1722 | io_request->IoFlags |= | ||
1723 | cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); | ||
1724 | |||
1725 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1726 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1727 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1715 | } | 1728 | } |
1716 | } else { | 1729 | } else { |
1717 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) | 1730 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) |
1718 | goto NonFastPath; | 1731 | goto NonFastPath; |
1719 | 1732 | ||
1733 | /* | ||
1734 | * For older firmware, Driver should not access ldTgtIdToLd | ||
1735 | * beyond index 127 and for Extended VD firmware, ldTgtIdToLd | ||
1736 | * should not go beyond 255. | ||
1737 | */ | ||
1738 | |||
1739 | if ((!fusion->fast_path_io) || | ||
1740 | (device_id >= instance->fw_supported_vd_count)) | ||
1741 | goto NonFastPath; | ||
1742 | |||
1720 | ld = MR_TargetIdToLdGet(device_id, local_map_ptr); | 1743 | ld = MR_TargetIdToLdGet(device_id, local_map_ptr); |
1721 | if ((ld >= instance->fw_supported_vd_count) || | 1744 | |
1722 | (!fusion->fast_path_io)) | 1745 | if (ld >= instance->fw_supported_vd_count) |
1723 | goto NonFastPath; | 1746 | goto NonFastPath; |
1724 | 1747 | ||
1725 | raid = MR_LdRaidGet(ld, local_map_ptr); | 1748 | raid = MR_LdRaidGet(ld, local_map_ptr); |
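For system-PD I/O the rewritten block above splits into a firmware path (taken when secure JBOD is supported and the command is not a read/write) and a fast path, and the fast path now clamps the OS timeout before writing it into the RAID context: disks are limited to 255 seconds, other device types such as tapes keep the full 16-bit range. A standalone sketch of just that clamp; the device-type code matches the standard SCSI value for disks:

    #include <stdint.h>
    #include <stdio.h>

    #define TYPE_DISK 0x00  /* SCSI peripheral device type for direct-access disks */

    /* Clamp an OS timeout (seconds) the way the system-PD fast path above does. */
    static uint16_t clamp_fp_timeout(int device_type, unsigned int os_timeout_value)
    {
            uint16_t timeout_limit = (device_type == TYPE_DISK) ? 255 : 0xFFFF;

            return (os_timeout_value > timeout_limit) ?
                    timeout_limit : (uint16_t)os_timeout_value;
    }

    int main(void)
    {
            printf("%u\n", clamp_fp_timeout(TYPE_DISK, 600));   /* 255 */
            printf("%u\n", clamp_fp_timeout(0x01, 90000));      /* 0xFFFF for a tape */
            return 0;
    }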
@@ -1811,7 +1834,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, | |||
1811 | */ | 1834 | */ |
1812 | io_request->IoFlags = cpu_to_le16(scp->cmd_len); | 1835 | io_request->IoFlags = cpu_to_le16(scp->cmd_len); |
1813 | 1836 | ||
1814 | if (megasas_is_ldio(scp)) | 1837 | if (megasas_cmd_type(scp) == READ_WRITE_LDIO) |
1815 | megasas_build_ldio_fusion(instance, scp, cmd); | 1838 | megasas_build_ldio_fusion(instance, scp, cmd); |
1816 | else | 1839 | else |
1817 | megasas_build_dcdb_fusion(instance, scp, cmd); | 1840 | megasas_build_dcdb_fusion(instance, scp, cmd); |
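megasas_is_ldio() is replaced by megasas_cmd_type(), which classifies a command once (READ_WRITE_LDIO, NON_READ_WRITE_SYSPDIO, and so on) so this dispatch and the secure-JBOD check earlier branch on the same result. The helper's real body lives elsewhere in the driver and is not part of this diff; the sketch below only illustrates the assumed style of classification, opcode plus channel, with illustrative enum names:

    /* Illustrative classifier, not the driver's implementation. */
    enum cmd_type { READ_WRITE_LDIO, NON_READ_WRITE_LDIO,
                    READ_WRITE_SYSPDIO, NON_READ_WRITE_SYSPDIO };

    static enum cmd_type classify_cmd(unsigned char opcode, int channel,
                                      int max_pd_channels)
    {
            int is_rw;

            switch (opcode) {
            case 0x08: case 0x28: case 0xa8: case 0x88:  /* READ 6/10/12/16 */
            case 0x0a: case 0x2a: case 0xaa: case 0x8a:  /* WRITE 6/10/12/16 */
                    is_rw = 1;
                    break;
            default:
                    is_rw = 0;
                    break;
            }

            if (channel < max_pd_channels)
                    return is_rw ? READ_WRITE_SYSPDIO : NON_READ_WRITE_SYSPDIO;
            return is_rw ? READ_WRITE_LDIO : NON_READ_WRITE_LDIO;
    }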
@@ -2612,7 +2635,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2612 | instance->host->host_no); | 2635 | instance->host->host_no); |
2613 | megaraid_sas_kill_hba(instance); | 2636 | megaraid_sas_kill_hba(instance); |
2614 | instance->skip_heartbeat_timer_del = 1; | 2637 | instance->skip_heartbeat_timer_del = 1; |
2615 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
2616 | retval = FAILED; | 2638 | retval = FAILED; |
2617 | goto out; | 2639 | goto out; |
2618 | } | 2640 | } |
@@ -2808,8 +2830,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2808 | dev_info(&instance->pdev->dev, | 2830 | dev_info(&instance->pdev->dev, |
2809 | "Failed from %s %d\n", | 2831 | "Failed from %s %d\n", |
2810 | __func__, __LINE__); | 2832 | __func__, __LINE__); |
2811 | instance->adprecovery = | ||
2812 | MEGASAS_HW_CRITICAL_ERROR; | ||
2813 | megaraid_sas_kill_hba(instance); | 2833 | megaraid_sas_kill_hba(instance); |
2814 | retval = FAILED; | 2834 | retval = FAILED; |
2815 | } | 2835 | } |
@@ -2858,7 +2878,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2858 | "adapter scsi%d.\n", instance->host->host_no); | 2878 | "adapter scsi%d.\n", instance->host->host_no); |
2859 | megaraid_sas_kill_hba(instance); | 2879 | megaraid_sas_kill_hba(instance); |
2860 | instance->skip_heartbeat_timer_del = 1; | 2880 | instance->skip_heartbeat_timer_del = 1; |
2861 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
2862 | retval = FAILED; | 2881 | retval = FAILED; |
2863 | } else { | 2882 | } else { |
2864 | /* For VF: Restart HB timer if we didn't OCR */ | 2883 | /* For VF: Restart HB timer if we didn't OCR */ |
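Three error legs of megasas_reset_fusion() above stop setting instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR by hand right next to megaraid_sas_kill_hba(). The cleanup only makes sense if kill_hba itself marks the adapter dead; that function is not part of this diff, so the sketch below is only a statement of the assumed invariant, not its actual body:

    /* Hypothetical sketch of the invariant the hunks above rely on. */
    static void kill_hba_sketch(struct megasas_instance *instance)
    {
            /* Marking the adapter as dead is the kill path's job ... */
            instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;

            /* ... followed by whatever register writes actually stop the
             * firmware, so callers need not duplicate the assignment. */
    }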
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 5ab7daee11be..56e6db2d5874 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
@@ -306,14 +306,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST { | |||
306 | * MPT RAID MFA IO Descriptor. | 306 | * MPT RAID MFA IO Descriptor. |
307 | */ | 307 | */ |
308 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { | 308 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { |
309 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
310 | u32 MessageAddress1:24; /* bits 31:8*/ | ||
311 | u32 RequestFlags:8; | ||
312 | #else | ||
313 | u32 RequestFlags:8; | 309 | u32 RequestFlags:8; |
314 | u32 MessageAddress1:24; /* bits 31:8*/ | 310 | u32 MessageAddress1:24; |
315 | #endif | 311 | u32 MessageAddress2; |
316 | u32 MessageAddress2; /* bits 61:32 */ | ||
317 | }; | 312 | }; |
318 | 313 | ||
319 | /* Default Request Descriptor */ | 314 | /* Default Request Descriptor */ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 088eefa67da8..7fc6f23bd9dc 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
10 | * | 10 | * |
11 | * mpi2.h Version: 02.00.32 | 11 | * mpi2.h Version: 02.00.35 |
12 | * | 12 | * |
13 | * Version History | 13 | * Version History |
14 | * --------------- | 14 | * --------------- |
@@ -83,6 +83,9 @@ | |||
83 | * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. | 83 | * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. |
84 | * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. | 84 | * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. |
85 | * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. | 85 | * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. |
86 | * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. | ||
87 | * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. | ||
88 | * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. | ||
86 | * -------------------------------------------------------------------------- | 89 | * -------------------------------------------------------------------------- |
87 | */ | 90 | */ |
88 | 91 | ||
@@ -108,7 +111,7 @@ | |||
108 | #define MPI2_VERSION_02_00 (0x0200) | 111 | #define MPI2_VERSION_02_00 (0x0200) |
109 | 112 | ||
110 | /* versioning for this MPI header set */ | 113 | /* versioning for this MPI header set */ |
111 | #define MPI2_HEADER_VERSION_UNIT (0x20) | 114 | #define MPI2_HEADER_VERSION_UNIT (0x23) |
112 | #define MPI2_HEADER_VERSION_DEV (0x00) | 115 | #define MPI2_HEADER_VERSION_DEV (0x00) |
113 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 116 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
114 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 117 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
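The unit bump from 0x20 to 0x23 accounts for the three new history entries above; the overall MPI2_HEADER_VERSION word keeps the unit number in bits 15:8 (per the mask and shift shown) and the dev number in the low byte. A standalone sketch of packing and unpacking it with the values from this header:

    #include <stdint.h>
    #include <stdio.h>

    #define MPI2_HEADER_VERSION_UNIT        (0x23)
    #define MPI2_HEADER_VERSION_DEV         (0x00)
    #define MPI2_HEADER_VERSION_UNIT_MASK   (0xFF00)
    #define MPI2_HEADER_VERSION_UNIT_SHIFT  (8)

    int main(void)
    {
            uint16_t header_version =
                    (MPI2_HEADER_VERSION_UNIT << MPI2_HEADER_VERSION_UNIT_SHIFT) |
                    MPI2_HEADER_VERSION_DEV;

            printf("MPI2_HEADER_VERSION = 0x%04x, unit = 0x%02x\n",
                   header_version,
                   (header_version & MPI2_HEADER_VERSION_UNIT_MASK) >>
                            MPI2_HEADER_VERSION_UNIT_SHIFT);
            return 0;
    }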
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 510ef0dc8d7b..ee8d2d695d55 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
8 | * | 8 | * |
9 | * mpi2_cnfg.h Version: 02.00.26 | 9 | * mpi2_cnfg.h Version: 02.00.29 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -157,6 +157,20 @@ | |||
157 | * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. | 157 | * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. |
158 | * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to | 158 | * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to |
159 | * match the specification. | 159 | * match the specification. |
160 | * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for | ||
161 | * MPI2_CONFIG_PAGE_MAN_7. | ||
162 | * Added EnclosureLevel and ConnectorName fields to | ||
163 | * MPI2_CONFIG_PAGE_SAS_DEV_0. | ||
164 | * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for | ||
165 | * MPI2_CONFIG_PAGE_SAS_DEV_0. | ||
166 | * Added EnclosureLevel field to | ||
167 | * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. | ||
168 | * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for | ||
169 | * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. | ||
170 | * 01-08-14 02.00.28 Added more defines for the BiosOptions field of | ||
171 | * MPI2_CONFIG_PAGE_BIOS_1. | ||
172 | * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and | ||
173 | * more defines for the BiosOptions field. | ||
160 | * -------------------------------------------------------------------------- | 174 | * -------------------------------------------------------------------------- |
161 | */ | 175 | */ |
162 | 176 | ||
@@ -706,6 +720,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 | |||
706 | #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) | 720 | #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) |
707 | 721 | ||
708 | /* defines for the Flags field */ | 722 | /* defines for the Flags field */ |
723 | #define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) | ||
709 | #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) | 724 | #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) |
710 | #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) | 725 | #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) |
711 | 726 | ||
@@ -1224,7 +1239,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 | |||
1224 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | 1239 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ |
1225 | U32 BiosOptions; /* 0x04 */ | 1240 | U32 BiosOptions; /* 0x04 */ |
1226 | U32 IOCSettings; /* 0x08 */ | 1241 | U32 IOCSettings; /* 0x08 */ |
1227 | U32 Reserved1; /* 0x0C */ | 1242 | U8 SSUTimeout; /* 0x0C */ |
1243 | U8 Reserved1; /* 0x0D */ | ||
1244 | U16 Reserved2; /* 0x0E */ | ||
1228 | U32 DeviceSettings; /* 0x10 */ | 1245 | U32 DeviceSettings; /* 0x10 */ |
1229 | U16 NumberOfDevices; /* 0x14 */ | 1246 | U16 NumberOfDevices; /* 0x14 */ |
1230 | U16 UEFIVersion; /* 0x16 */ | 1247 | U16 UEFIVersion; /* 0x16 */ |
@@ -1235,9 +1252,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 | |||
1235 | } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, | 1252 | } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, |
1236 | Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; | 1253 | Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; |
1237 | 1254 | ||
1238 | #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) | 1255 | #define MPI2_BIOSPAGE1_PAGEVERSION (0x07) |
1239 | 1256 | ||
1240 | /* values for BIOS Page 1 BiosOptions field */ | 1257 | /* values for BIOS Page 1 BiosOptions field */ |
1258 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) | ||
1259 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) | ||
1260 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) | ||
1261 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) | ||
1262 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) | ||
1263 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) | ||
1264 | |||
1265 | #define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) | ||
1266 | |||
1267 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) | ||
1268 | #define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) | ||
1269 | #define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) | ||
1270 | #define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) | ||
1271 | #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) | ||
1272 | |||
1241 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) | 1273 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) |
1242 | #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) | 1274 | #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) |
1243 | 1275 | ||
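BIOS Page 1 gains an SSUTimeout byte and several new BiosOptions bits, including a three-bit PNS sub-field and the UEFI/BSD registration controls. Decoding the sub-field is a plain mask-and-compare; the sketch below simply echoes the macro suffixes, since the header does not spell out what the abbreviations stand for:

    #include <stdint.h>
    #include <stdio.h>

    #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK       (0x00003800)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL      (0x00000000)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID      (0x00001000)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS      (0x00001800)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY      (0x00002000)

    static const char *pns_name(uint32_t bios_options)
    {
            switch (bios_options & MPI2_BIOSPAGE1_OPTIONS_PNS_MASK) {
            case MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL:      return "PBDHL";
            case MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE: return "ENCSLOSURE";
            case MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID:      return "LWWID";
            case MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS:      return "PSENS";
            case MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY:      return "ESPHY";
            default:                                    return "reserved";
            }
    }

    int main(void)
    {
            printf("%s\n", pns_name(0x00001000));  /* LWWID */
            return 0;
    }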
@@ -2420,13 +2452,13 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
2420 | U8 PortGroups; /* 0x2C */ | 2452 | U8 PortGroups; /* 0x2C */ |
2421 | U8 DmaGroup; /* 0x2D */ | 2453 | U8 DmaGroup; /* 0x2D */ |
2422 | U8 ControlGroup; /* 0x2E */ | 2454 | U8 ControlGroup; /* 0x2E */ |
2423 | U8 Reserved1; /* 0x2F */ | 2455 | U8 EnclosureLevel; /* 0x2F */ |
2424 | U32 Reserved2; /* 0x30 */ | 2456 | U8 ConnectorName[4]; /* 0x30 */ |
2425 | U32 Reserved3; /* 0x34 */ | 2457 | U32 Reserved3; /* 0x34 */ |
2426 | } MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, | 2458 | } MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, |
2427 | Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; | 2459 | Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; |
2428 | 2460 | ||
2429 | #define MPI2_SASDEVICE0_PAGEVERSION (0x08) | 2461 | #define MPI2_SASDEVICE0_PAGEVERSION (0x09) |
2430 | 2462 | ||
2431 | /* values for SAS Device Page 0 AccessStatus field */ | 2463 | /* values for SAS Device Page 0 AccessStatus field */ |
2432 | #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) | 2464 | #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) |
@@ -2464,6 +2496,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
2464 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) | 2496 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) |
2465 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) | 2497 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) |
2466 | #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) | 2498 | #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) |
2499 | #define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) | ||
2467 | #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) | 2500 | #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) |
2468 | 2501 | ||
2469 | 2502 | ||
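SAS Device Page 0 turns its reserved bytes at offsets 0x2F-0x33 into EnclosureLevel plus a four-character ConnectorName, and a new Flags bit advertises when firmware has filled them in. Consumers are expected to gate on that bit; a standalone sketch with the struct trimmed to the fields involved:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)

    /* Trimmed stand-in for MPI2_CONFIG_PAGE_SAS_DEV_0: only the fields used here. */
    struct sas_dev_pg0_lite {
            uint16_t Flags;
            uint8_t  EnclosureLevel;
            uint8_t  ConnectorName[4];
    };

    static void report_location(const struct sas_dev_pg0_lite *pg0)
    {
            if (!(pg0->Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID))
                    return;  /* firmware did not fill in the fields */

            /* ConnectorName is not guaranteed to be NUL terminated. */
            printf("enclosure level %u, connector %.4s\n",
                   pg0->EnclosureLevel, pg0->ConnectorName);
    }

    int main(void)
    {
            struct sas_dev_pg0_lite pg0 = { .Flags = 0x0002, .EnclosureLevel = 1 };

            memcpy(pg0.ConnectorName, "C0.1", 4);
            report_location(&pg0);
            return 0;
    }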
@@ -2732,7 +2765,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 | |||
2732 | U16 EnclosureHandle; /* 0x16 */ | 2765 | U16 EnclosureHandle; /* 0x16 */ |
2733 | U16 NumSlots; /* 0x18 */ | 2766 | U16 NumSlots; /* 0x18 */ |
2734 | U16 StartSlot; /* 0x1A */ | 2767 | U16 StartSlot; /* 0x1A */ |
2735 | U16 Reserved2; /* 0x1C */ | 2768 | U8 Reserved2; /* 0x1C */ |
2769 | U8 EnclosureLevel; /* 0x1D */ | ||
2736 | U16 SEPDevHandle; /* 0x1E */ | 2770 | U16 SEPDevHandle; /* 0x1E */ |
2737 | U32 Reserved3; /* 0x20 */ | 2771 | U32 Reserved3; /* 0x20 */ |
2738 | U32 Reserved4; /* 0x24 */ | 2772 | U32 Reserved4; /* 0x24 */ |
@@ -2740,9 +2774,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 | |||
2740 | MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, | 2774 | MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, |
2741 | Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t; | 2775 | Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t; |
2742 | 2776 | ||
2743 | #define MPI2_SASENCLOSURE0_PAGEVERSION (0x03) | 2777 | #define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) |
2744 | 2778 | ||
2745 | /* values for SAS Enclosure Page 0 Flags field */ | 2779 | /* values for SAS Enclosure Page 0 Flags field */ |
2780 | #define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) | ||
2746 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) | 2781 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) |
2747 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) | 2782 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) |
2748 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) | 2783 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index 2c3b0f28576b..b02de48be204 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
8 | * | 8 | * |
9 | * mpi2_ioc.h Version: 02.00.23 | 9 | * mpi2_ioc.h Version: 02.00.24 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -126,6 +126,7 @@ | |||
126 | * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. | 126 | * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. |
127 | * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. | 127 | * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. |
128 | * Added Encrypted Hash Extended Image. | 128 | * Added Encrypted Hash Extended Image. |
129 | * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. | ||
129 | * -------------------------------------------------------------------------- | 130 | * -------------------------------------------------------------------------- |
130 | */ | 131 | */ |
131 | 132 | ||
@@ -1589,6 +1590,7 @@ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; | |||
1589 | /* values for HashImageType */ | 1590 | /* values for HashImageType */ |
1590 | #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) | 1591 | #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) |
1591 | #define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) | 1592 | #define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) |
1593 | #define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) | ||
1592 | 1594 | ||
1593 | /* values for HashAlgorithm */ | 1595 | /* values for HashAlgorithm */ |
1594 | #define MPI25_HASH_ALGORITHM_UNUSED (0x00) | 1596 | #define MPI25_HASH_ALGORITHM_UNUSED (0x00) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 9be03ed46180..659b8ac83ceb 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI diagnostic tool structures and definitions | 6 | * Title: MPI diagnostic tool structures and definitions |
7 | * Creation Date: March 26, 2007 | 7 | * Creation Date: March 26, 2007 |
8 | * | 8 | * |
9 | * mpi2_tool.h Version: 02.00.11 | 9 | * mpi2_tool.h Version: 02.00.12 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -29,7 +29,8 @@ | |||
29 | * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. | 29 | * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. |
30 | * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that | 30 | * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that |
31 | * it uses MPI Chain SGE as well as MPI Simple SGE. | 31 | * it uses MPI Chain SGE as well as MPI Simple SGE. |
32 | * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. | 32 | * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. |
33 | * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. | ||
33 | * -------------------------------------------------------------------------- | 34 | * -------------------------------------------------------------------------- |
34 | */ | 35 | */ |
35 | 36 | ||
@@ -101,6 +102,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST | |||
101 | #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) | 102 | #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) |
102 | #define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) | 103 | #define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) |
103 | #define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) | 104 | #define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) |
105 | #define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) | ||
104 | #define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) | 106 | #define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) |
105 | #define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) | 107 | #define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) |
106 | #define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) | 108 | #define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 58e45216d1ec..11248de92b3b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -639,6 +640,9 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, | |||
639 | if (!ioc->hide_ir_msg) | 640 | if (!ioc->hide_ir_msg) |
640 | desc = "Log Entry Added"; | 641 | desc = "Log Entry Added"; |
641 | break; | 642 | break; |
643 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
644 | desc = "Temperature Threshold"; | ||
645 | break; | ||
642 | } | 646 | } |
643 | 647 | ||
644 | if (!desc) | 648 | if (!desc) |
@@ -1296,6 +1300,8 @@ _base_free_irq(struct MPT2SAS_ADAPTER *ioc) | |||
1296 | 1300 | ||
1297 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { | 1301 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
1298 | list_del(&reply_q->list); | 1302 | list_del(&reply_q->list); |
1303 | irq_set_affinity_hint(reply_q->vector, NULL); | ||
1304 | free_cpumask_var(reply_q->affinity_hint); | ||
1299 | synchronize_irq(reply_q->vector); | 1305 | synchronize_irq(reply_q->vector); |
1300 | free_irq(reply_q->vector, reply_q); | 1306 | free_irq(reply_q->vector, reply_q); |
1301 | kfree(reply_q); | 1307 | kfree(reply_q); |
@@ -1325,6 +1331,11 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
1325 | reply_q->ioc = ioc; | 1331 | reply_q->ioc = ioc; |
1326 | reply_q->msix_index = index; | 1332 | reply_q->msix_index = index; |
1327 | reply_q->vector = vector; | 1333 | reply_q->vector = vector; |
1334 | |||
1335 | if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) | ||
1336 | return -ENOMEM; | ||
1337 | cpumask_clear(reply_q->affinity_hint); | ||
1338 | |||
1328 | atomic_set(&reply_q->busy, 0); | 1339 | atomic_set(&reply_q->busy, 0); |
1329 | if (ioc->msix_enable) | 1340 | if (ioc->msix_enable) |
1330 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", | 1341 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", |
@@ -1359,6 +1370,7 @@ static void | |||
1359 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) | 1370 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) |
1360 | { | 1371 | { |
1361 | unsigned int cpu, nr_cpus, nr_msix, index = 0; | 1372 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
1373 | struct adapter_reply_queue *reply_q; | ||
1362 | 1374 | ||
1363 | if (!_base_is_controller_msix_enabled(ioc)) | 1375 | if (!_base_is_controller_msix_enabled(ioc)) |
1364 | return; | 1376 | return; |
@@ -1373,20 +1385,30 @@ _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) | |||
1373 | 1385 | ||
1374 | cpu = cpumask_first(cpu_online_mask); | 1386 | cpu = cpumask_first(cpu_online_mask); |
1375 | 1387 | ||
1376 | do { | 1388 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
1389 | |||
1377 | unsigned int i, group = nr_cpus / nr_msix; | 1390 | unsigned int i, group = nr_cpus / nr_msix; |
1378 | 1391 | ||
1392 | if (cpu >= nr_cpus) | ||
1393 | break; | ||
1394 | |||
1379 | if (index < nr_cpus % nr_msix) | 1395 | if (index < nr_cpus % nr_msix) |
1380 | group++; | 1396 | group++; |
1381 | 1397 | ||
1382 | for (i = 0 ; i < group ; i++) { | 1398 | for (i = 0 ; i < group ; i++) { |
1383 | ioc->cpu_msix_table[cpu] = index; | 1399 | ioc->cpu_msix_table[cpu] = index; |
1400 | cpumask_or(reply_q->affinity_hint, | ||
1401 | reply_q->affinity_hint, get_cpu_mask(cpu)); | ||
1384 | cpu = cpumask_next(cpu, cpu_online_mask); | 1402 | cpu = cpumask_next(cpu, cpu_online_mask); |
1385 | } | 1403 | } |
1386 | 1404 | ||
1405 | if (irq_set_affinity_hint(reply_q->vector, | ||
1406 | reply_q->affinity_hint)) | ||
1407 | dinitprintk(ioc, pr_info(MPT2SAS_FMT | ||
1408 | "error setting affinity hint for irq vector %d\n", | ||
1409 | ioc->name, reply_q->vector)); | ||
1387 | index++; | 1410 | index++; |
1388 | 1411 | } | |
1389 | } while (cpu < nr_cpus); | ||
1390 | } | 1412 | } |
1391 | 1413 | ||
1392 | /** | 1414 | /** |
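The reply-queue assignment now iterates the reply_queue_list, hands each MSI-X vector a contiguous block of online CPUs (with the remainder absorbed by the first queues), records the block in the per-queue affinity_hint mask, and passes it to irq_set_affinity_hint(). The block-size arithmetic can be checked in isolation; the sketch below reproduces it over plain indices, leaving out the cpumask and IRQ plumbing:

    #include <stdio.h>

    /* How many CPUs land on MSI-X index 'index' when nr_cpus online CPUs are
     * spread over nr_msix reply queues, mirroring the loop in the hunk above. */
    static unsigned int group_size(unsigned int nr_cpus, unsigned int nr_msix,
                                   unsigned int index)
    {
            unsigned int group = nr_cpus / nr_msix;

            if (index < nr_cpus % nr_msix)
                    group++;  /* the first queues absorb the remainder */
            return group;
    }

    int main(void)
    {
            unsigned int nr_cpus = 10, nr_msix = 4, index;

            for (index = 0; index < nr_msix; index++)
                    printf("msix %u -> %u cpus\n", index,
                           group_size(nr_cpus, nr_msix, index));
            /* prints 3, 3, 2, 2; every CPU is covered exactly once */
            return 0;
    }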
@@ -2338,6 +2360,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
2338 | mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); | 2360 | mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); |
2339 | mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); | 2361 | mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); |
2340 | mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2362 | mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2363 | mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); | ||
2341 | _base_display_ioc_capabilities(ioc); | 2364 | _base_display_ioc_capabilities(ioc); |
2342 | 2365 | ||
2343 | /* | 2366 | /* |
@@ -2355,6 +2378,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
2355 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); | 2378 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); |
2356 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2379 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2357 | 2380 | ||
2381 | if (ioc->iounit_pg8.NumSensors) | ||
2382 | ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; | ||
2358 | } | 2383 | } |
2359 | 2384 | ||
2360 | /** | 2385 | /** |
@@ -2486,9 +2511,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2486 | 2511 | ||
2487 | /* command line tunables for max sgl entries */ | 2512 | /* command line tunables for max sgl entries */ |
2488 | if (max_sgl_entries != -1) { | 2513 | if (max_sgl_entries != -1) { |
2489 | ioc->shost->sg_tablesize = (max_sgl_entries < | 2514 | ioc->shost->sg_tablesize = min_t(unsigned short, |
2490 | MPT2SAS_SG_DEPTH) ? max_sgl_entries : | 2515 | max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS); |
2491 | MPT2SAS_SG_DEPTH; | 2516 | if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH) |
2517 | printk(MPT2SAS_WARN_FMT | ||
2518 | "sg_tablesize(%u) is bigger than kernel defined" | ||
2519 | " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, | ||
2520 | ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH); | ||
2492 | } else { | 2521 | } else { |
2493 | ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH; | 2522 | ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH; |
2494 | } | 2523 | } |
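The max_sgl_entries module parameter was previously capped at MPT2SAS_SG_DEPTH; it is now allowed up to the kernel's SCSI_MAX_SG_CHAIN_SEGMENTS ceiling via min_t(), with a warning whenever the request exceeds the driver default. A standalone sketch of the same clamp; the two limits are illustrative stand-ins, not the real constants:

    #include <stdio.h>

    #define DRIVER_SG_DEPTH     128   /* stands in for MPT2SAS_SG_DEPTH */
    #define MAX_CHAIN_SEGMENTS  2048  /* stands in for SCSI_MAX_SG_CHAIN_SEGMENTS */

    /* Simplified stand-in for the kernel's min_t() macro. */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    static unsigned short clamp_sg_tablesize(int max_sgl_entries)
    {
            unsigned short sg_tablesize;

            if (max_sgl_entries == -1)  /* module parameter left at its default */
                    return DRIVER_SG_DEPTH;

            sg_tablesize = min_t(unsigned short, max_sgl_entries,
                                 MAX_CHAIN_SEGMENTS);
            if (sg_tablesize > DRIVER_SG_DEPTH)
                    fprintf(stderr,
                            "sg_tablesize(%u) is bigger than the driver default (%u)\n",
                            sg_tablesize, DRIVER_SG_DEPTH);
            return sg_tablesize;
    }

    int main(void)
    {
            printf("%u\n", clamp_sg_tablesize(-1));    /* 128 */
            printf("%u\n", clamp_sg_tablesize(4096));  /* 2048, with a warning */
            return 0;
    }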
@@ -3236,7 +3265,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc, | |||
3236 | u16 smid; | 3265 | u16 smid; |
3237 | u32 ioc_state; | 3266 | u32 ioc_state; |
3238 | unsigned long timeleft; | 3267 | unsigned long timeleft; |
3239 | u8 issue_reset; | 3268 | bool issue_reset = false; |
3240 | int rc; | 3269 | int rc; |
3241 | void *request; | 3270 | void *request; |
3242 | u16 wait_state_count; | 3271 | u16 wait_state_count; |
@@ -3300,7 +3329,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc, | |||
3300 | _debug_dump_mf(mpi_request, | 3329 | _debug_dump_mf(mpi_request, |
3301 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | 3330 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); |
3302 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) | 3331 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) |
3303 | issue_reset = 1; | 3332 | issue_reset = true; |
3304 | goto issue_host_reset; | 3333 | goto issue_host_reset; |
3305 | } | 3334 | } |
3306 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) | 3335 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) |
@@ -3341,7 +3370,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, | |||
3341 | u16 smid; | 3370 | u16 smid; |
3342 | u32 ioc_state; | 3371 | u32 ioc_state; |
3343 | unsigned long timeleft; | 3372 | unsigned long timeleft; |
3344 | u8 issue_reset; | 3373 | bool issue_reset = false; |
3345 | int rc; | 3374 | int rc; |
3346 | void *request; | 3375 | void *request; |
3347 | u16 wait_state_count; | 3376 | u16 wait_state_count; |
@@ -3398,7 +3427,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, | |||
3398 | _debug_dump_mf(mpi_request, | 3427 | _debug_dump_mf(mpi_request, |
3399 | sizeof(Mpi2SepRequest_t)/4); | 3428 | sizeof(Mpi2SepRequest_t)/4); |
3400 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) | 3429 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) |
3401 | issue_reset = 1; | 3430 | issue_reset = true; |
3402 | goto issue_host_reset; | 3431 | goto issue_host_reset; |
3403 | } | 3432 | } |
3404 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) | 3433 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) |
@@ -4594,6 +4623,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
4594 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | 4623 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); |
4595 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 4624 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
4596 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 4625 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
4626 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); | ||
4597 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 4627 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
4598 | if (r) | 4628 | if (r) |
4599 | goto out_free_resources; | 4629 | goto out_free_resources; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 239f169b0673..caff8d10cca4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.h | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.h |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -67,10 +68,10 @@ | |||
67 | 68 | ||
68 | /* driver versioning info */ | 69 | /* driver versioning info */ |
69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 70 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 71 | #define MPT2SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 72 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
72 | #define MPT2SAS_DRIVER_VERSION "18.100.00.00" | 73 | #define MPT2SAS_DRIVER_VERSION "20.100.00.00" |
73 | #define MPT2SAS_MAJOR_VERSION 18 | 74 | #define MPT2SAS_MAJOR_VERSION 20 |
74 | #define MPT2SAS_MINOR_VERSION 100 | 75 | #define MPT2SAS_MINOR_VERSION 100 |
75 | #define MPT2SAS_BUILD_VERSION 00 | 76 | #define MPT2SAS_BUILD_VERSION 00 |
76 | #define MPT2SAS_RELEASE_VERSION 00 | 77 | #define MPT2SAS_RELEASE_VERSION 00 |
@@ -586,6 +587,7 @@ struct adapter_reply_queue { | |||
586 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; | 587 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; |
587 | char name[MPT_NAME_LENGTH]; | 588 | char name[MPT_NAME_LENGTH]; |
588 | atomic_t busy; | 589 | atomic_t busy; |
590 | cpumask_var_t affinity_hint; | ||
589 | struct list_head list; | 591 | struct list_head list; |
590 | }; | 592 | }; |
591 | 593 | ||
@@ -725,6 +727,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); | |||
725 | * @ioc_pg8: static ioc page 8 | 727 | * @ioc_pg8: static ioc page 8 |
726 | * @iounit_pg0: static iounit page 0 | 728 | * @iounit_pg0: static iounit page 0 |
727 | * @iounit_pg1: static iounit page 1 | 729 | * @iounit_pg1: static iounit page 1 |
730 | * @iounit_pg8: static iounit page 8 | ||
728 | * @sas_hba: sas host object | 731 | * @sas_hba: sas host object |
729 | * @sas_expander_list: expander object list | 732 | * @sas_expander_list: expander object list |
730 | * @sas_node_lock: | 733 | * @sas_node_lock: |
@@ -795,6 +798,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); | |||
795 | * @reply_post_host_index: head index in the pool where FW completes IO | 798 | * @reply_post_host_index: head index in the pool where FW completes IO |
796 | * @delayed_tr_list: target reset link list | 799 | * @delayed_tr_list: target reset link list |
797 | * @delayed_tr_volume_list: volume target reset link list | 800 | * @delayed_tr_volume_list: volume target reset link list |
801 | * @temp_sensors_count: number of temperature sensors | ||
798 | */ | 802 | */ |
799 | struct MPT2SAS_ADAPTER { | 803 | struct MPT2SAS_ADAPTER { |
800 | struct list_head list; | 804 | struct list_head list; |
@@ -892,6 +896,7 @@ struct MPT2SAS_ADAPTER { | |||
892 | Mpi2IOCPage8_t ioc_pg8; | 896 | Mpi2IOCPage8_t ioc_pg8; |
893 | Mpi2IOUnitPage0_t iounit_pg0; | 897 | Mpi2IOUnitPage0_t iounit_pg0; |
894 | Mpi2IOUnitPage1_t iounit_pg1; | 898 | Mpi2IOUnitPage1_t iounit_pg1; |
899 | Mpi2IOUnitPage8_t iounit_pg8; | ||
895 | 900 | ||
896 | struct _boot_device req_boot_device; | 901 | struct _boot_device req_boot_device; |
897 | struct _boot_device req_alt_boot_device; | 902 | struct _boot_device req_alt_boot_device; |
@@ -992,6 +997,7 @@ struct MPT2SAS_ADAPTER { | |||
992 | 997 | ||
993 | struct list_head delayed_tr_list; | 998 | struct list_head delayed_tr_list; |
994 | struct list_head delayed_tr_volume_list; | 999 | struct list_head delayed_tr_volume_list; |
1000 | u8 temp_sensors_count; | ||
995 | 1001 | ||
996 | /* diag buffer support */ | 1002 | /* diag buffer support */ |
997 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; | 1003 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; |
@@ -1120,6 +1126,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1120 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1126 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1121 | int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1127 | int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
1122 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1128 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1129 | int mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc, | ||
1130 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page); | ||
1123 | int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, | 1131 | int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, |
1124 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); | 1132 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); |
1125 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1133 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index c72a2fff5dbb..c43815b1a485 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -719,6 +720,42 @@ mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, | |||
719 | } | 720 | } |
720 | 721 | ||
721 | /** | 722 | /** |
723 | * mpt2sas_config_get_iounit_pg8 - obtain iounit page 8 | ||
724 | * @ioc: per adapter object | ||
725 | * @mpi_reply: reply mf payload returned from firmware | ||
726 | * @config_page: contents of the config page | ||
727 | * Context: sleep. | ||
728 | * | ||
729 | * Returns 0 for success, non-zero for failure. | ||
730 | */ | ||
731 | int | ||
732 | mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc, | ||
733 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) | ||
734 | { | ||
735 | Mpi2ConfigRequest_t mpi_request; | ||
736 | int r; | ||
737 | |||
738 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | ||
739 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | ||
740 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | ||
741 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; | ||
742 | mpi_request.Header.PageNumber = 8; | ||
743 | mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; | ||
744 | mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); | ||
745 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
746 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | ||
747 | if (r) | ||
748 | goto out; | ||
749 | |||
750 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; | ||
751 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
752 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, | ||
753 | sizeof(*config_page)); | ||
754 | out: | ||
755 | return r; | ||
756 | } | ||
757 | |||
758 | /** | ||
722 | * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 | 759 | * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 |
723 | * @ioc: per adapter object | 760 | * @ioc: per adapter object |
724 | * @mpi_reply: reply mf payload returned from firmware | 761 | * @mpi_reply: reply mf payload returned from firmware |
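The new helper follows the standard two-step MPI config transaction, first fetching the page header and then reissuing the request with MPI2_CONFIG_ACTION_PAGE_READ_CURRENT to pull the payload. Its consumer in this series is _base_static_config_pages(), which caches the sensor count; a minimal sketch of that call site, using the types and fields declared in mpt2sas_base.h above (error handling trimmed, since a missing page simply leaves the count at zero):

    /* Sketch of the caller added in _base_static_config_pages(). */
    static void cache_temp_sensor_count(struct MPT2SAS_ADAPTER *ioc)
    {
            Mpi2ConfigReply_t mpi_reply;

            mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
            if (ioc->iounit_pg8.NumSensors)
                    ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
    }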
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index ca4e563c01dd..4e509604b571 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 7f842c88abd2..46b2fc5b74af 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index cc57ef31d0fe..277120d45648 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6a1c036a6f3f..3f26147bbc64 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -2729,9 +2730,18 @@ _scsih_host_reset(struct scsi_cmnd *scmd) | |||
2729 | ioc->name, scmd); | 2730 | ioc->name, scmd); |
2730 | scsi_print_command(scmd); | 2731 | scsi_print_command(scmd); |
2731 | 2732 | ||
2733 | if (ioc->is_driver_loading) { | ||
2734 | printk(MPT2SAS_INFO_FMT "Blocking the host reset\n", | ||
2735 | ioc->name); | ||
2736 | r = FAILED; | ||
2737 | goto out; | ||
2738 | } | ||
2739 | |||
2732 | retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 2740 | retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
2733 | FORCE_BIG_HAMMER); | 2741 | FORCE_BIG_HAMMER); |
2734 | r = (retval < 0) ? FAILED : SUCCESS; | 2742 | r = (retval < 0) ? FAILED : SUCCESS; |
2743 | |||
2744 | out: | ||
2735 | printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n", | 2745 | printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n", |
2736 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); | 2746 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); |
2737 | 2747 | ||
@@ -3647,6 +3657,31 @@ _scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc, | |||
3647 | } | 3657 | } |
3648 | 3658 | ||
3649 | /** | 3659 | /** |
3660 | * _scsih_temp_threshold_events - display temperature threshold exceeded events | ||
3661 | * @ioc: per adapter object | ||
3662 | * @event_data: the temp threshold event data | ||
3663 | * Context: interrupt time. | ||
3664 | * | ||
3665 | * Return nothing. | ||
3666 | */ | ||
3667 | static void | ||
3668 | _scsih_temp_threshold_events(struct MPT2SAS_ADAPTER *ioc, | ||
3669 | Mpi2EventDataTemperature_t *event_data) | ||
3670 | { | ||
3671 | if (ioc->temp_sensors_count >= event_data->SensorNum) { | ||
3672 | printk(MPT2SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s" | ||
3673 | " exceeded for Sensor: %d !!!\n", ioc->name, | ||
3674 | ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", | ||
3675 | ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", | ||
3676 | ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", | ||
3677 | ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", | ||
3678 | event_data->SensorNum); | ||
3679 | printk(MPT2SAS_ERR_FMT "Current Temp In Celsius: %d\n", | ||
3680 | ioc->name, event_data->CurrentTemperature); | ||
3681 | } | ||
3682 | } | ||
3683 | |||
3684 | /** | ||
3650 | * _scsih_flush_running_cmds - completing outstanding commands. | 3685 | * _scsih_flush_running_cmds - completing outstanding commands. |
3651 | * @ioc: per adapter object | 3686 | * @ioc: per adapter object |
3652 | * | 3687 | * |
@@ -4509,6 +4544,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4509 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; | 4544 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4510 | goto out; | 4545 | goto out; |
4511 | } | 4546 | } |
4547 | if (log_info == 0x32010081) { | ||
4548 | scmd->result = DID_RESET << 16; | ||
4549 | break; | ||
4550 | } | ||
4512 | scmd->result = DID_SOFT_ERROR << 16; | 4551 | scmd->result = DID_SOFT_ERROR << 16; |
4513 | break; | 4552 | break; |
4514 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: | 4553 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: |
@@ -7557,6 +7596,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
7557 | case MPI2_EVENT_IR_PHYSICAL_DISK: | 7596 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
7558 | break; | 7597 | break; |
7559 | 7598 | ||
7599 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
7600 | _scsih_temp_threshold_events(ioc, | ||
7601 | (Mpi2EventDataTemperature_t *) | ||
7602 | mpi_reply->EventData); | ||
7603 | break; | ||
7604 | |||
7560 | default: /* ignore the rest */ | 7605 | default: /* ignore the rest */ |
7561 | return; | 7606 | return; |
7562 | } | 7607 | } |
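The temperature handler above prints which of the four threshold bits in Status tripped and the current reading, but only when the reported SensorNum falls within the count cached from IO Unit Page 8. The bit-to-threshold mapping is easier to see as a loop; a standalone sketch over plain integers (in the driver, Status and CurrentTemperature come from Mpi2EventDataTemperature_t after the le16 conversion):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the low four Status bits of a temperature-threshold event,
     * equivalent to the %s%s%s%s chain in _scsih_temp_threshold_events(). */
    static void print_tripped_thresholds(uint16_t status, uint8_t sensor_num,
                                         int current_temp_celsius)
    {
            int threshold;

            for (threshold = 0; threshold < 4; threshold++)
                    if (status & (1u << threshold))
                            printf("threshold %d exceeded on sensor %u\n",
                                   threshold, sensor_num);

            printf("current temperature: %d C\n", current_temp_celsius);
    }

    int main(void)
    {
            print_tripped_thresholds(0x5, 1, 58);  /* thresholds 0 and 2 tripped */
            return 0;
    }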
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index e689bf20a3ea..ff2500ab9ba4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1560115079c7..14a781b6b88d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -619,6 +620,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, | |||
619 | case MPI2_EVENT_LOG_ENTRY_ADDED: | 620 | case MPI2_EVENT_LOG_ENTRY_ADDED: |
620 | desc = "Log Entry Added"; | 621 | desc = "Log Entry Added"; |
621 | break; | 622 | break; |
623 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
624 | desc = "Temperature Threshold"; | ||
625 | break; | ||
622 | } | 626 | } |
623 | 627 | ||
624 | if (!desc) | 628 | if (!desc) |
@@ -1580,6 +1584,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) | |||
1580 | 1584 | ||
1581 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { | 1585 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
1582 | list_del(&reply_q->list); | 1586 | list_del(&reply_q->list); |
1587 | irq_set_affinity_hint(reply_q->vector, NULL); | ||
1588 | free_cpumask_var(reply_q->affinity_hint); | ||
1583 | synchronize_irq(reply_q->vector); | 1589 | synchronize_irq(reply_q->vector); |
1584 | free_irq(reply_q->vector, reply_q); | 1590 | free_irq(reply_q->vector, reply_q); |
1585 | kfree(reply_q); | 1591 | kfree(reply_q); |
@@ -1609,6 +1615,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
1609 | reply_q->ioc = ioc; | 1615 | reply_q->ioc = ioc; |
1610 | reply_q->msix_index = index; | 1616 | reply_q->msix_index = index; |
1611 | reply_q->vector = vector; | 1617 | reply_q->vector = vector; |
1618 | |||
1619 | if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) | ||
1620 | return -ENOMEM; | ||
1621 | cpumask_clear(reply_q->affinity_hint); | ||
1622 | |||
1612 | atomic_set(&reply_q->busy, 0); | 1623 | atomic_set(&reply_q->busy, 0); |
1613 | if (ioc->msix_enable) | 1624 | if (ioc->msix_enable) |
1614 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", | 1625 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", |
@@ -1643,6 +1654,7 @@ static void | |||
1643 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | 1654 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) |
1644 | { | 1655 | { |
1645 | unsigned int cpu, nr_cpus, nr_msix, index = 0; | 1656 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
1657 | struct adapter_reply_queue *reply_q; | ||
1646 | 1658 | ||
1647 | if (!_base_is_controller_msix_enabled(ioc)) | 1659 | if (!_base_is_controller_msix_enabled(ioc)) |
1648 | return; | 1660 | return; |
@@ -1657,20 +1669,30 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |||
1657 | 1669 | ||
1658 | cpu = cpumask_first(cpu_online_mask); | 1670 | cpu = cpumask_first(cpu_online_mask); |
1659 | 1671 | ||
1660 | do { | 1672 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
1673 | |||
1661 | unsigned int i, group = nr_cpus / nr_msix; | 1674 | unsigned int i, group = nr_cpus / nr_msix; |
1662 | 1675 | ||
1676 | if (cpu >= nr_cpus) | ||
1677 | break; | ||
1678 | |||
1663 | if (index < nr_cpus % nr_msix) | 1679 | if (index < nr_cpus % nr_msix) |
1664 | group++; | 1680 | group++; |
1665 | 1681 | ||
1666 | for (i = 0 ; i < group ; i++) { | 1682 | for (i = 0 ; i < group ; i++) { |
1667 | ioc->cpu_msix_table[cpu] = index; | 1683 | ioc->cpu_msix_table[cpu] = index; |
1684 | cpumask_or(reply_q->affinity_hint, | ||
1685 | reply_q->affinity_hint, get_cpu_mask(cpu)); | ||
1668 | cpu = cpumask_next(cpu, cpu_online_mask); | 1686 | cpu = cpumask_next(cpu, cpu_online_mask); |
1669 | } | 1687 | } |
1670 | 1688 | ||
1689 | if (irq_set_affinity_hint(reply_q->vector, | ||
1690 | reply_q->affinity_hint)) | ||
1691 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | ||
1692 | "error setting affinity hint for irq vector %d\n", | ||
1693 | ioc->name, reply_q->vector)); | ||
1671 | index++; | 1694 | index++; |
1672 | 1695 | } | |
1673 | } while (cpu < nr_cpus); | ||
1674 | } | 1696 | } |
1675 | 1697 | ||
1676 | /** | 1698 | /** |
@@ -2500,6 +2522,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | |||
2500 | mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); | 2522 | mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); |
2501 | mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); | 2523 | mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); |
2502 | mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2524 | mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2525 | mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); | ||
2503 | _base_display_ioc_capabilities(ioc); | 2526 | _base_display_ioc_capabilities(ioc); |
2504 | 2527 | ||
2505 | /* | 2528 | /* |
@@ -2516,6 +2539,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | |||
2516 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; | 2539 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; |
2517 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); | 2540 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); |
2518 | mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2541 | mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2542 | |||
2543 | if (ioc->iounit_pg8.NumSensors) | ||
2544 | ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; | ||
2519 | } | 2545 | } |
2520 | 2546 | ||
2521 | /** | 2547 | /** |
@@ -2659,8 +2685,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |||
2659 | 2685 | ||
2660 | if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) | 2686 | if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) |
2661 | sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; | 2687 | sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; |
2662 | else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) | 2688 | else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) { |
2663 | sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS; | 2689 | sg_tablesize = min_t(unsigned short, sg_tablesize, |
2690 | SCSI_MAX_SG_CHAIN_SEGMENTS); | ||
2691 | pr_warn(MPT3SAS_FMT | ||
2692 | "sg_tablesize(%u) is bigger than kernel" | ||
2693 | " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, | ||
2694 | sg_tablesize, MPT3SAS_MAX_PHYS_SEGMENTS); | ||
2695 | } | ||
2664 | ioc->shost->sg_tablesize = sg_tablesize; | 2696 | ioc->shost->sg_tablesize = sg_tablesize; |
2665 | 2697 | ||
2666 | ioc->hi_priority_depth = facts->HighPriorityCredit; | 2698 | ioc->hi_priority_depth = facts->HighPriorityCredit; |
@@ -3419,7 +3451,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
3419 | u16 smid; | 3451 | u16 smid; |
3420 | u32 ioc_state; | 3452 | u32 ioc_state; |
3421 | unsigned long timeleft; | 3453 | unsigned long timeleft; |
3422 | u8 issue_reset; | 3454 | bool issue_reset = false; |
3423 | int rc; | 3455 | int rc; |
3424 | void *request; | 3456 | void *request; |
3425 | u16 wait_state_count; | 3457 | u16 wait_state_count; |
@@ -3483,7 +3515,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
3483 | _debug_dump_mf(mpi_request, | 3515 | _debug_dump_mf(mpi_request, |
3484 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | 3516 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); |
3485 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | 3517 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) |
3486 | issue_reset = 1; | 3518 | issue_reset = true; |
3487 | goto issue_host_reset; | 3519 | goto issue_host_reset; |
3488 | } | 3520 | } |
3489 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 3521 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -3523,7 +3555,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
3523 | u16 smid; | 3555 | u16 smid; |
3524 | u32 ioc_state; | 3556 | u32 ioc_state; |
3525 | unsigned long timeleft; | 3557 | unsigned long timeleft; |
3526 | u8 issue_reset; | 3558 | bool issue_reset = false; |
3527 | int rc; | 3559 | int rc; |
3528 | void *request; | 3560 | void *request; |
3529 | u16 wait_state_count; | 3561 | u16 wait_state_count; |
@@ -3581,7 +3613,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
3581 | _debug_dump_mf(mpi_request, | 3613 | _debug_dump_mf(mpi_request, |
3582 | sizeof(Mpi2SepRequest_t)/4); | 3614 | sizeof(Mpi2SepRequest_t)/4); |
3583 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | 3615 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) |
3584 | issue_reset = 1; | 3616 | issue_reset = true; |
3585 | goto issue_host_reset; | 3617 | goto issue_host_reset; |
3586 | } | 3618 | } |
3587 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 3619 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -4720,6 +4752,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
4720 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | 4752 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); |
4721 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 4753 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
4722 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 4754 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
4755 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); | ||
4723 | 4756 | ||
4724 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 4757 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
4725 | if (r) | 4758 | if (r) |
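The reply-queue rework above replaces the old do/while walk over CPUs with a list_for_each_entry() over the reply queues: nr_cpus online CPUs are split across nr_msix vectors, the first nr_cpus % nr_msix queues take one extra CPU, and each queue's CPUs are OR'd into its new affinity_hint mask before irq_set_affinity_hint() is called. The following is a small standalone C sketch of only the distribution arithmetic (plain counters instead of cpumasks, made-up nr_cpus/nr_msix values); it is not driver code.

#include <stdio.h>

/* Sketch of the CPU -> MSI-X grouping in _base_assign_reply_queues():
 * each reply queue gets nr_cpus / nr_msix CPUs and the first
 * nr_cpus % nr_msix queues get one extra, so every online CPU is
 * assigned exactly once. The sizes below are hypothetical.
 */
int main(void)
{
	unsigned int nr_cpus = 10, nr_msix = 4;
	unsigned int cpu = 0, index;

	for (index = 0; index < nr_msix && cpu < nr_cpus; index++) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;		/* spread the remainder */

		printf("msix %u ->", index);
		for (i = 0; i < group; i++)
			printf(" cpu%u", cpu++);	/* cpu_msix_table[cpu] = index */
		printf("\n");
	}
	return 0;
}

With these example sizes the first two vectors get three CPUs each and the last two get two, which is exactly how cpu_msix_table[] ends up populated.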
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 40926aa9b24d..afa881682bef 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -68,7 +69,7 @@ | |||
68 | 69 | ||
69 | /* driver versioning info */ | 70 | /* driver versioning info */ |
70 | #define MPT3SAS_DRIVER_NAME "mpt3sas" | 71 | #define MPT3SAS_DRIVER_NAME "mpt3sas" |
71 | #define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 72 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
72 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" | 73 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" |
73 | #define MPT3SAS_DRIVER_VERSION "04.100.00.00" | 74 | #define MPT3SAS_DRIVER_VERSION "04.100.00.00" |
74 | #define MPT3SAS_MAJOR_VERSION 4 | 75 | #define MPT3SAS_MAJOR_VERSION 4 |
@@ -506,6 +507,7 @@ struct adapter_reply_queue { | |||
506 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; | 507 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; |
507 | char name[MPT_NAME_LENGTH]; | 508 | char name[MPT_NAME_LENGTH]; |
508 | atomic_t busy; | 509 | atomic_t busy; |
510 | cpumask_var_t affinity_hint; | ||
509 | struct list_head list; | 511 | struct list_head list; |
510 | }; | 512 | }; |
511 | 513 | ||
@@ -659,6 +661,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); | |||
659 | * @ioc_pg8: static ioc page 8 | 661 | * @ioc_pg8: static ioc page 8 |
660 | * @iounit_pg0: static iounit page 0 | 662 | * @iounit_pg0: static iounit page 0 |
661 | * @iounit_pg1: static iounit page 1 | 663 | * @iounit_pg1: static iounit page 1 |
664 | * @iounit_pg8: static iounit page 8 | ||
662 | * @sas_hba: sas host object | 665 | * @sas_hba: sas host object |
663 | * @sas_expander_list: expander object list | 666 | * @sas_expander_list: expander object list |
664 | * @sas_node_lock: | 667 | * @sas_node_lock: |
@@ -728,6 +731,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); | |||
728 | * @reply_post_host_index: head index in the pool where FW completes IO | 731 | * @reply_post_host_index: head index in the pool where FW completes IO |
729 | * @delayed_tr_list: target reset link list | 732 | * @delayed_tr_list: target reset link list |
730 | * @delayed_tr_volume_list: volume target reset link list | 733 | * @delayed_tr_volume_list: volume target reset link list |
734 | * @temp_sensors_count: number of temperature sensors reported in iounit page 8 | ||
731 | */ | 735 | */ |
732 | struct MPT3SAS_ADAPTER { | 736 | struct MPT3SAS_ADAPTER { |
733 | struct list_head list; | 737 | struct list_head list; |
@@ -834,6 +838,7 @@ struct MPT3SAS_ADAPTER { | |||
834 | Mpi2IOCPage8_t ioc_pg8; | 838 | Mpi2IOCPage8_t ioc_pg8; |
835 | Mpi2IOUnitPage0_t iounit_pg0; | 839 | Mpi2IOUnitPage0_t iounit_pg0; |
836 | Mpi2IOUnitPage1_t iounit_pg1; | 840 | Mpi2IOUnitPage1_t iounit_pg1; |
841 | Mpi2IOUnitPage8_t iounit_pg8; | ||
837 | 842 | ||
838 | struct _boot_device req_boot_device; | 843 | struct _boot_device req_boot_device; |
839 | struct _boot_device req_alt_boot_device; | 844 | struct _boot_device req_alt_boot_device; |
@@ -934,6 +939,7 @@ struct MPT3SAS_ADAPTER { | |||
934 | 939 | ||
935 | struct list_head delayed_tr_list; | 940 | struct list_head delayed_tr_list; |
936 | struct list_head delayed_tr_volume_list; | 941 | struct list_head delayed_tr_volume_list; |
942 | u8 temp_sensors_count; | ||
937 | 943 | ||
938 | /* diag buffer support */ | 944 | /* diag buffer support */ |
939 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; | 945 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; |
@@ -1082,6 +1088,8 @@ int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1082 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1088 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1083 | int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1089 | int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
1084 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1090 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1091 | int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | ||
1092 | *mpi_reply, Mpi2IOUnitPage8_t *config_page); | ||
1085 | int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 1093 | int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
1086 | Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, | 1094 | Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, |
1087 | u16 sz); | 1095 | u16 sz); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 4472c2af9255..e45c4613ef0c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -871,6 +872,42 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
871 | } | 872 | } |
872 | 873 | ||
873 | /** | 874 | /** |
875 | * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8 | ||
876 | * @ioc: per adapter object | ||
877 | * @mpi_reply: reply mf payload returned from firmware | ||
878 | * @config_page: contents of the config page | ||
879 | * Context: sleep. | ||
880 | * | ||
881 | * Returns 0 for success, non-zero for failure. | ||
882 | */ | ||
883 | int | ||
884 | mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, | ||
885 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) | ||
886 | { | ||
887 | Mpi2ConfigRequest_t mpi_request; | ||
888 | int r; | ||
889 | |||
890 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | ||
891 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | ||
892 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | ||
893 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; | ||
894 | mpi_request.Header.PageNumber = 8; | ||
895 | mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; | ||
896 | ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); | ||
897 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
898 | MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | ||
899 | if (r) | ||
900 | goto out; | ||
901 | |||
902 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; | ||
903 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
904 | MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, | ||
905 | sizeof(*config_page)); | ||
906 | out: | ||
907 | return r; | ||
908 | } | ||
909 | |||
910 | /** | ||
874 | * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 | 911 | * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 |
875 | * @ioc: per adapter object | 912 | * @ioc: per adapter object |
876 | * @mpi_reply: reply mf payload returned from firmware | 913 | * @mpi_reply: reply mf payload returned from firmware |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index dca14877d5ab..080c8a76d23d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 5f3d7fd7c2f8..aee99ce67e54 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h index 4778e7dd98bd..4e8a63fdb304 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_debug.h +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 94261ee9e72d..5a97e3286719 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -2392,9 +2393,17 @@ _scsih_host_reset(struct scsi_cmnd *scmd) | |||
2392 | ioc->name, scmd); | 2393 | ioc->name, scmd); |
2393 | scsi_print_command(scmd); | 2394 | scsi_print_command(scmd); |
2394 | 2395 | ||
2396 | if (ioc->is_driver_loading) { | ||
2397 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", | ||
2398 | ioc->name); | ||
2399 | r = FAILED; | ||
2400 | goto out; | ||
2401 | } | ||
2402 | |||
2395 | retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 2403 | retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
2396 | FORCE_BIG_HAMMER); | 2404 | FORCE_BIG_HAMMER); |
2397 | r = (retval < 0) ? FAILED : SUCCESS; | 2405 | r = (retval < 0) ? FAILED : SUCCESS; |
2406 | out: | ||
2398 | pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", | 2407 | pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", |
2399 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); | 2408 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); |
2400 | 2409 | ||
@@ -3342,6 +3351,31 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, | |||
3342 | } | 3351 | } |
3343 | 3352 | ||
3344 | /** | 3353 | /** |
3354 | * _scsih_temp_threshold_events - display temperature threshold exceeded events | ||
3355 | * @ioc: per adapter object | ||
3356 | * @event_data: the temp threshold event data | ||
3357 | * Context: interrupt time. | ||
3358 | * | ||
3359 | * Return nothing. | ||
3360 | */ | ||
3361 | static void | ||
3362 | _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, | ||
3363 | Mpi2EventDataTemperature_t *event_data) | ||
3364 | { | ||
3365 | if (ioc->temp_sensors_count >= event_data->SensorNum) { | ||
3366 | pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s" | ||
3367 | " exceeded for Sensor: %d !!!\n", ioc->name, | ||
3368 | ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", | ||
3369 | ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", | ||
3370 | ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", | ||
3371 | ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", | ||
3372 | event_data->SensorNum); | ||
3373 | pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n", | ||
3374 | ioc->name, event_data->CurrentTemperature); | ||
3375 | } | ||
3376 | } | ||
3377 | |||
3378 | /** | ||
3345 | * _scsih_flush_running_cmds - completing outstanding commands. | 3379 | * _scsih_flush_running_cmds - completing outstanding commands. |
3346 | * @ioc: per adapter object | 3380 | * @ioc: per adapter object |
3347 | * | 3381 | * |
@@ -7194,6 +7228,12 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
7194 | case MPI2_EVENT_IR_PHYSICAL_DISK: | 7228 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
7195 | break; | 7229 | break; |
7196 | 7230 | ||
7231 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
7232 | _scsih_temp_threshold_events(ioc, | ||
7233 | (Mpi2EventDataTemperature_t *) | ||
7234 | mpi_reply->EventData); | ||
7235 | break; | ||
7236 | |||
7197 | default: /* ignore the rest */ | 7237 | default: /* ignore the rest */ |
7198 | return 1; | 7238 | return 1; |
7199 | } | 7239 | } |
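In the scsih hunk above, the new MPI2_EVENT_TEMP_THRESHOLD case hands the event data to _scsih_temp_threshold_events(), which reports which of the four threshold flags (bits 0-3 of the little-endian Status word) fired for a given sensor, provided the sensor number does not exceed the count read from IO unit page 8. A throwaway userspace sketch of that bit decode, with an invented Status value:

#include <stdio.h>

/* Decode which temperature thresholds were exceeded, mirroring the
 * flag tests in _scsih_temp_threshold_events(). The status value is
 * hypothetical.
 */
int main(void)
{
	unsigned int status = 0x5;	/* pretend thresholds 0 and 2 tripped */
	int bit;

	printf("Temperature threshold flags");
	for (bit = 0; bit < 4; bit++)
		if (status & (1u << bit))
			printf(" %d", bit);
	printf(" exceeded\n");
	return 0;
}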
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 3637ae6c0171..efb98afc46e0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index 8a2dd113f401..b60fd7a3b571 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h index f681db56c53b..6586a463bea9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | |||
@@ -5,7 +5,8 @@ | |||
5 | * | 5 | * |
6 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h | 6 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h |
7 | * Copyright (C) 2012-2014 LSI Corporation | 7 | * Copyright (C) 2012-2014 LSI Corporation |
8 | * (mailto:DL-MPTFusionLinux@lsi.com) | 8 | * Copyright (C) 2013-2014 Avago Technologies |
9 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 90abb03c9074..c6077cefbeca 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
@@ -1441,8 +1441,6 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id) | |||
1441 | return IRQ_RETVAL(handled); | 1441 | return IRQ_RETVAL(handled); |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | #undef SPRINTF | ||
1445 | #define SPRINTF(args...) seq_printf(m, ##args) | ||
1446 | 1444 | ||
1447 | static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) | 1445 | static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) |
1448 | { | 1446 | { |
@@ -1458,64 +1456,63 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1458 | data = (nsp32_hw_data *)host->hostdata; | 1456 | data = (nsp32_hw_data *)host->hostdata; |
1459 | base = host->io_port; | 1457 | base = host->io_port; |
1460 | 1458 | ||
1461 | SPRINTF("NinjaSCSI-32 status\n\n"); | 1459 | seq_puts(m, "NinjaSCSI-32 status\n\n"); |
1462 | SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version); | 1460 | seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version); |
1463 | SPRINTF("SCSI host No.: %d\n", hostno); | 1461 | seq_printf(m, "SCSI host No.: %d\n", hostno); |
1464 | SPRINTF("IRQ: %d\n", host->irq); | 1462 | seq_printf(m, "IRQ: %d\n", host->irq); |
1465 | SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); | 1463 | seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); |
1466 | SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); | 1464 | seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); |
1467 | SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); | 1465 | seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); |
1468 | SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); | 1466 | seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); |
1469 | 1467 | ||
1470 | mode_reg = nsp32_index_read1(base, CHIP_MODE); | 1468 | mode_reg = nsp32_index_read1(base, CHIP_MODE); |
1471 | model = data->pci_devid->driver_data; | 1469 | model = data->pci_devid->driver_data; |
1472 | 1470 | ||
1473 | #ifdef CONFIG_PM | 1471 | #ifdef CONFIG_PM |
1474 | SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no"); | 1472 | seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no"); |
1475 | #endif | 1473 | #endif |
1476 | SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]); | 1474 | seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]); |
1477 | 1475 | ||
1478 | spin_lock_irqsave(&(data->Lock), flags); | 1476 | spin_lock_irqsave(&(data->Lock), flags); |
1479 | SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); | 1477 | seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); |
1480 | spin_unlock_irqrestore(&(data->Lock), flags); | 1478 | spin_unlock_irqrestore(&(data->Lock), flags); |
1481 | 1479 | ||
1482 | 1480 | ||
1483 | SPRINTF("SDTR status\n"); | 1481 | seq_puts(m, "SDTR status\n"); |
1484 | for (id = 0; id < ARRAY_SIZE(data->target); id++) { | 1482 | for (id = 0; id < ARRAY_SIZE(data->target); id++) { |
1485 | 1483 | ||
1486 | SPRINTF("id %d: ", id); | 1484 | seq_printf(m, "id %d: ", id); |
1487 | 1485 | ||
1488 | if (id == host->this_id) { | 1486 | if (id == host->this_id) { |
1489 | SPRINTF("----- NinjaSCSI-32 host adapter\n"); | 1487 | seq_puts(m, "----- NinjaSCSI-32 host adapter\n"); |
1490 | continue; | 1488 | continue; |
1491 | } | 1489 | } |
1492 | 1490 | ||
1493 | if (data->target[id].sync_flag == SDTR_DONE) { | 1491 | if (data->target[id].sync_flag == SDTR_DONE) { |
1494 | if (data->target[id].period == 0 && | 1492 | if (data->target[id].period == 0 && |
1495 | data->target[id].offset == ASYNC_OFFSET ) { | 1493 | data->target[id].offset == ASYNC_OFFSET ) { |
1496 | SPRINTF("async"); | 1494 | seq_puts(m, "async"); |
1497 | } else { | 1495 | } else { |
1498 | SPRINTF(" sync"); | 1496 | seq_puts(m, " sync"); |
1499 | } | 1497 | } |
1500 | } else { | 1498 | } else { |
1501 | SPRINTF(" none"); | 1499 | seq_puts(m, " none"); |
1502 | } | 1500 | } |
1503 | 1501 | ||
1504 | if (data->target[id].period != 0) { | 1502 | if (data->target[id].period != 0) { |
1505 | 1503 | ||
1506 | speed = 1000000 / (data->target[id].period * 4); | 1504 | speed = 1000000 / (data->target[id].period * 4); |
1507 | 1505 | ||
1508 | SPRINTF(" transfer %d.%dMB/s, offset %d", | 1506 | seq_printf(m, " transfer %d.%dMB/s, offset %d", |
1509 | speed / 1000, | 1507 | speed / 1000, |
1510 | speed % 1000, | 1508 | speed % 1000, |
1511 | data->target[id].offset | 1509 | data->target[id].offset |
1512 | ); | 1510 | ); |
1513 | } | 1511 | } |
1514 | SPRINTF("\n"); | 1512 | seq_putc(m, '\n'); |
1515 | } | 1513 | } |
1516 | return 0; | 1514 | return 0; |
1517 | } | 1515 | } |
1518 | #undef SPRINTF | ||
1519 | 1516 | ||
1520 | 1517 | ||
1521 | 1518 | ||
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 34aad32829f5..1b6c8833a304 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -1364,9 +1364,6 @@ static const char *nsp_info(struct Scsi_Host *shpnt) | |||
1364 | return data->nspinfo; | 1364 | return data->nspinfo; |
1365 | } | 1365 | } |
1366 | 1366 | ||
1367 | #undef SPRINTF | ||
1368 | #define SPRINTF(args...) seq_printf(m, ##args) | ||
1369 | |||
1370 | static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) | 1367 | static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) |
1371 | { | 1368 | { |
1372 | int id; | 1369 | int id; |
@@ -1378,75 +1375,74 @@ static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1378 | hostno = host->host_no; | 1375 | hostno = host->host_no; |
1379 | data = (nsp_hw_data *)host->hostdata; | 1376 | data = (nsp_hw_data *)host->hostdata; |
1380 | 1377 | ||
1381 | SPRINTF("NinjaSCSI status\n\n"); | 1378 | seq_puts(m, "NinjaSCSI status\n\n" |
1382 | SPRINTF("Driver version: $Revision: 1.23 $\n"); | 1379 | "Driver version: $Revision: 1.23 $\n"); |
1383 | SPRINTF("SCSI host No.: %d\n", hostno); | 1380 | seq_printf(m, "SCSI host No.: %d\n", hostno); |
1384 | SPRINTF("IRQ: %d\n", host->irq); | 1381 | seq_printf(m, "IRQ: %d\n", host->irq); |
1385 | SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); | 1382 | seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); |
1386 | SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); | 1383 | seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); |
1387 | SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); | 1384 | seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); |
1388 | 1385 | ||
1389 | SPRINTF("burst transfer mode: "); | 1386 | seq_puts(m, "burst transfer mode: "); |
1390 | switch (nsp_burst_mode) { | 1387 | switch (nsp_burst_mode) { |
1391 | case BURST_IO8: | 1388 | case BURST_IO8: |
1392 | SPRINTF("io8"); | 1389 | seq_puts(m, "io8"); |
1393 | break; | 1390 | break; |
1394 | case BURST_IO32: | 1391 | case BURST_IO32: |
1395 | SPRINTF("io32"); | 1392 | seq_puts(m, "io32"); |
1396 | break; | 1393 | break; |
1397 | case BURST_MEM32: | 1394 | case BURST_MEM32: |
1398 | SPRINTF("mem32"); | 1395 | seq_puts(m, "mem32"); |
1399 | break; | 1396 | break; |
1400 | default: | 1397 | default: |
1401 | SPRINTF("???"); | 1398 | seq_puts(m, "???"); |
1402 | break; | 1399 | break; |
1403 | } | 1400 | } |
1404 | SPRINTF("\n"); | 1401 | seq_putc(m, '\n'); |
1405 | 1402 | ||
1406 | 1403 | ||
1407 | spin_lock_irqsave(&(data->Lock), flags); | 1404 | spin_lock_irqsave(&(data->Lock), flags); |
1408 | SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); | 1405 | seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); |
1409 | spin_unlock_irqrestore(&(data->Lock), flags); | 1406 | spin_unlock_irqrestore(&(data->Lock), flags); |
1410 | 1407 | ||
1411 | SPRINTF("SDTR status\n"); | 1408 | seq_puts(m, "SDTR status\n"); |
1412 | for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { | 1409 | for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { |
1413 | 1410 | ||
1414 | SPRINTF("id %d: ", id); | 1411 | seq_printf(m, "id %d: ", id); |
1415 | 1412 | ||
1416 | if (id == host->this_id) { | 1413 | if (id == host->this_id) { |
1417 | SPRINTF("----- NinjaSCSI-3 host adapter\n"); | 1414 | seq_puts(m, "----- NinjaSCSI-3 host adapter\n"); |
1418 | continue; | 1415 | continue; |
1419 | } | 1416 | } |
1420 | 1417 | ||
1421 | switch(data->Sync[id].SyncNegotiation) { | 1418 | switch(data->Sync[id].SyncNegotiation) { |
1422 | case SYNC_OK: | 1419 | case SYNC_OK: |
1423 | SPRINTF(" sync"); | 1420 | seq_puts(m, " sync"); |
1424 | break; | 1421 | break; |
1425 | case SYNC_NG: | 1422 | case SYNC_NG: |
1426 | SPRINTF("async"); | 1423 | seq_puts(m, "async"); |
1427 | break; | 1424 | break; |
1428 | case SYNC_NOT_YET: | 1425 | case SYNC_NOT_YET: |
1429 | SPRINTF(" none"); | 1426 | seq_puts(m, " none"); |
1430 | break; | 1427 | break; |
1431 | default: | 1428 | default: |
1432 | SPRINTF("?????"); | 1429 | seq_puts(m, "?????"); |
1433 | break; | 1430 | break; |
1434 | } | 1431 | } |
1435 | 1432 | ||
1436 | if (data->Sync[id].SyncPeriod != 0) { | 1433 | if (data->Sync[id].SyncPeriod != 0) { |
1437 | speed = 1000000 / (data->Sync[id].SyncPeriod * 4); | 1434 | speed = 1000000 / (data->Sync[id].SyncPeriod * 4); |
1438 | 1435 | ||
1439 | SPRINTF(" transfer %d.%dMB/s, offset %d", | 1436 | seq_printf(m, " transfer %d.%dMB/s, offset %d", |
1440 | speed / 1000, | 1437 | speed / 1000, |
1441 | speed % 1000, | 1438 | speed % 1000, |
1442 | data->Sync[id].SyncOffset | 1439 | data->Sync[id].SyncOffset |
1443 | ); | 1440 | ); |
1444 | } | 1441 | } |
1445 | SPRINTF("\n"); | 1442 | seq_putc(m, '\n'); |
1446 | } | 1443 | } |
1447 | return 0; | 1444 | return 0; |
1448 | } | 1445 | } |
1449 | #undef SPRINTF | ||
1450 | 1446 | ||
1451 | /*---------------------------------------------------------------*/ | 1447 | /*---------------------------------------------------------------*/ |
1452 | /* error handler */ | 1448 | /* error handler */ |
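Both NinjaSCSI conversions above (nsp32 and nsp_cs) drop the local SPRINTF macro and apply the seq_file convention used throughout this series: seq_puts() for constant strings, seq_printf() only when values are interpolated, and seq_putc() for a single character, which avoids needless format parsing. A minimal show-callback sketch of that convention follows; the structure and field names are hypothetical, not taken from either driver.

#include <linux/seq_file.h>

/* Hypothetical hardware-state structure, for illustration only. */
struct demo_hw {
	int irq;
	unsigned long io_port;
};

static int demo_show_info(struct seq_file *m, struct demo_hw *hw)
{
	seq_puts(m, "Example status\n\n");	/* fixed text: no format parsing */
	seq_printf(m, "IRQ: %d\n", hw->irq);	/* interpolated values */
	seq_printf(m, "IO:  0x%lx\n", hw->io_port);
	seq_putc(m, '\n');			/* single trailing newline */
	return 0;
}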
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 8c27b6a77ec4..ed31d8cc6266 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -1473,13 +1473,7 @@ static int pmcraid_notify_aen( | |||
1473 | } | 1473 | } |
1474 | 1474 | ||
1475 | /* send genetlink multicast message to notify applications */ | 1475 | /* send genetlink multicast message to notify applications */ |
1476 | result = genlmsg_end(skb, msg_header); | 1476 | genlmsg_end(skb, msg_header); |
1477 | |||
1478 | if (result < 0) { | ||
1479 | pmcraid_err("genlmsg_end failed\n"); | ||
1480 | nlmsg_free(skb); | ||
1481 | return result; | ||
1482 | } | ||
1483 | 1477 | ||
1484 | result = genlmsg_multicast(&pmcraid_event_family, skb, | 1478 | result = genlmsg_multicast(&pmcraid_event_family, skb, |
1485 | 0, 0, GFP_ATOMIC); | 1479 | 0, 0, GFP_ATOMIC); |
@@ -4223,7 +4217,7 @@ static ssize_t pmcraid_show_adapter_id( | |||
4223 | static struct device_attribute pmcraid_adapter_id_attr = { | 4217 | static struct device_attribute pmcraid_adapter_id_attr = { |
4224 | .attr = { | 4218 | .attr = { |
4225 | .name = "adapter_id", | 4219 | .name = "adapter_id", |
4226 | .mode = S_IRUGO | S_IWUSR, | 4220 | .mode = S_IRUGO, |
4227 | }, | 4221 | }, |
4228 | .show = pmcraid_show_adapter_id, | 4222 | .show = pmcraid_show_adapter_id, |
4229 | }; | 4223 | }; |
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 2ca39b8e7166..15cf074ffa3c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -23,10 +23,10 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
23 | 23 | ||
24 | mutex_lock(&ha->fce_mutex); | 24 | mutex_lock(&ha->fce_mutex); |
25 | 25 | ||
26 | seq_printf(s, "FCE Trace Buffer\n"); | 26 | seq_puts(s, "FCE Trace Buffer\n"); |
27 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); | 27 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); |
28 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); | 28 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); |
29 | seq_printf(s, "FCE Enable Registers\n"); | 29 | seq_puts(s, "FCE Enable Registers\n"); |
30 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", | 30 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", |
31 | ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], | 31 | ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], |
32 | ha->fce_mb[5], ha->fce_mb[6]); | 32 | ha->fce_mb[5], ha->fce_mb[6]); |
@@ -38,11 +38,11 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
38 | seq_printf(s, "\n%llx: ", | 38 | seq_printf(s, "\n%llx: ", |
39 | (unsigned long long)((cnt * 4) + fce_start)); | 39 | (unsigned long long)((cnt * 4) + fce_start)); |
40 | else | 40 | else |
41 | seq_printf(s, " "); | 41 | seq_putc(s, ' '); |
42 | seq_printf(s, "%08x", *fce++); | 42 | seq_printf(s, "%08x", *fce++); |
43 | } | 43 | } |
44 | 44 | ||
45 | seq_printf(s, "\nEnd\n"); | 45 | seq_puts(s, "\nEnd\n"); |
46 | 46 | ||
47 | mutex_unlock(&ha->fce_mutex); | 47 | mutex_unlock(&ha->fce_mutex); |
48 | 48 | ||
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 73f9feecda72..ab4879e12ea7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1570 | * match the format by tcm_qla2xxx explicit ConfigFS NodeACLs. | 1570 | * match the format by tcm_qla2xxx explicit ConfigFS NodeACLs. |
1571 | */ | 1571 | */ |
1572 | memset(&port_name, 0, 36); | 1572 | memset(&port_name, 0, 36); |
1573 | snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", | 1573 | snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn); |
1574 | fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4], | ||
1575 | fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]); | ||
1576 | /* | 1574 | /* |
1577 | * Locate our struct se_node_acl either from an explicit NodeACL created | 1575 | * Locate our struct se_node_acl either from an explicit NodeACL created |
1578 | * via ConfigFS, or via running in TPG demo mode. | 1576 | * via ConfigFS, or via running in TPG demo mode. |
@@ -1598,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1598 | /* | 1596 | /* |
1599 | * Finally register the new FC Nexus with TCM | 1597 | * Finally register the new FC Nexus with TCM |
1600 | */ | 1598 | */ |
1601 | __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); | 1599 | transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); |
1602 | 1600 | ||
1603 | return 0; | 1601 | return 0; |
1604 | } | 1602 | } |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 9b3829931f40..c9c3b579eece 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -531,7 +531,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) | |||
531 | * | 531 | * |
532 | * 3: same as 2 | 532 | * 3: same as 2 |
533 | * | 533 | * |
534 | * 4: same as 3 plus dump extra junk | 534 | * 4: same as 3 |
535 | */ | 535 | */ |
536 | if (unlikely(scsi_logging_level)) { | 536 | if (unlikely(scsi_logging_level)) { |
537 | level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, | 537 | level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, |
@@ -540,13 +540,6 @@ void scsi_log_send(struct scsi_cmnd *cmd) | |||
540 | scmd_printk(KERN_INFO, cmd, | 540 | scmd_printk(KERN_INFO, cmd, |
541 | "Send: scmd 0x%p\n", cmd); | 541 | "Send: scmd 0x%p\n", cmd); |
542 | scsi_print_command(cmd); | 542 | scsi_print_command(cmd); |
543 | if (level > 3) { | ||
544 | printk(KERN_INFO "buffer = 0x%p, bufflen = %d," | ||
545 | " queuecommand 0x%p\n", | ||
546 | scsi_sglist(cmd), scsi_bufflen(cmd), | ||
547 | cmd->device->host->hostt->queuecommand); | ||
548 | |||
549 | } | ||
550 | } | 543 | } |
551 | } | 544 | } |
552 | } | 545 | } |
@@ -572,7 +565,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
572 | SCSI_LOG_MLCOMPLETE_BITS); | 565 | SCSI_LOG_MLCOMPLETE_BITS); |
573 | if (((level > 0) && (cmd->result || disposition != SUCCESS)) || | 566 | if (((level > 0) && (cmd->result || disposition != SUCCESS)) || |
574 | (level > 1)) { | 567 | (level > 1)) { |
575 | scsi_print_result(cmd, "Done: ", disposition); | 568 | scsi_print_result(cmd, "Done", disposition); |
576 | scsi_print_command(cmd); | 569 | scsi_print_command(cmd); |
577 | if (status_byte(cmd->result) & CHECK_CONDITION) | 570 | if (status_byte(cmd->result) & CHECK_CONDITION) |
578 | scsi_print_sense(cmd); | 571 | scsi_print_sense(cmd); |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 4aca1b0378c2..1f8e2dc9c616 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -80,6 +80,8 @@ static const char *scsi_debug_version_date = "20141022"; | |||
80 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 | 80 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 |
81 | #define UA_RESET_ASC 0x29 | 81 | #define UA_RESET_ASC 0x29 |
82 | #define UA_CHANGED_ASC 0x2a | 82 | #define UA_CHANGED_ASC 0x2a |
83 | #define TARGET_CHANGED_ASC 0x3f | ||
84 | #define LUNS_CHANGED_ASCQ 0x0e | ||
83 | #define INSUFF_RES_ASC 0x55 | 85 | #define INSUFF_RES_ASC 0x55 |
84 | #define INSUFF_RES_ASCQ 0x3 | 86 | #define INSUFF_RES_ASCQ 0x3 |
85 | #define POWER_ON_RESET_ASCQ 0x0 | 87 | #define POWER_ON_RESET_ASCQ 0x0 |
@@ -91,6 +93,8 @@ static const char *scsi_debug_version_date = "20141022"; | |||
91 | #define THRESHOLD_EXCEEDED 0x5d | 93 | #define THRESHOLD_EXCEEDED 0x5d |
92 | #define LOW_POWER_COND_ON 0x5e | 94 | #define LOW_POWER_COND_ON 0x5e |
93 | #define MISCOMPARE_VERIFY_ASC 0x1d | 95 | #define MISCOMPARE_VERIFY_ASC 0x1d |
96 | #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */ | ||
97 | #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16 | ||
94 | 98 | ||
95 | /* Additional Sense Code Qualifier (ASCQ) */ | 99 | /* Additional Sense Code Qualifier (ASCQ) */ |
96 | #define ACK_NAK_TO 0x3 | 100 | #define ACK_NAK_TO 0x3 |
@@ -180,7 +184,10 @@ static const char *scsi_debug_version_date = "20141022"; | |||
180 | #define SDEBUG_UA_BUS_RESET 1 | 184 | #define SDEBUG_UA_BUS_RESET 1 |
181 | #define SDEBUG_UA_MODE_CHANGED 2 | 185 | #define SDEBUG_UA_MODE_CHANGED 2 |
182 | #define SDEBUG_UA_CAPACITY_CHANGED 3 | 186 | #define SDEBUG_UA_CAPACITY_CHANGED 3 |
183 | #define SDEBUG_NUM_UAS 4 | 187 | #define SDEBUG_UA_LUNS_CHANGED 4 |
188 | #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */ | ||
189 | #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 | ||
190 | #define SDEBUG_NUM_UAS 7 | ||
184 | 191 | ||
185 | /* for check_readiness() */ | 192 | /* for check_readiness() */ |
186 | #define UAS_ONLY 1 /* check for UAs only */ | 193 | #define UAS_ONLY 1 /* check for UAs only */ |
@@ -326,6 +333,7 @@ static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *); | |||
326 | static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); | 333 | static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); |
327 | static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); | 334 | static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); |
328 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); | 335 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); |
336 | static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); | ||
329 | 337 | ||
330 | struct opcode_info_t { | 338 | struct opcode_info_t { |
331 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff | 339 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff |
@@ -480,8 +488,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { | |||
480 | {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, | 488 | {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, |
481 | NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, | 489 | NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, |
482 | 0, 0, 0, 0, 0, 0} }, | 490 | 0, 0, 0, 0, 0, 0} }, |
483 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */ | 491 | {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, |
484 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 492 | {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, |
493 | 0, 0, 0, 0} }, /* WRITE_BUFFER */ | ||
485 | {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, | 494 | {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, |
486 | write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, | 495 | write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, |
487 | 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, | 496 | 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, |
@@ -782,6 +791,22 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | |||
782 | /* return -ENOTTY; // correct return but upsets fdisk */ | 791 | /* return -ENOTTY; // correct return but upsets fdisk */ |
783 | } | 792 | } |
784 | 793 | ||
794 | static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) | ||
795 | { | ||
796 | struct sdebug_host_info *sdhp; | ||
797 | struct sdebug_dev_info *dp; | ||
798 | |||
799 | spin_lock(&sdebug_host_list_lock); | ||
800 | list_for_each_entry(sdhp, &sdebug_host_list, host_list) { | ||
801 | list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { | ||
802 | if ((devip->sdbg_host == dp->sdbg_host) && | ||
803 | (devip->target == dp->target)) | ||
804 | clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); | ||
805 | } | ||
806 | } | ||
807 | spin_unlock(&sdebug_host_list_lock); | ||
808 | } | ||
809 | |||
785 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | 810 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, |
786 | struct sdebug_dev_info * devip) | 811 | struct sdebug_dev_info * devip) |
787 | { | 812 | { |
@@ -817,6 +842,36 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | |||
817 | if (debug) | 842 | if (debug) |
818 | cp = "capacity data changed"; | 843 | cp = "capacity data changed"; |
819 | break; | 844 | break; |
845 | case SDEBUG_UA_MICROCODE_CHANGED: | ||
846 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
847 | TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ); | ||
848 | if (debug) | ||
849 | cp = "microcode has been changed"; | ||
850 | break; | ||
851 | case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: | ||
852 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
853 | TARGET_CHANGED_ASC, | ||
854 | MICROCODE_CHANGED_WO_RESET_ASCQ); | ||
855 | if (debug) | ||
856 | cp = "microcode has been changed without reset"; | ||
857 | break; | ||
858 | case SDEBUG_UA_LUNS_CHANGED: | ||
859 | /* | ||
860 | * SPC-3 behavior is to report a UNIT ATTENTION with | ||
861 | * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN | ||
862 | * on the target, until a REPORT LUNS command is | ||
863 | * received. SPC-4 behavior is to report it only once. | ||
864 | * NOTE: scsi_debug_scsi_level does not use the same | ||
865 | * values as struct scsi_device->scsi_level. | ||
866 | */ | ||
867 | if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */ | ||
868 | clear_luns_changed_on_target(devip); | ||
869 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
870 | TARGET_CHANGED_ASC, | ||
871 | LUNS_CHANGED_ASCQ); | ||
872 | if (debug) | ||
873 | cp = "reported luns data has changed"; | ||
874 | break; | ||
820 | default: | 875 | default: |
821 | pr_warn("%s: unexpected unit attention code=%d\n", | 876 | pr_warn("%s: unexpected unit attention code=%d\n", |
822 | __func__, k); | 877 | __func__, k); |
@@ -3033,6 +3088,55 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3033 | return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); | 3088 | return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); |
3034 | } | 3089 | } |
3035 | 3090 | ||
3091 | /* Note the mode field is in the same position as the (lower) service action | ||
3092 | * field. For the Report supported operation codes command, SPC-4 suggests | ||
3093 | * each mode of this command should be reported separately; for future. */ | ||
3094 | static int | ||
3095 | resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | ||
3096 | { | ||
3097 | u8 *cmd = scp->cmnd; | ||
3098 | struct scsi_device *sdp = scp->device; | ||
3099 | struct sdebug_dev_info *dp; | ||
3100 | u8 mode; | ||
3101 | |||
3102 | mode = cmd[1] & 0x1f; | ||
3103 | switch (mode) { | ||
3104 | case 0x4: /* download microcode (MC) and activate (ACT) */ | ||
3105 | /* set UAs on this device only */ | ||
3106 | set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); | ||
3107 | set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); | ||
3108 | break; | ||
3109 | case 0x5: /* download MC, save and ACT */ | ||
3110 | set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); | ||
3111 | break; | ||
3112 | case 0x6: /* download MC with offsets and ACT */ | ||
3113 | /* set UAs on most devices (LUs) in this target */ | ||
3114 | list_for_each_entry(dp, | ||
3115 | &devip->sdbg_host->dev_info_list, | ||
3116 | dev_list) | ||
3117 | if (dp->target == sdp->id) { | ||
3118 | set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); | ||
3119 | if (devip != dp) | ||
3120 | set_bit(SDEBUG_UA_MICROCODE_CHANGED, | ||
3121 | dp->uas_bm); | ||
3122 | } | ||
3123 | break; | ||
3124 | case 0x7: /* download MC with offsets, save, and ACT */ | ||
3125 | /* set UA on all devices (LUs) in this target */ | ||
3126 | list_for_each_entry(dp, | ||
3127 | &devip->sdbg_host->dev_info_list, | ||
3128 | dev_list) | ||
3129 | if (dp->target == sdp->id) | ||
3130 | set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, | ||
3131 | dp->uas_bm); | ||
3132 | break; | ||
3133 | default: | ||
3134 | /* do nothing for this command for other mode values */ | ||
3135 | break; | ||
3136 | } | ||
3137 | return 0; | ||
3138 | } | ||
3139 | |||
3036 | static int | 3140 | static int |
3037 | resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3141 | resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
3038 | { | 3142 | { |
@@ -3229,6 +3333,7 @@ static int resp_report_luns(struct scsi_cmnd * scp, | |||
3229 | unsigned char arr[SDEBUG_RLUN_ARR_SZ]; | 3333 | unsigned char arr[SDEBUG_RLUN_ARR_SZ]; |
3230 | unsigned char * max_addr; | 3334 | unsigned char * max_addr; |
3231 | 3335 | ||
3336 | clear_luns_changed_on_target(devip); | ||
3232 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); | 3337 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); |
3233 | shortish = (alloc_len < 4); | 3338 | shortish = (alloc_len < 4); |
3234 | if (shortish || (select_report > 2)) { | 3339 | if (shortish || (select_report > 2)) { |
@@ -4369,10 +4474,27 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, | |||
4369 | size_t count) | 4474 | size_t count) |
4370 | { | 4475 | { |
4371 | int n; | 4476 | int n; |
4477 | bool changed; | ||
4372 | 4478 | ||
4373 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4479 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4480 | changed = (scsi_debug_max_luns != n); | ||
4374 | scsi_debug_max_luns = n; | 4481 | scsi_debug_max_luns = n; |
4375 | sdebug_max_tgts_luns(); | 4482 | sdebug_max_tgts_luns(); |
4483 | if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */ | ||
4484 | struct sdebug_host_info *sdhp; | ||
4485 | struct sdebug_dev_info *dp; | ||
4486 | |||
4487 | spin_lock(&sdebug_host_list_lock); | ||
4488 | list_for_each_entry(sdhp, &sdebug_host_list, | ||
4489 | host_list) { | ||
4490 | list_for_each_entry(dp, &sdhp->dev_info_list, | ||
4491 | dev_list) { | ||
4492 | set_bit(SDEBUG_UA_LUNS_CHANGED, | ||
4493 | dp->uas_bm); | ||
4494 | } | ||
4495 | } | ||
4496 | spin_unlock(&sdebug_host_list_lock); | ||
4497 | } | ||
4376 | return count; | 4498 | return count; |
4377 | } | 4499 | } |
4378 | return -EINVAL; | 4500 | return -EINVAL; |
@@ -4536,10 +4658,10 @@ static ssize_t map_show(struct device_driver *ddp, char *buf) | |||
4536 | return scnprintf(buf, PAGE_SIZE, "0-%u\n", | 4658 | return scnprintf(buf, PAGE_SIZE, "0-%u\n", |
4537 | sdebug_store_sectors); | 4659 | sdebug_store_sectors); |
4538 | 4660 | ||
4539 | count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size); | 4661 | count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", |
4540 | 4662 | (int)map_size, map_storep); | |
4541 | buf[count++] = '\n'; | 4663 | buf[count++] = '\n'; |
4542 | buf[count++] = 0; | 4664 | buf[count] = '\0'; |
4543 | 4665 | ||
4544 | return count; | 4666 | return count; |
4545 | } | 4667 | } |
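The scsi_debug changes above are connected: WRITE BUFFER (opcode 0x3b) now has a real handler whose mode field (byte 1, bits 4:0) decides which simulated unit attentions are queued, max_luns_store() raises SDEBUG_UA_LUNS_CHANGED when the LUN count changes on an SPC-3-or-later target, and check_readiness()/resp_report_luns() report and then clear that condition as the SPC-3 versus SPC-4 comment describes. Below is a hedged, userspace-only sketch of the mode decode and its unit-attention mapping; the mode values come from the hunk, everything else is illustrative.

#include <stdio.h>

/* Map the WRITE BUFFER mode field to the unit attentions that
 * resp_write_buffer() simulates. CDB contents are made up.
 */
static void decode_write_buffer(const unsigned char *cdb)
{
	unsigned char mode = cdb[1] & 0x1f;	/* same extraction as the driver */

	switch (mode) {
	case 0x4:	/* download microcode and activate */
		printf("this LU: bus reset + microcode changed\n");
		break;
	case 0x5:	/* download, save and activate */
		printf("this LU: microcode changed without reset\n");
		break;
	case 0x6:	/* download with offsets and activate */
		printf("whole target: bus reset; other LUs: microcode changed\n");
		break;
	case 0x7:	/* download with offsets, save and activate */
		printf("whole target: microcode changed without reset\n");
		break;
	default:
		printf("mode 0x%x: no unit attention simulated\n", mode);
	}
}

int main(void)
{
	unsigned char cdb[10] = { 0x3b, 0x07 };	/* WRITE BUFFER, mode 7 */

	decode_write_buffer(cdb);
	return 0;
}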
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 8afb01604d51..4cdaffca17fc 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -124,41 +124,37 @@ scmd_eh_abort_handler(struct work_struct *work) | |||
124 | if (scsi_host_eh_past_deadline(sdev->host)) { | 124 | if (scsi_host_eh_past_deadline(sdev->host)) { |
125 | SCSI_LOG_ERROR_RECOVERY(3, | 125 | SCSI_LOG_ERROR_RECOVERY(3, |
126 | scmd_printk(KERN_INFO, scmd, | 126 | scmd_printk(KERN_INFO, scmd, |
127 | "scmd %p eh timeout, not aborting\n", | 127 | "eh timeout, not aborting\n")); |
128 | scmd)); | ||
129 | } else { | 128 | } else { |
130 | SCSI_LOG_ERROR_RECOVERY(3, | 129 | SCSI_LOG_ERROR_RECOVERY(3, |
131 | scmd_printk(KERN_INFO, scmd, | 130 | scmd_printk(KERN_INFO, scmd, |
132 | "aborting command %p\n", scmd)); | 131 | "aborting command\n")); |
133 | rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); | 132 | rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); |
134 | if (rtn == SUCCESS) { | 133 | if (rtn == SUCCESS) { |
135 | set_host_byte(scmd, DID_TIME_OUT); | 134 | set_host_byte(scmd, DID_TIME_OUT); |
136 | if (scsi_host_eh_past_deadline(sdev->host)) { | 135 | if (scsi_host_eh_past_deadline(sdev->host)) { |
137 | SCSI_LOG_ERROR_RECOVERY(3, | 136 | SCSI_LOG_ERROR_RECOVERY(3, |
138 | scmd_printk(KERN_INFO, scmd, | 137 | scmd_printk(KERN_INFO, scmd, |
139 | "scmd %p eh timeout, " | 138 | "eh timeout, not retrying " |
140 | "not retrying aborted " | 139 | "aborted command\n")); |
141 | "command\n", scmd)); | ||
142 | } else if (!scsi_noretry_cmd(scmd) && | 140 | } else if (!scsi_noretry_cmd(scmd) && |
143 | (++scmd->retries <= scmd->allowed)) { | 141 | (++scmd->retries <= scmd->allowed)) { |
144 | SCSI_LOG_ERROR_RECOVERY(3, | 142 | SCSI_LOG_ERROR_RECOVERY(3, |
145 | scmd_printk(KERN_WARNING, scmd, | 143 | scmd_printk(KERN_WARNING, scmd, |
146 | "scmd %p retry " | 144 | "retry aborted command\n")); |
147 | "aborted command\n", scmd)); | ||
148 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); | 145 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); |
149 | return; | 146 | return; |
150 | } else { | 147 | } else { |
151 | SCSI_LOG_ERROR_RECOVERY(3, | 148 | SCSI_LOG_ERROR_RECOVERY(3, |
152 | scmd_printk(KERN_WARNING, scmd, | 149 | scmd_printk(KERN_WARNING, scmd, |
153 | "scmd %p finish " | 150 | "finish aborted command\n")); |
154 | "aborted command\n", scmd)); | ||
155 | scsi_finish_command(scmd); | 151 | scsi_finish_command(scmd); |
156 | return; | 152 | return; |
157 | } | 153 | } |
158 | } else { | 154 | } else { |
159 | SCSI_LOG_ERROR_RECOVERY(3, | 155 | SCSI_LOG_ERROR_RECOVERY(3, |
160 | scmd_printk(KERN_INFO, scmd, | 156 | scmd_printk(KERN_INFO, scmd, |
161 | "scmd %p abort %s\n", scmd, | 157 | "cmd abort %s\n", |
162 | (rtn == FAST_IO_FAIL) ? | 158 | (rtn == FAST_IO_FAIL) ? |
163 | "not send" : "failed")); | 159 | "not send" : "failed")); |
164 | } | 160 | } |
@@ -167,8 +163,7 @@ scmd_eh_abort_handler(struct work_struct *work) | |||
167 | if (!scsi_eh_scmd_add(scmd, 0)) { | 163 | if (!scsi_eh_scmd_add(scmd, 0)) { |
168 | SCSI_LOG_ERROR_RECOVERY(3, | 164 | SCSI_LOG_ERROR_RECOVERY(3, |
169 | scmd_printk(KERN_WARNING, scmd, | 165 | scmd_printk(KERN_WARNING, scmd, |
170 | "scmd %p terminate " | 166 | "terminate aborted command\n")); |
171 | "aborted command\n", scmd)); | ||
172 | set_host_byte(scmd, DID_TIME_OUT); | 167 | set_host_byte(scmd, DID_TIME_OUT); |
173 | scsi_finish_command(scmd); | 168 | scsi_finish_command(scmd); |
174 | } | 169 | } |
@@ -194,7 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
194 | scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; | 189 | scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; |
195 | SCSI_LOG_ERROR_RECOVERY(3, | 190 | SCSI_LOG_ERROR_RECOVERY(3, |
196 | scmd_printk(KERN_INFO, scmd, | 191 | scmd_printk(KERN_INFO, scmd, |
197 | "scmd %p previous abort failed\n", scmd)); | 192 | "previous abort failed\n")); |
198 | BUG_ON(delayed_work_pending(&scmd->abort_work)); | 193 | BUG_ON(delayed_work_pending(&scmd->abort_work)); |
199 | return FAILED; | 194 | return FAILED; |
200 | } | 195 | } |
@@ -208,8 +203,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
208 | spin_unlock_irqrestore(shost->host_lock, flags); | 203 | spin_unlock_irqrestore(shost->host_lock, flags); |
209 | SCSI_LOG_ERROR_RECOVERY(3, | 204 | SCSI_LOG_ERROR_RECOVERY(3, |
210 | scmd_printk(KERN_INFO, scmd, | 205 | scmd_printk(KERN_INFO, scmd, |
211 | "scmd %p not aborting, host in recovery\n", | 206 | "not aborting, host in recovery\n")); |
212 | scmd)); | ||
213 | return FAILED; | 207 | return FAILED; |
214 | } | 208 | } |
215 | 209 | ||
@@ -219,8 +213,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
219 | 213 | ||
220 | scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; | 214 | scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; |
221 | SCSI_LOG_ERROR_RECOVERY(3, | 215 | SCSI_LOG_ERROR_RECOVERY(3, |
222 | scmd_printk(KERN_INFO, scmd, | 216 | scmd_printk(KERN_INFO, scmd, "abort scheduled\n")); |
223 | "scmd %p abort scheduled\n", scmd)); | ||
224 | queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); | 217 | queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); |
225 | return SUCCESS; | 218 | return SUCCESS; |
226 | } | 219 | } |
@@ -737,8 +730,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) | |||
737 | struct completion *eh_action; | 730 | struct completion *eh_action; |
738 | 731 | ||
739 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 732 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
740 | "%s scmd: %p result: %x\n", | 733 | "%s result: %x\n", __func__, scmd->result)); |
741 | __func__, scmd, scmd->result)); | ||
742 | 734 | ||
743 | eh_action = scmd->device->host->eh_action; | 735 | eh_action = scmd->device->host->eh_action; |
744 | if (eh_action) | 736 | if (eh_action) |
@@ -868,6 +860,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |||
868 | 860 | ||
869 | /** | 861 | /** |
870 | * scsi_try_to_abort_cmd - Ask host to abort a SCSI command | 862 | * scsi_try_to_abort_cmd - Ask host to abort a SCSI command |
863 | * @hostt: SCSI driver host template | ||
871 | * @scmd: SCSI cmd used to send a target reset | 864 | * @scmd: SCSI cmd used to send a target reset |
872 | * | 865 | * |
873 | * Return value: | 866 | * Return value: |
@@ -1052,8 +1045,8 @@ retry: | |||
1052 | scsi_log_completion(scmd, rtn); | 1045 | scsi_log_completion(scmd, rtn); |
1053 | 1046 | ||
1054 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1047 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1055 | "%s: scmd: %p, timeleft: %ld\n", | 1048 | "%s timeleft: %ld\n", |
1056 | __func__, scmd, timeleft)); | 1049 | __func__, timeleft)); |
1057 | 1050 | ||
1058 | /* | 1051 | /* |
1059 | * If there is time left scsi_eh_done got called, and we will examine | 1052 | * If there is time left scsi_eh_done got called, and we will examine |
@@ -1192,8 +1185,7 @@ int scsi_eh_get_sense(struct list_head *work_q, | |||
1192 | continue; | 1185 | continue; |
1193 | 1186 | ||
1194 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1187 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1195 | "sense requested for %p result %x\n", | 1188 | "sense requested, result %x\n", scmd->result)); |
1196 | scmd, scmd->result)); | ||
1197 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); | 1189 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); |
1198 | 1190 | ||
1199 | rtn = scsi_decide_disposition(scmd); | 1191 | rtn = scsi_decide_disposition(scmd); |
@@ -1235,7 +1227,7 @@ retry_tur: | |||
1235 | scmd->device->eh_timeout, 0); | 1227 | scmd->device->eh_timeout, 0); |
1236 | 1228 | ||
1237 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1229 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1238 | "%s: scmd %p rtn %x\n", __func__, scmd, rtn)); | 1230 | "%s return: %x\n", __func__, rtn)); |
1239 | 1231 | ||
1240 | switch (rtn) { | 1232 | switch (rtn) { |
1241 | case NEEDS_RETRY: | 1233 | case NEEDS_RETRY: |
@@ -2092,8 +2084,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
2092 | (++scmd->retries <= scmd->allowed)) { | 2084 | (++scmd->retries <= scmd->allowed)) { |
2093 | SCSI_LOG_ERROR_RECOVERY(3, | 2085 | SCSI_LOG_ERROR_RECOVERY(3, |
2094 | scmd_printk(KERN_INFO, scmd, | 2086 | scmd_printk(KERN_INFO, scmd, |
2095 | "%s: flush retry cmd: %p\n", | 2087 | "%s: flush retry cmd\n", |
2096 | current->comm, scmd)); | 2088 | current->comm)); |
2097 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); | 2089 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); |
2098 | } else { | 2090 | } else { |
2099 | /* | 2091 | /* |
@@ -2105,8 +2097,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
2105 | scmd->result |= (DRIVER_TIMEOUT << 24); | 2097 | scmd->result |= (DRIVER_TIMEOUT << 24); |
2106 | SCSI_LOG_ERROR_RECOVERY(3, | 2098 | SCSI_LOG_ERROR_RECOVERY(3, |
2107 | scmd_printk(KERN_INFO, scmd, | 2099 | scmd_printk(KERN_INFO, scmd, |
2108 | "%s: flush finish cmd: %p\n", | 2100 | "%s: flush finish cmd\n", |
2109 | current->comm, scmd)); | 2101 | current->comm)); |
2110 | scsi_finish_command(scmd); | 2102 | scsi_finish_command(scmd); |
2111 | } | 2103 | } |
2112 | } | 2104 | } |
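The scsi_error.c hunks above all make the same change: the "scmd %p" pointer is dropped from the format strings because scmd_printk() (reworked in the new scsi_logging.c below) now prefixes every message with the disk name and block-layer tag, which identifies the command without printing kernel pointers. A rough illustration of a call site after the change; the exact prefix text is an assumption based on sdev_format_header() below:

	/* illustrative only - scmd_printk() adds the "[disk] tag#N" prefix itself */
	scmd_printk(KERN_WARNING, scmd, "retry aborted command\n");
	/* -> "sd 2:0:0:0: [sda] tag#17 retry aborted command"  (format assumed) */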
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 17bb541f7cc2..54d7a6cbb98a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -2197,6 +2197,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) | |||
2197 | shost->tag_set.cmd_size = cmd_size; | 2197 | shost->tag_set.cmd_size = cmd_size; |
2198 | shost->tag_set.numa_node = NUMA_NO_NODE; | 2198 | shost->tag_set.numa_node = NUMA_NO_NODE; |
2199 | shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; | 2199 | shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; |
2200 | shost->tag_set.flags |= | ||
2201 | BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); | ||
2200 | shost->tag_set.driver_data = shost; | 2202 | shost->tag_set.driver_data = shost; |
2201 | 2203 | ||
2202 | return blk_mq_alloc_tag_set(&shost->tag_set); | 2204 | return blk_mq_alloc_tag_set(&shost->tag_set); |
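The scsi_lib.c hunk forwards the host template's tag allocation policy into the blk-mq tag set. A hedged fragment of how a low-level driver would opt in; the BLK_TAG_ALLOC_* names come from the companion block-layer change and the template field is the one referenced in the hunk, so treat this as a sketch rather than a recipe for any particular driver:

	#include <linux/blkdev.h>
	#include <scsi/scsi_host.h>

	/* Request round-robin tag allocation; scsi_mq_setup_tags() translates
	 * this into tag set flags via BLK_ALLOC_POLICY_TO_MQ_FLAG().
	 * Mandatory template fields (queuecommand etc.) are omitted here. */
	static struct scsi_host_template example_sht = {
		.name             = "example",
		.tag_alloc_policy = BLK_TAG_ALLOC_RR,	/* default: BLK_TAG_ALLOC_FIFO */
	};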
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c new file mode 100644 index 000000000000..bd70339c1242 --- /dev/null +++ b/drivers/scsi/scsi_logging.c | |||
@@ -0,0 +1,485 @@ | |||
1 | /* | ||
2 | * scsi_logging.c | ||
3 | * | ||
4 | * Copyright (C) 2014 SUSE Linux Products GmbH | ||
5 | * Copyright (C) 2014 Hannes Reinecke <hare@suse.de> | ||
6 | * | ||
7 | * This file is released under the GPLv2 | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/atomic.h> | ||
12 | |||
13 | #include <scsi/scsi.h> | ||
14 | #include <scsi/scsi_cmnd.h> | ||
15 | #include <scsi/scsi_device.h> | ||
16 | #include <scsi/scsi_eh.h> | ||
17 | #include <scsi/scsi_dbg.h> | ||
18 | |||
19 | #define SCSI_LOG_SPOOLSIZE 4096 | ||
20 | |||
21 | #if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG | ||
22 | #warning SCSI logging bitmask too large | ||
23 | #endif | ||
24 | |||
25 | struct scsi_log_buf { | ||
26 | char buffer[SCSI_LOG_SPOOLSIZE]; | ||
27 | unsigned long map; | ||
28 | }; | ||
29 | |||
30 | static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); | ||
31 | |||
32 | static char *scsi_log_reserve_buffer(size_t *len) | ||
33 | { | ||
34 | struct scsi_log_buf *buf; | ||
35 | unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; | ||
36 | unsigned long idx = 0; | ||
37 | |||
38 | preempt_disable(); | ||
39 | buf = this_cpu_ptr(&scsi_format_log); | ||
40 | idx = find_first_zero_bit(&buf->map, map_bits); | ||
41 | if (likely(idx < map_bits)) { | ||
42 | while (test_and_set_bit(idx, &buf->map)) { | ||
43 | idx = find_next_zero_bit(&buf->map, map_bits, idx); | ||
44 | if (idx >= map_bits) | ||
45 | break; | ||
46 | } | ||
47 | } | ||
48 | if (WARN_ON(idx >= map_bits)) { | ||
49 | preempt_enable(); | ||
50 | return NULL; | ||
51 | } | ||
52 | *len = SCSI_LOG_BUFSIZE; | ||
53 | return buf->buffer + idx * SCSI_LOG_BUFSIZE; | ||
54 | } | ||
55 | |||
56 | static void scsi_log_release_buffer(char *bufptr) | ||
57 | { | ||
58 | struct scsi_log_buf *buf; | ||
59 | unsigned long idx; | ||
60 | int ret; | ||
61 | |||
62 | buf = this_cpu_ptr(&scsi_format_log); | ||
63 | if (bufptr >= buf->buffer && | ||
64 | bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { | ||
65 | idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; | ||
66 | ret = test_and_clear_bit(idx, &buf->map); | ||
67 | WARN_ON(!ret); | ||
68 | } | ||
69 | preempt_enable(); | ||
70 | } | ||
71 | |||
72 | static inline const char *scmd_name(const struct scsi_cmnd *scmd) | ||
73 | { | ||
74 | return scmd->request->rq_disk ? | ||
75 | scmd->request->rq_disk->disk_name : NULL; | ||
76 | } | ||
77 | |||
78 | static size_t sdev_format_header(char *logbuf, size_t logbuf_len, | ||
79 | const char *name, int tag) | ||
80 | { | ||
81 | size_t off = 0; | ||
82 | |||
83 | if (name) | ||
84 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
85 | "[%s] ", name); | ||
86 | |||
87 | if (WARN_ON(off >= logbuf_len)) | ||
88 | return off; | ||
89 | |||
90 | if (tag >= 0) | ||
91 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
92 | "tag#%d ", tag); | ||
93 | return off; | ||
94 | } | ||
95 | |||
96 | void sdev_prefix_printk(const char *level, const struct scsi_device *sdev, | ||
97 | const char *name, const char *fmt, ...) | ||
98 | { | ||
99 | va_list args; | ||
100 | char *logbuf; | ||
101 | size_t off = 0, logbuf_len; | ||
102 | |||
103 | if (!sdev) | ||
104 | return; | ||
105 | |||
106 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
107 | if (!logbuf) | ||
108 | return; | ||
109 | |||
110 | if (name) | ||
111 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
112 | "[%s] ", name); | ||
113 | if (!WARN_ON(off >= logbuf_len)) { | ||
114 | va_start(args, fmt); | ||
115 | off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); | ||
116 | va_end(args); | ||
117 | } | ||
118 | dev_printk(level, &sdev->sdev_gendev, "%s", logbuf); | ||
119 | scsi_log_release_buffer(logbuf); | ||
120 | } | ||
121 | EXPORT_SYMBOL(sdev_prefix_printk); | ||
122 | |||
123 | void scmd_printk(const char *level, const struct scsi_cmnd *scmd, | ||
124 | const char *fmt, ...) | ||
125 | { | ||
126 | va_list args; | ||
127 | char *logbuf; | ||
128 | size_t off = 0, logbuf_len; | ||
129 | |||
130 | if (!scmd || !scmd->cmnd) | ||
131 | return; | ||
132 | |||
133 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
134 | if (!logbuf) | ||
135 | return; | ||
136 | off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd), | ||
137 | scmd->request->tag); | ||
138 | if (off < logbuf_len) { | ||
139 | va_start(args, fmt); | ||
140 | off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); | ||
141 | va_end(args); | ||
142 | } | ||
143 | dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf); | ||
144 | scsi_log_release_buffer(logbuf); | ||
145 | } | ||
146 | EXPORT_SYMBOL(scmd_printk); | ||
147 | |||
148 | static size_t scsi_format_opcode_name(char *buffer, size_t buf_len, | ||
149 | const unsigned char *cdbp) | ||
150 | { | ||
151 | int sa, cdb0; | ||
152 | const char *cdb_name = NULL, *sa_name = NULL; | ||
153 | size_t off; | ||
154 | |||
155 | cdb0 = cdbp[0]; | ||
156 | if (cdb0 == VARIABLE_LENGTH_CMD) { | ||
157 | int len = scsi_varlen_cdb_length(cdbp); | ||
158 | |||
159 | if (len < 10) { | ||
160 | off = scnprintf(buffer, buf_len, | ||
161 | "short variable length command, len=%d", | ||
162 | len); | ||
163 | return off; | ||
164 | } | ||
165 | sa = (cdbp[8] << 8) + cdbp[9]; | ||
166 | } else | ||
167 | sa = cdbp[1] & 0x1f; | ||
168 | |||
169 | if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { | ||
170 | if (cdb_name) | ||
171 | off = scnprintf(buffer, buf_len, "%s", cdb_name); | ||
172 | else { | ||
173 | off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0); | ||
174 | if (WARN_ON(off >= buf_len)) | ||
175 | return off; | ||
176 | if (cdb0 >= VENDOR_SPECIFIC_CDB) | ||
177 | off += scnprintf(buffer + off, buf_len - off, | ||
178 | " (vendor)"); | ||
179 | else if (cdb0 >= 0x60 && cdb0 < 0x7e) | ||
180 | off += scnprintf(buffer + off, buf_len - off, | ||
181 | " (reserved)"); | ||
182 | } | ||
183 | } else { | ||
184 | if (sa_name) | ||
185 | off = scnprintf(buffer, buf_len, "%s", sa_name); | ||
186 | else if (cdb_name) | ||
187 | off = scnprintf(buffer, buf_len, "%s, sa=0x%x", | ||
188 | cdb_name, sa); | ||
189 | else | ||
190 | off = scnprintf(buffer, buf_len, | ||
191 | "opcode=0x%x, sa=0x%x", cdb0, sa); | ||
192 | } | ||
193 | WARN_ON(off >= buf_len); | ||
194 | return off; | ||
195 | } | ||
196 | |||
197 | size_t __scsi_format_command(char *logbuf, size_t logbuf_len, | ||
198 | const unsigned char *cdb, size_t cdb_len) | ||
199 | { | ||
200 | int len, k; | ||
201 | size_t off; | ||
202 | |||
203 | off = scsi_format_opcode_name(logbuf, logbuf_len, cdb); | ||
204 | if (off >= logbuf_len) | ||
205 | return off; | ||
206 | len = scsi_command_size(cdb); | ||
207 | if (cdb_len < len) | ||
208 | len = cdb_len; | ||
209 | /* print out all bytes in cdb */ | ||
210 | for (k = 0; k < len; ++k) { | ||
211 | if (off > logbuf_len - 3) | ||
212 | break; | ||
213 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
214 | " %02x", cdb[k]); | ||
215 | } | ||
216 | return off; | ||
217 | } | ||
218 | EXPORT_SYMBOL(__scsi_format_command); | ||
219 | |||
220 | void scsi_print_command(struct scsi_cmnd *cmd) | ||
221 | { | ||
222 | int k; | ||
223 | char *logbuf; | ||
224 | size_t off, logbuf_len; | ||
225 | |||
226 | if (!cmd->cmnd) | ||
227 | return; | ||
228 | |||
229 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
230 | if (!logbuf) | ||
231 | return; | ||
232 | |||
233 | off = sdev_format_header(logbuf, logbuf_len, | ||
234 | scmd_name(cmd), cmd->request->tag); | ||
235 | if (off >= logbuf_len) | ||
236 | goto out_printk; | ||
237 | off += scnprintf(logbuf + off, logbuf_len - off, "CDB: "); | ||
238 | if (WARN_ON(off >= logbuf_len)) | ||
239 | goto out_printk; | ||
240 | |||
241 | off += scsi_format_opcode_name(logbuf + off, logbuf_len - off, | ||
242 | cmd->cmnd); | ||
243 | if (off >= logbuf_len) | ||
244 | goto out_printk; | ||
245 | |||
246 | /* print out all bytes in cdb */ | ||
247 | if (cmd->cmd_len > 16) { | ||
248 | /* Print opcode in one line and use separate lines for CDB */ | ||
249 | off += scnprintf(logbuf + off, logbuf_len - off, "\n"); | ||
250 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
251 | scsi_log_release_buffer(logbuf); | ||
252 | for (k = 0; k < cmd->cmd_len; k += 16) { | ||
253 | size_t linelen = min(cmd->cmd_len - k, 16); | ||
254 | |||
255 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
256 | if (!logbuf) | ||
257 | break; | ||
258 | off = sdev_format_header(logbuf, logbuf_len, | ||
259 | scmd_name(cmd), | ||
260 | cmd->request->tag); | ||
261 | if (!WARN_ON(off > logbuf_len - 58)) { | ||
262 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
263 | "CDB[%02x]: ", k); | ||
264 | hex_dump_to_buffer(&cmd->cmnd[k], linelen, | ||
265 | 16, 1, logbuf + off, | ||
266 | logbuf_len - off, false); | ||
267 | } | ||
268 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", | ||
269 | logbuf); | ||
270 | scsi_log_release_buffer(logbuf); | ||
271 | } | ||
272 | return; | ||
273 | } | ||
274 | if (!WARN_ON(off > logbuf_len - 49)) { | ||
275 | off += scnprintf(logbuf + off, logbuf_len - off, " "); | ||
276 | hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1, | ||
277 | logbuf + off, logbuf_len - off, | ||
278 | false); | ||
279 | } | ||
280 | out_printk: | ||
281 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
282 | scsi_log_release_buffer(logbuf); | ||
283 | } | ||
284 | EXPORT_SYMBOL(scsi_print_command); | ||
285 | |||
286 | static size_t | ||
287 | scsi_format_extd_sense(char *buffer, size_t buf_len, | ||
288 | unsigned char asc, unsigned char ascq) | ||
289 | { | ||
290 | size_t off = 0; | ||
291 | const char *extd_sense_fmt = NULL; | ||
292 | const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, | ||
293 | &extd_sense_fmt); | ||
294 | |||
295 | if (extd_sense_str) { | ||
296 | off = scnprintf(buffer, buf_len, "Add. Sense: %s", | ||
297 | extd_sense_str); | ||
298 | if (extd_sense_fmt) | ||
299 | off += scnprintf(buffer + off, buf_len - off, | ||
300 | "(%s%x)", extd_sense_fmt, ascq); | ||
301 | } else { | ||
302 | if (asc >= 0x80) | ||
303 | off = scnprintf(buffer, buf_len, "<<vendor>>"); | ||
304 | off += scnprintf(buffer + off, buf_len - off, | ||
305 | "ASC=0x%x ", asc); | ||
306 | if (ascq >= 0x80) | ||
307 | off += scnprintf(buffer + off, buf_len - off, | ||
308 | "<<vendor>>"); | ||
309 | off += scnprintf(buffer + off, buf_len - off, | ||
310 | "ASCQ=0x%x ", ascq); | ||
311 | } | ||
312 | return off; | ||
313 | } | ||
314 | |||
315 | static size_t | ||
316 | scsi_format_sense_hdr(char *buffer, size_t buf_len, | ||
317 | const struct scsi_sense_hdr *sshdr) | ||
318 | { | ||
319 | const char *sense_txt; | ||
320 | size_t off; | ||
321 | |||
322 | off = scnprintf(buffer, buf_len, "Sense Key : "); | ||
323 | sense_txt = scsi_sense_key_string(sshdr->sense_key); | ||
324 | if (sense_txt) | ||
325 | off += scnprintf(buffer + off, buf_len - off, | ||
326 | "%s ", sense_txt); | ||
327 | else | ||
328 | off += scnprintf(buffer + off, buf_len - off, | ||
329 | "0x%x ", sshdr->sense_key); | ||
330 | off += scnprintf(buffer + off, buf_len - off, | ||
331 | scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] "); | ||
332 | |||
333 | if (sshdr->response_code >= 0x72) | ||
334 | off += scnprintf(buffer + off, buf_len - off, "[descriptor] "); | ||
335 | return off; | ||
336 | } | ||
337 | |||
338 | static void | ||
339 | scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag, | ||
340 | const unsigned char *sense_buffer, int sense_len) | ||
341 | { | ||
342 | char *logbuf; | ||
343 | size_t logbuf_len; | ||
344 | int i; | ||
345 | |||
346 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
347 | if (!logbuf) | ||
348 | return; | ||
349 | |||
350 | for (i = 0; i < sense_len; i += 16) { | ||
351 | int len = min(sense_len - i, 16); | ||
352 | size_t off; | ||
353 | |||
354 | off = sdev_format_header(logbuf, logbuf_len, | ||
355 | name, tag); | ||
356 | hex_dump_to_buffer(&sense_buffer[i], len, 16, 1, | ||
357 | logbuf + off, logbuf_len - off, | ||
358 | false); | ||
359 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
360 | } | ||
361 | scsi_log_release_buffer(logbuf); | ||
362 | } | ||
363 | |||
364 | static void | ||
365 | scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
366 | int tag, const struct scsi_sense_hdr *sshdr) | ||
367 | { | ||
368 | char *logbuf; | ||
369 | size_t off, logbuf_len; | ||
370 | |||
371 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
372 | if (!logbuf) | ||
373 | return; | ||
374 | off = sdev_format_header(logbuf, logbuf_len, name, tag); | ||
375 | off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr); | ||
376 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
377 | scsi_log_release_buffer(logbuf); | ||
378 | |||
379 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
380 | if (!logbuf) | ||
381 | return; | ||
382 | off = sdev_format_header(logbuf, logbuf_len, name, tag); | ||
383 | off += scsi_format_extd_sense(logbuf + off, logbuf_len - off, | ||
384 | sshdr->asc, sshdr->ascq); | ||
385 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
386 | scsi_log_release_buffer(logbuf); | ||
387 | } | ||
388 | |||
389 | static void | ||
390 | scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag, | ||
391 | const unsigned char *sense_buffer, int sense_len) | ||
392 | { | ||
393 | struct scsi_sense_hdr sshdr; | ||
394 | |||
395 | if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) | ||
396 | scsi_log_print_sense_hdr(sdev, name, tag, &sshdr); | ||
397 | else | ||
398 | scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Print normalized SCSI sense header with a prefix. | ||
403 | */ | ||
404 | void | ||
405 | scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
406 | const struct scsi_sense_hdr *sshdr) | ||
407 | { | ||
408 | scsi_log_print_sense_hdr(sdev, name, -1, sshdr); | ||
409 | } | ||
410 | EXPORT_SYMBOL(scsi_print_sense_hdr); | ||
411 | |||
412 | /* Normalize and print sense buffer with name prefix */ | ||
413 | void __scsi_print_sense(const struct scsi_device *sdev, const char *name, | ||
414 | const unsigned char *sense_buffer, int sense_len) | ||
415 | { | ||
416 | scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len); | ||
417 | } | ||
418 | EXPORT_SYMBOL(__scsi_print_sense); | ||
419 | |||
420 | /* Normalize and print sense buffer in SCSI command */ | ||
421 | void scsi_print_sense(const struct scsi_cmnd *cmd) | ||
422 | { | ||
423 | scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag, | ||
424 | cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); | ||
425 | } | ||
426 | EXPORT_SYMBOL(scsi_print_sense); | ||
427 | |||
428 | void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg, | ||
429 | int disposition) | ||
430 | { | ||
431 | char *logbuf; | ||
432 | size_t off, logbuf_len; | ||
433 | const char *mlret_string = scsi_mlreturn_string(disposition); | ||
434 | const char *hb_string = scsi_hostbyte_string(cmd->result); | ||
435 | const char *db_string = scsi_driverbyte_string(cmd->result); | ||
436 | |||
437 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
438 | if (!logbuf) | ||
439 | return; | ||
440 | |||
441 | off = sdev_format_header(logbuf, logbuf_len, | ||
442 | scmd_name(cmd), cmd->request->tag); | ||
443 | |||
444 | if (off >= logbuf_len) | ||
445 | goto out_printk; | ||
446 | |||
447 | if (msg) { | ||
448 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
449 | "%s: ", msg); | ||
450 | if (WARN_ON(off >= logbuf_len)) | ||
451 | goto out_printk; | ||
452 | } | ||
453 | if (mlret_string) | ||
454 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
455 | "%s ", mlret_string); | ||
456 | else | ||
457 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
458 | "UNKNOWN(0x%02x) ", disposition); | ||
459 | if (WARN_ON(off >= logbuf_len)) | ||
460 | goto out_printk; | ||
461 | |||
462 | off += scnprintf(logbuf + off, logbuf_len - off, "Result: "); | ||
463 | if (WARN_ON(off >= logbuf_len)) | ||
464 | goto out_printk; | ||
465 | |||
466 | if (hb_string) | ||
467 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
468 | "hostbyte=%s ", hb_string); | ||
469 | else | ||
470 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
471 | "hostbyte=0x%02x ", host_byte(cmd->result)); | ||
472 | if (WARN_ON(off >= logbuf_len)) | ||
473 | goto out_printk; | ||
474 | |||
475 | if (db_string) | ||
476 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
477 | "driverbyte=%s", db_string); | ||
478 | else | ||
479 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
480 | "driverbyte=0x%02x", driver_byte(cmd->result)); | ||
481 | out_printk: | ||
482 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
483 | scsi_log_release_buffer(logbuf); | ||
484 | } | ||
485 | EXPORT_SYMBOL(scsi_print_result); | ||
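The new scsi_logging.c formats each message into a slot of a per-CPU spool instead of taking a shared lock: the spool is divided into SCSI_LOG_BUFSIZE-sized slots, occupancy is tracked in a single unsigned long bitmap, and a slot is claimed with test_and_set_bit() under preempt_disable() so a message formatted from interrupt context on the same CPU still gets its own slot. A stripped-down, single-threaded user-space sketch of the same slot-bitmap bookkeeping (names and sizes are invented here; the kernel version uses the atomic bitops and per-CPU data):

	#include <stddef.h>

	#define SPOOLSIZE	4096	/* plays the role of SCSI_LOG_SPOOLSIZE */
	#define SLOTSIZE	256	/* plays the role of SCSI_LOG_BUFSIZE */
	#define NSLOTS		(SPOOLSIZE / SLOTSIZE)

	static char spool[SPOOLSIZE];
	static unsigned long map;	/* bit n set => slot n in use */

	static char *reserve_slot(void)
	{
		for (int i = 0; i < NSLOTS; i++) {
			if (!(map & (1UL << i))) {
				map |= 1UL << i;
				return spool + i * SLOTSIZE;
			}
		}
		return NULL;		/* spool exhausted; caller must drop the message */
	}

	static void release_slot(char *p)
	{
		map &= ~(1UL << ((p - spool) / SLOTSIZE));
	}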
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 6fcefa2da503..251598eb3547 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -189,36 +189,36 @@ static int proc_print_scsidevice(struct device *dev, void *data) | |||
189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
190 | for (i = 0; i < 8; i++) { | 190 | for (i = 0; i < 8; i++) { |
191 | if (sdev->vendor[i] >= 0x20) | 191 | if (sdev->vendor[i] >= 0x20) |
192 | seq_printf(s, "%c", sdev->vendor[i]); | 192 | seq_putc(s, sdev->vendor[i]); |
193 | else | 193 | else |
194 | seq_printf(s, " "); | 194 | seq_putc(s, ' '); |
195 | } | 195 | } |
196 | 196 | ||
197 | seq_printf(s, " Model: "); | 197 | seq_puts(s, " Model: "); |
198 | for (i = 0; i < 16; i++) { | 198 | for (i = 0; i < 16; i++) { |
199 | if (sdev->model[i] >= 0x20) | 199 | if (sdev->model[i] >= 0x20) |
200 | seq_printf(s, "%c", sdev->model[i]); | 200 | seq_putc(s, sdev->model[i]); |
201 | else | 201 | else |
202 | seq_printf(s, " "); | 202 | seq_putc(s, ' '); |
203 | } | 203 | } |
204 | 204 | ||
205 | seq_printf(s, " Rev: "); | 205 | seq_puts(s, " Rev: "); |
206 | for (i = 0; i < 4; i++) { | 206 | for (i = 0; i < 4; i++) { |
207 | if (sdev->rev[i] >= 0x20) | 207 | if (sdev->rev[i] >= 0x20) |
208 | seq_printf(s, "%c", sdev->rev[i]); | 208 | seq_putc(s, sdev->rev[i]); |
209 | else | 209 | else |
210 | seq_printf(s, " "); | 210 | seq_putc(s, ' '); |
211 | } | 211 | } |
212 | 212 | ||
213 | seq_printf(s, "\n"); | 213 | seq_putc(s, '\n'); |
214 | 214 | ||
215 | seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); | 215 | seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); |
216 | seq_printf(s, " ANSI SCSI revision: %02x", | 216 | seq_printf(s, " ANSI SCSI revision: %02x", |
217 | sdev->scsi_level - (sdev->scsi_level > 1)); | 217 | sdev->scsi_level - (sdev->scsi_level > 1)); |
218 | if (sdev->scsi_level == 2) | 218 | if (sdev->scsi_level == 2) |
219 | seq_printf(s, " CCS\n"); | 219 | seq_puts(s, " CCS\n"); |
220 | else | 220 | else |
221 | seq_printf(s, "\n"); | 221 | seq_putc(s, '\n'); |
222 | 222 | ||
223 | out: | 223 | out: |
224 | return 0; | 224 | return 0; |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 983aed10ff2f..9c0a520d933c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/async.h> | 35 | #include <linux/async.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <asm/unaligned.h> | ||
37 | 38 | ||
38 | #include <scsi/scsi.h> | 39 | #include <scsi/scsi.h> |
39 | #include <scsi/scsi_cmnd.h> | 40 | #include <scsi/scsi_cmnd.h> |
@@ -98,20 +99,6 @@ char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; | |||
98 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); | 99 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); |
99 | MODULE_PARM_DESC(scan, "sync, async or none"); | 100 | MODULE_PARM_DESC(scan, "sync, async or none"); |
100 | 101 | ||
101 | /* | ||
102 | * max_scsi_report_luns: the maximum number of LUNS that will be | ||
103 | * returned from the REPORT LUNS command. 8 times this value must | ||
104 | * be allocated. In theory this could be up to an 8 byte value, but | ||
105 | * in practice, the maximum number of LUNs suppored by any device | ||
106 | * is about 16k. | ||
107 | */ | ||
108 | static unsigned int max_scsi_report_luns = 511; | ||
109 | |||
110 | module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR); | ||
111 | MODULE_PARM_DESC(max_report_luns, | ||
112 | "REPORT LUNS maximum number of LUNS received (should be" | ||
113 | " between 1 and 16384)"); | ||
114 | |||
115 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; | 102 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; |
116 | 103 | ||
117 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); | 104 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); |
@@ -290,7 +277,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
290 | if (!shost_use_blk_mq(sdev->host) && | 277 | if (!shost_use_blk_mq(sdev->host) && |
291 | (shost->bqt || shost->hostt->use_blk_tags)) { | 278 | (shost->bqt || shost->hostt->use_blk_tags)) { |
292 | blk_queue_init_tags(sdev->request_queue, | 279 | blk_queue_init_tags(sdev->request_queue, |
293 | sdev->host->cmd_per_lun, shost->bqt); | 280 | sdev->host->cmd_per_lun, shost->bqt, |
281 | shost->hostt->tag_alloc_policy); | ||
294 | } | 282 | } |
295 | scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); | 283 | scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); |
296 | 284 | ||
@@ -1367,7 +1355,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1367 | unsigned int retries; | 1355 | unsigned int retries; |
1368 | int result; | 1356 | int result; |
1369 | struct scsi_lun *lunp, *lun_data; | 1357 | struct scsi_lun *lunp, *lun_data; |
1370 | u8 *data; | ||
1371 | struct scsi_sense_hdr sshdr; | 1358 | struct scsi_sense_hdr sshdr; |
1372 | struct scsi_device *sdev; | 1359 | struct scsi_device *sdev; |
1373 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); | 1360 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); |
@@ -1407,16 +1394,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1407 | 1394 | ||
1408 | /* | 1395 | /* |
1409 | * Allocate enough to hold the header (the same size as one scsi_lun) | 1396 | * Allocate enough to hold the header (the same size as one scsi_lun) |
1410 | * plus the max number of luns we are requesting. | 1397 | * plus the number of luns we are requesting. 511 was the default |
1411 | * | 1398 | * value of the now removed max_report_luns parameter. |
1412 | * Reallocating and trying again (with the exact amount we need) | ||
1413 | * would be nice, but then we need to somehow limit the size | ||
1414 | * allocated based on the available memory and the limits of | ||
1415 | * kmalloc - we don't want a kmalloc() failure of a huge value to | ||
1416 | * prevent us from finding any LUNs on this target. | ||
1417 | */ | 1399 | */ |
1418 | length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); | 1400 | length = (511 + 1) * sizeof(struct scsi_lun); |
1419 | lun_data = kmalloc(length, GFP_ATOMIC | | 1401 | retry: |
1402 | lun_data = kmalloc(length, GFP_KERNEL | | ||
1420 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); | 1403 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); |
1421 | if (!lun_data) { | 1404 | if (!lun_data) { |
1422 | printk(ALLOC_FAILURE_MSG, __func__); | 1405 | printk(ALLOC_FAILURE_MSG, __func__); |
@@ -1433,10 +1416,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1433 | /* | 1416 | /* |
1434 | * bytes 6 - 9: length of the command. | 1417 | * bytes 6 - 9: length of the command. |
1435 | */ | 1418 | */ |
1436 | scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff; | 1419 | put_unaligned_be32(length, &scsi_cmd[6]); |
1437 | scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff; | ||
1438 | scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff; | ||
1439 | scsi_cmd[9] = (unsigned char) length & 0xff; | ||
1440 | 1420 | ||
1441 | scsi_cmd[10] = 0; /* reserved */ | 1421 | scsi_cmd[10] = 0; /* reserved */ |
1442 | scsi_cmd[11] = 0; /* control */ | 1422 | scsi_cmd[11] = 0; /* control */ |
@@ -1484,19 +1464,16 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1484 | /* | 1464 | /* |
1485 | * Get the length from the first four bytes of lun_data. | 1465 | * Get the length from the first four bytes of lun_data. |
1486 | */ | 1466 | */ |
1487 | data = (u8 *) lun_data->scsi_lun; | 1467 | if (get_unaligned_be32(lun_data->scsi_lun) + |
1488 | length = ((data[0] << 24) | (data[1] << 16) | | 1468 | sizeof(struct scsi_lun) > length) { |
1489 | (data[2] << 8) | (data[3] << 0)); | 1469 | length = get_unaligned_be32(lun_data->scsi_lun) + |
1470 | sizeof(struct scsi_lun); | ||
1471 | kfree(lun_data); | ||
1472 | goto retry; | ||
1473 | } | ||
1474 | length = get_unaligned_be32(lun_data->scsi_lun); | ||
1490 | 1475 | ||
1491 | num_luns = (length / sizeof(struct scsi_lun)); | 1476 | num_luns = (length / sizeof(struct scsi_lun)); |
1492 | if (num_luns > max_scsi_report_luns) { | ||
1493 | sdev_printk(KERN_WARNING, sdev, | ||
1494 | "Only %d (max_scsi_report_luns)" | ||
1495 | " of %d luns reported, try increasing" | ||
1496 | " max_scsi_report_luns.\n", | ||
1497 | max_scsi_report_luns, num_luns); | ||
1498 | num_luns = max_scsi_report_luns; | ||
1499 | } | ||
1500 | 1477 | ||
1501 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, | 1478 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
1502 | "scsi scan: REPORT LUN scan\n")); | 1479 | "scsi scan: REPORT LUN scan\n")); |
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 82af28b90294..08bb47b53bc3 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c | |||
@@ -143,7 +143,7 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) | |||
143 | cmd = "WRITE_SAME"; | 143 | cmd = "WRITE_SAME"; |
144 | break; | 144 | break; |
145 | default: | 145 | default: |
146 | trace_seq_printf(p, "UNKNOWN"); | 146 | trace_seq_puts(p, "UNKNOWN"); |
147 | goto out; | 147 | goto out; |
148 | } | 148 | } |
149 | 149 | ||
@@ -204,7 +204,7 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) | |||
204 | cmd = "GET_LBA_STATUS"; | 204 | cmd = "GET_LBA_STATUS"; |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | trace_seq_printf(p, "UNKNOWN"); | 207 | trace_seq_puts(p, "UNKNOWN"); |
208 | goto out; | 208 | goto out; |
209 | } | 209 | } |
210 | 210 | ||
@@ -249,7 +249,7 @@ scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) | |||
249 | { | 249 | { |
250 | const char *ret = trace_seq_buffer_ptr(p); | 250 | const char *ret = trace_seq_buffer_ptr(p); |
251 | 251 | ||
252 | trace_seq_printf(p, "-"); | 252 | trace_seq_putc(p, '-'); |
253 | trace_seq_putc(p, 0); | 253 | trace_seq_putc(p, 0); |
254 | 254 | ||
255 | return ret; | 255 | return ret; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 05ea0d49a3a3..6b78476d04bb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -3320,11 +3320,8 @@ module_exit(exit_sd); | |||
3320 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, | 3320 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, |
3321 | struct scsi_sense_hdr *sshdr) | 3321 | struct scsi_sense_hdr *sshdr) |
3322 | { | 3322 | { |
3323 | scsi_show_sense_hdr(sdkp->device, | 3323 | scsi_print_sense_hdr(sdkp->device, |
3324 | sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); | 3324 | sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); |
3325 | scsi_show_extd_sense(sdkp->device, | ||
3326 | sdkp->disk ? sdkp->disk->disk_name : NULL, | ||
3327 | sshdr->asc, sshdr->ascq); | ||
3328 | } | 3325 | } |
3329 | 3326 | ||
3330 | static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, | 3327 | static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index b7e79e7646ad..dcb0d76d7312 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -47,7 +47,6 @@ struct ses_device { | |||
47 | 47 | ||
48 | struct ses_component { | 48 | struct ses_component { |
49 | u64 addr; | 49 | u64 addr; |
50 | unsigned char *desc; | ||
51 | }; | 50 | }; |
52 | 51 | ||
53 | static int ses_probe(struct device *dev) | 52 | static int ses_probe(struct device *dev) |
@@ -68,6 +67,20 @@ static int ses_probe(struct device *dev) | |||
68 | #define SES_TIMEOUT (30 * HZ) | 67 | #define SES_TIMEOUT (30 * HZ) |
69 | #define SES_RETRIES 3 | 68 | #define SES_RETRIES 3 |
70 | 69 | ||
70 | static void init_device_slot_control(unsigned char *dest_desc, | ||
71 | struct enclosure_component *ecomp, | ||
72 | unsigned char *status) | ||
73 | { | ||
74 | memcpy(dest_desc, status, 4); | ||
75 | dest_desc[0] = 0; | ||
76 | /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */ | ||
77 | if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE) | ||
78 | dest_desc[1] = 0; | ||
79 | dest_desc[2] &= 0xde; | ||
80 | dest_desc[3] &= 0x3c; | ||
81 | } | ||
82 | |||
83 | |||
71 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, | 84 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
72 | void *buf, int bufflen) | 85 | void *buf, int bufflen) |
73 | { | 86 | { |
@@ -179,14 +192,22 @@ static int ses_set_fault(struct enclosure_device *edev, | |||
179 | struct enclosure_component *ecomp, | 192 | struct enclosure_component *ecomp, |
180 | enum enclosure_component_setting val) | 193 | enum enclosure_component_setting val) |
181 | { | 194 | { |
182 | unsigned char desc[4] = {0 }; | 195 | unsigned char desc[4]; |
196 | unsigned char *desc_ptr; | ||
197 | |||
198 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
199 | |||
200 | if (!desc_ptr) | ||
201 | return -EIO; | ||
202 | |||
203 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
183 | 204 | ||
184 | switch (val) { | 205 | switch (val) { |
185 | case ENCLOSURE_SETTING_DISABLED: | 206 | case ENCLOSURE_SETTING_DISABLED: |
186 | /* zero is disabled */ | 207 | desc[3] &= 0xdf; |
187 | break; | 208 | break; |
188 | case ENCLOSURE_SETTING_ENABLED: | 209 | case ENCLOSURE_SETTING_ENABLED: |
189 | desc[3] = 0x20; | 210 | desc[3] |= 0x20; |
190 | break; | 211 | break; |
191 | default: | 212 | default: |
192 | /* SES doesn't do the SGPIO blink settings */ | 213 | /* SES doesn't do the SGPIO blink settings */ |
@@ -220,14 +241,22 @@ static int ses_set_locate(struct enclosure_device *edev, | |||
220 | struct enclosure_component *ecomp, | 241 | struct enclosure_component *ecomp, |
221 | enum enclosure_component_setting val) | 242 | enum enclosure_component_setting val) |
222 | { | 243 | { |
223 | unsigned char desc[4] = {0 }; | 244 | unsigned char desc[4]; |
245 | unsigned char *desc_ptr; | ||
246 | |||
247 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
248 | |||
249 | if (!desc_ptr) | ||
250 | return -EIO; | ||
251 | |||
252 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
224 | 253 | ||
225 | switch (val) { | 254 | switch (val) { |
226 | case ENCLOSURE_SETTING_DISABLED: | 255 | case ENCLOSURE_SETTING_DISABLED: |
227 | /* zero is disabled */ | 256 | desc[2] &= 0xfd; |
228 | break; | 257 | break; |
229 | case ENCLOSURE_SETTING_ENABLED: | 258 | case ENCLOSURE_SETTING_ENABLED: |
230 | desc[2] = 0x02; | 259 | desc[2] |= 0x02; |
231 | break; | 260 | break; |
232 | default: | 261 | default: |
233 | /* SES doesn't do the SGPIO blink settings */ | 262 | /* SES doesn't do the SGPIO blink settings */ |
@@ -240,15 +269,23 @@ static int ses_set_active(struct enclosure_device *edev, | |||
240 | struct enclosure_component *ecomp, | 269 | struct enclosure_component *ecomp, |
241 | enum enclosure_component_setting val) | 270 | enum enclosure_component_setting val) |
242 | { | 271 | { |
243 | unsigned char desc[4] = {0 }; | 272 | unsigned char desc[4]; |
273 | unsigned char *desc_ptr; | ||
274 | |||
275 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
276 | |||
277 | if (!desc_ptr) | ||
278 | return -EIO; | ||
279 | |||
280 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
244 | 281 | ||
245 | switch (val) { | 282 | switch (val) { |
246 | case ENCLOSURE_SETTING_DISABLED: | 283 | case ENCLOSURE_SETTING_DISABLED: |
247 | /* zero is disabled */ | 284 | desc[2] &= 0x7f; |
248 | ecomp->active = 0; | 285 | ecomp->active = 0; |
249 | break; | 286 | break; |
250 | case ENCLOSURE_SETTING_ENABLED: | 287 | case ENCLOSURE_SETTING_ENABLED: |
251 | desc[2] = 0x80; | 288 | desc[2] |= 0x80; |
252 | ecomp->active = 1; | 289 | ecomp->active = 1; |
253 | break; | 290 | break; |
254 | default: | 291 | default: |
@@ -258,13 +295,63 @@ static int ses_set_active(struct enclosure_device *edev, | |||
258 | return ses_set_page2_descriptor(edev, ecomp, desc); | 295 | return ses_set_page2_descriptor(edev, ecomp, desc); |
259 | } | 296 | } |
260 | 297 | ||
298 | static int ses_show_id(struct enclosure_device *edev, char *buf) | ||
299 | { | ||
300 | struct ses_device *ses_dev = edev->scratch; | ||
301 | unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4); | ||
302 | |||
303 | return sprintf(buf, "%#llx\n", id); | ||
304 | } | ||
305 | |||
306 | static void ses_get_power_status(struct enclosure_device *edev, | ||
307 | struct enclosure_component *ecomp) | ||
308 | { | ||
309 | unsigned char *desc; | ||
310 | |||
311 | desc = ses_get_page2_descriptor(edev, ecomp); | ||
312 | if (desc) | ||
313 | ecomp->power_status = (desc[3] & 0x10) ? 0 : 1; | ||
314 | } | ||
315 | |||
316 | static int ses_set_power_status(struct enclosure_device *edev, | ||
317 | struct enclosure_component *ecomp, | ||
318 | int val) | ||
319 | { | ||
320 | unsigned char desc[4]; | ||
321 | unsigned char *desc_ptr; | ||
322 | |||
323 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
324 | |||
325 | if (!desc_ptr) | ||
326 | return -EIO; | ||
327 | |||
328 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
329 | |||
330 | switch (val) { | ||
331 | /* power = 1 is device_off = 0 and vice versa */ | ||
332 | case 0: | ||
333 | desc[3] |= 0x10; | ||
334 | break; | ||
335 | case 1: | ||
336 | desc[3] &= 0xef; | ||
337 | break; | ||
338 | default: | ||
339 | return -EINVAL; | ||
340 | } | ||
341 | ecomp->power_status = val; | ||
342 | return ses_set_page2_descriptor(edev, ecomp, desc); | ||
343 | } | ||
344 | |||
261 | static struct enclosure_component_callbacks ses_enclosure_callbacks = { | 345 | static struct enclosure_component_callbacks ses_enclosure_callbacks = { |
262 | .get_fault = ses_get_fault, | 346 | .get_fault = ses_get_fault, |
263 | .set_fault = ses_set_fault, | 347 | .set_fault = ses_set_fault, |
264 | .get_status = ses_get_status, | 348 | .get_status = ses_get_status, |
265 | .get_locate = ses_get_locate, | 349 | .get_locate = ses_get_locate, |
266 | .set_locate = ses_set_locate, | 350 | .set_locate = ses_set_locate, |
351 | .get_power_status = ses_get_power_status, | ||
352 | .set_power_status = ses_set_power_status, | ||
267 | .set_active = ses_set_active, | 353 | .set_active = ses_set_active, |
354 | .show_id = ses_show_id, | ||
268 | }; | 355 | }; |
269 | 356 | ||
270 | struct ses_host_edev { | 357 | struct ses_host_edev { |
@@ -298,19 +385,26 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, | |||
298 | int invalid = desc[0] & 0x80; | 385 | int invalid = desc[0] & 0x80; |
299 | enum scsi_protocol proto = desc[0] & 0x0f; | 386 | enum scsi_protocol proto = desc[0] & 0x0f; |
300 | u64 addr = 0; | 387 | u64 addr = 0; |
388 | int slot = -1; | ||
301 | struct ses_component *scomp = ecomp->scratch; | 389 | struct ses_component *scomp = ecomp->scratch; |
302 | unsigned char *d; | 390 | unsigned char *d; |
303 | 391 | ||
304 | scomp->desc = desc; | ||
305 | |||
306 | if (invalid) | 392 | if (invalid) |
307 | return; | 393 | return; |
308 | 394 | ||
309 | switch (proto) { | 395 | switch (proto) { |
396 | case SCSI_PROTOCOL_FCP: | ||
397 | if (eip) { | ||
398 | d = desc + 4; | ||
399 | slot = d[3]; | ||
400 | } | ||
401 | break; | ||
310 | case SCSI_PROTOCOL_SAS: | 402 | case SCSI_PROTOCOL_SAS: |
311 | if (eip) | 403 | if (eip) { |
404 | d = desc + 4; | ||
405 | slot = d[3]; | ||
312 | d = desc + 8; | 406 | d = desc + 8; |
313 | else | 407 | } else |
314 | d = desc + 4; | 408 | d = desc + 4; |
315 | /* only take the phy0 addr */ | 409 | /* only take the phy0 addr */ |
316 | addr = (u64)d[12] << 56 | | 410 | addr = (u64)d[12] << 56 | |
@@ -326,6 +420,7 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, | |||
326 | /* FIXME: Need to add more protocols than just SAS */ | 420 | /* FIXME: Need to add more protocols than just SAS */ |
327 | break; | 421 | break; |
328 | } | 422 | } |
423 | ecomp->slot = slot; | ||
329 | scomp->addr = addr; | 424 | scomp->addr = addr; |
330 | } | 425 | } |
331 | 426 | ||
@@ -349,7 +444,8 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, | |||
349 | if (scomp->addr != efd->addr) | 444 | if (scomp->addr != efd->addr) |
350 | continue; | 445 | continue; |
351 | 446 | ||
352 | enclosure_add_device(edev, i, efd->dev); | 447 | if (enclosure_add_device(edev, i, efd->dev) == 0) |
448 | kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE); | ||
353 | return 1; | 449 | return 1; |
354 | } | 450 | } |
355 | return 0; | 451 | return 0; |
@@ -423,16 +519,24 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
423 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { | 519 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { |
424 | 520 | ||
425 | if (create) | 521 | if (create) |
426 | ecomp = enclosure_component_register(edev, | 522 | ecomp = enclosure_component_alloc( |
427 | components++, | 523 | edev, |
428 | type_ptr[0], | 524 | components++, |
429 | name); | 525 | type_ptr[0], |
526 | name); | ||
430 | else | 527 | else |
431 | ecomp = &edev->component[components++]; | 528 | ecomp = &edev->component[components++]; |
432 | 529 | ||
433 | if (!IS_ERR(ecomp) && addl_desc_ptr) | 530 | if (!IS_ERR(ecomp)) { |
434 | ses_process_descriptor(ecomp, | 531 | ses_get_power_status(edev, ecomp); |
435 | addl_desc_ptr); | 532 | if (addl_desc_ptr) |
533 | ses_process_descriptor( | ||
534 | ecomp, | ||
535 | addl_desc_ptr); | ||
536 | if (create) | ||
537 | enclosure_component_register( | ||
538 | ecomp); | ||
539 | } | ||
436 | } | 540 | } |
437 | if (desc_ptr) | 541 | if (desc_ptr) |
438 | desc_ptr += len; | 542 | desc_ptr += len; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index b14f64cb9724..2270bd51f9c2 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -546,7 +546,7 @@ static ssize_t | |||
546 | sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) | 546 | sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) |
547 | { | 547 | { |
548 | sg_io_hdr_t *hp = &srp->header; | 548 | sg_io_hdr_t *hp = &srp->header; |
549 | int err = 0; | 549 | int err = 0, err2; |
550 | int len; | 550 | int len; |
551 | 551 | ||
552 | if (count < SZ_SG_IO_HDR) { | 552 | if (count < SZ_SG_IO_HDR) { |
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) | |||
575 | goto err_out; | 575 | goto err_out; |
576 | } | 576 | } |
577 | err_out: | 577 | err_out: |
578 | err = sg_finish_rem_req(srp); | 578 | err2 = sg_finish_rem_req(srp); |
579 | return (0 == err) ? count : err; | 579 | return err ? : err2 ? : count; |
580 | } | 580 | } |
581 | 581 | ||
582 | static ssize_t | 582 | static ssize_t |
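The new return in sg_new_read() uses the GNU "a ? : b" extension, which evaluates to a when a is non-zero and to b otherwise, so the first error encountered wins and count is returned only when both err and err2 are zero. Expanded for clarity (a sketch of the same logic, not the driver code):

	/* return err ? : err2 ? : count;  spelled out */
	ssize_t ret;

	if (err)		/* error noticed while copying the header back */
		ret = err;
	else if (err2)		/* error reported by sg_finish_rem_req() */
		ret = err2;
	else
		ret = count;	/* success: the full sg_io_hdr was consumed */
	return ret;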
@@ -763,7 +763,7 @@ static int | |||
763 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 763 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
764 | unsigned char *cmnd, int timeout, int blocking) | 764 | unsigned char *cmnd, int timeout, int blocking) |
765 | { | 765 | { |
766 | int k, data_dir, at_head; | 766 | int k, at_head; |
767 | Sg_device *sdp = sfp->parentdp; | 767 | Sg_device *sdp = sfp->parentdp; |
768 | sg_io_hdr_t *hp = &srp->header; | 768 | sg_io_hdr_t *hp = &srp->header; |
769 | 769 | ||
@@ -793,21 +793,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
793 | return -ENODEV; | 793 | return -ENODEV; |
794 | } | 794 | } |
795 | 795 | ||
796 | switch (hp->dxfer_direction) { | ||
797 | case SG_DXFER_TO_FROM_DEV: | ||
798 | case SG_DXFER_FROM_DEV: | ||
799 | data_dir = DMA_FROM_DEVICE; | ||
800 | break; | ||
801 | case SG_DXFER_TO_DEV: | ||
802 | data_dir = DMA_TO_DEVICE; | ||
803 | break; | ||
804 | case SG_DXFER_UNKNOWN: | ||
805 | data_dir = DMA_BIDIRECTIONAL; | ||
806 | break; | ||
807 | default: | ||
808 | data_dir = DMA_NONE; | ||
809 | break; | ||
810 | } | ||
811 | hp->duration = jiffies_to_msecs(jiffies); | 796 | hp->duration = jiffies_to_msecs(jiffies); |
812 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ | 797 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ |
813 | (SG_FLAG_Q_AT_TAIL & hp->flags)) | 798 | (SG_FLAG_Q_AT_TAIL & hp->flags)) |
@@ -1350,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate) | |||
1350 | } | 1335 | } |
1351 | /* Rely on write phase to clean out srp status values, so no "else" */ | 1336 | /* Rely on write phase to clean out srp status values, so no "else" */ |
1352 | 1337 | ||
1338 | /* | ||
1339 | * Free the request as soon as it is complete so that its resources | ||
1340 | * can be reused without waiting for userspace to read() the | ||
1341 | * result. But keep the associated bio (if any) around until | ||
1342 | * blk_rq_unmap_user() can be called from user context. | ||
1343 | */ | ||
1344 | srp->rq = NULL; | ||
1345 | if (rq->cmd != rq->__cmd) | ||
1346 | kfree(rq->cmd); | ||
1347 | __blk_put_request(rq->q, rq); | ||
1348 | |||
1353 | write_lock_irqsave(&sfp->rq_list_lock, iflags); | 1349 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
1354 | if (unlikely(srp->orphan)) { | 1350 | if (unlikely(srp->orphan)) { |
1355 | if (sfp->keep_orphan) | 1351 | if (sfp->keep_orphan) |
@@ -1684,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) | |||
1684 | return -ENOMEM; | 1680 | return -ENOMEM; |
1685 | } | 1681 | } |
1686 | 1682 | ||
1687 | rq = blk_get_request(q, rw, GFP_ATOMIC); | 1683 | /* |
1684 | * NOTE | ||
1685 | * | ||
1686 | * With scsi-mq enabled, there are a fixed number of preallocated | ||
1687 | * requests equal in number to shost->can_queue. If all of the | ||
1688 | * preallocated requests are already in use, then using GFP_ATOMIC with | ||
1689 | * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL | ||
1690 | * will cause blk_get_request() to sleep until an active command | ||
1691 | * completes, freeing up a request. Neither option is ideal, but | ||
1692 | * GFP_KERNEL is the better choice to prevent userspace from getting an | ||
1693 | * unexpected EWOULDBLOCK. | ||
1694 | * | ||
1695 | * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually | ||
1696 | * does not sleep except under memory pressure. | ||
1697 | */ | ||
1698 | rq = blk_get_request(q, rw, GFP_KERNEL); | ||
1688 | if (IS_ERR(rq)) { | 1699 | if (IS_ERR(rq)) { |
1689 | kfree(long_cmdp); | 1700 | kfree(long_cmdp); |
1690 | return PTR_ERR(rq); | 1701 | return PTR_ERR(rq); |
@@ -1734,22 +1745,19 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) | |||
1734 | } | 1745 | } |
1735 | 1746 | ||
1736 | if (iov_count) { | 1747 | if (iov_count) { |
1737 | int len, size = sizeof(struct sg_iovec) * iov_count; | 1748 | int size = sizeof(struct iovec) * iov_count; |
1738 | struct iovec *iov; | 1749 | struct iovec *iov; |
1750 | struct iov_iter i; | ||
1739 | 1751 | ||
1740 | iov = memdup_user(hp->dxferp, size); | 1752 | iov = memdup_user(hp->dxferp, size); |
1741 | if (IS_ERR(iov)) | 1753 | if (IS_ERR(iov)) |
1742 | return PTR_ERR(iov); | 1754 | return PTR_ERR(iov); |
1743 | 1755 | ||
1744 | len = iov_length(iov, iov_count); | 1756 | iov_iter_init(&i, rw, iov, iov_count, |
1745 | if (hp->dxfer_len < len) { | 1757 | min_t(size_t, hp->dxfer_len, |
1746 | iov_count = iov_shorten(iov, iov_count, hp->dxfer_len); | 1758 | iov_length(iov, iov_count))); |
1747 | len = hp->dxfer_len; | ||
1748 | } | ||
1749 | 1759 | ||
1750 | res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov, | 1760 | res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); |
1751 | iov_count, | ||
1752 | len, GFP_ATOMIC); | ||
1753 | kfree(iov); | 1761 | kfree(iov); |
1754 | } else | 1762 | } else |
1755 | res = blk_rq_map_user(q, rq, md, hp->dxferp, | 1763 | res = blk_rq_map_user(q, rq, md, hp->dxferp, |
@@ -1777,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp) | |||
1777 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, | 1785 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
1778 | "sg_finish_rem_req: res_used=%d\n", | 1786 | "sg_finish_rem_req: res_used=%d\n", |
1779 | (int) srp->res_used)); | 1787 | (int) srp->res_used)); |
1780 | if (srp->rq) { | 1788 | if (srp->bio) |
1781 | if (srp->bio) | 1789 | ret = blk_rq_unmap_user(srp->bio); |
1782 | ret = blk_rq_unmap_user(srp->bio); | ||
1783 | 1790 | ||
1791 | if (srp->rq) { | ||
1784 | if (srp->rq->cmd != srp->rq->__cmd) | 1792 | if (srp->rq->cmd != srp->rq->__cmd) |
1785 | kfree(srp->rq->cmd); | 1793 | kfree(srp->rq->cmd); |
1786 | blk_put_request(srp->rq); | 1794 | blk_put_request(srp->rq); |
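Taken together with the sg_rq_end_io() hunk above, the reordering here encodes a simple ownership rule: the completion path may already have freed the request (and cleared srp->rq) so its tag and resources can be recycled immediately, but the bio must survive until process context can call blk_rq_unmap_user(). Restated as comments on the resulting cleanup (field names as in sg.c):

	if (srp->bio)				/* always unmap; the bio outlives the request */
		ret = blk_rq_unmap_user(srp->bio);

	if (srp->rq) {				/* only if sg_rq_end_io() has not freed it yet */
		if (srp->rq->cmd != srp->rq->__cmd)
			kfree(srp->rq->cmd);	/* long CDB allocated at submit time */
		blk_put_request(srp->rq);
	}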
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index fb929fac22ba..03054c0e7689 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c | |||
@@ -245,9 +245,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
245 | sr_printk(KERN_INFO, cd, | 245 | sr_printk(KERN_INFO, cd, |
246 | "CDROM not ready. Make sure there " | 246 | "CDROM not ready. Make sure there " |
247 | "is a disc in the drive.\n"); | 247 | "is a disc in the drive.\n"); |
248 | #ifdef DEBUG | ||
249 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
250 | #endif | ||
251 | err = -ENOMEDIUM; | 248 | err = -ENOMEDIUM; |
252 | break; | 249 | break; |
253 | case ILLEGAL_REQUEST: | 250 | case ILLEGAL_REQUEST: |
@@ -256,16 +253,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
256 | sshdr.ascq == 0x00) | 253 | sshdr.ascq == 0x00) |
257 | /* sense: Invalid command operation code */ | 254 | /* sense: Invalid command operation code */ |
258 | err = -EDRIVE_CANT_DO_THIS; | 255 | err = -EDRIVE_CANT_DO_THIS; |
259 | #ifdef DEBUG | ||
260 | __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); | ||
261 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
262 | #endif | ||
263 | break; | 256 | break; |
264 | default: | 257 | default: |
265 | sr_printk(KERN_ERR, cd, | ||
266 | "CDROM (ioctl) error, command: "); | ||
267 | __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); | ||
268 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
269 | err = -EIO; | 258 | err = -EIO; |
270 | } | 259 | } |
271 | } | 260 | } |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 128d3b55bdd9..9a1c34205254 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4551,18 +4551,15 @@ static int sgl_map_user_pages(struct st_buffer *STbp, | |||
4551 | return -ENOMEM; | 4551 | return -ENOMEM; |
4552 | 4552 | ||
4553 | /* Try to fault in all of the necessary pages */ | 4553 | /* Try to fault in all of the necessary pages */ |
4554 | down_read(¤t->mm->mmap_sem); | ||
4555 | /* rw==READ means read from drive, write into memory area */ | 4554 | /* rw==READ means read from drive, write into memory area */ |
4556 | res = get_user_pages( | 4555 | res = get_user_pages_unlocked( |
4557 | current, | 4556 | current, |
4558 | current->mm, | 4557 | current->mm, |
4559 | uaddr, | 4558 | uaddr, |
4560 | nr_pages, | 4559 | nr_pages, |
4561 | rw == READ, | 4560 | rw == READ, |
4562 | 0, /* don't force */ | 4561 | 0, /* don't force */ |
4563 | pages, | 4562 | pages); |
4564 | NULL); | ||
4565 | up_read(¤t->mm->mmap_sem); | ||
4566 | 4563 | ||
4567 | /* Errors and no page mapped should return here */ | 4564 | /* Errors and no page mapped should return here */ |
4568 | if (res < nr_pages) | 4565 | if (res < nr_pages) |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 4cff0ddc2c25..efc6e446b6c8 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/device.h> | 33 | #include <linux/device.h> |
34 | #include <linux/hyperv.h> | 34 | #include <linux/hyperv.h> |
35 | #include <linux/mempool.h> | ||
36 | #include <linux/blkdev.h> | 35 | #include <linux/blkdev.h> |
37 | #include <scsi/scsi.h> | 36 | #include <scsi/scsi.h> |
38 | #include <scsi/scsi_cmnd.h> | 37 | #include <scsi/scsi_cmnd.h> |
@@ -309,14 +308,6 @@ enum storvsc_request_type { | |||
309 | * This is the end of Protocol specific defines. | 308 | * This is the end of Protocol specific defines. |
310 | */ | 309 | */ |
311 | 310 | ||
312 | |||
313 | /* | ||
314 | * We setup a mempool to allocate request structures for this driver | ||
315 | * on a per-lun basis. The following define specifies the number of | ||
316 | * elements in the pool. | ||
317 | */ | ||
318 | |||
319 | #define STORVSC_MIN_BUF_NR 64 | ||
320 | static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); | 311 | static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); |
321 | 312 | ||
322 | module_param(storvsc_ringbuffer_size, int, S_IRUGO); | 313 | module_param(storvsc_ringbuffer_size, int, S_IRUGO); |
@@ -346,7 +337,6 @@ static void storvsc_on_channel_callback(void *context); | |||
346 | #define STORVSC_IDE_MAX_CHANNELS 1 | 337 | #define STORVSC_IDE_MAX_CHANNELS 1 |
347 | 338 | ||
348 | struct storvsc_cmd_request { | 339 | struct storvsc_cmd_request { |
349 | struct list_head entry; | ||
350 | struct scsi_cmnd *cmd; | 340 | struct scsi_cmnd *cmd; |
351 | 341 | ||
352 | unsigned int bounce_sgl_count; | 342 | unsigned int bounce_sgl_count; |
@@ -357,7 +347,6 @@ struct storvsc_cmd_request { | |||
357 | /* Synchronize the request/response if needed */ | 347 | /* Synchronize the request/response if needed */ |
358 | struct completion wait_event; | 348 | struct completion wait_event; |
359 | 349 | ||
360 | unsigned char *sense_buffer; | ||
361 | struct hv_multipage_buffer data_buffer; | 350 | struct hv_multipage_buffer data_buffer; |
362 | struct vstor_packet vstor_packet; | 351 | struct vstor_packet vstor_packet; |
363 | }; | 352 | }; |
@@ -389,11 +378,6 @@ struct storvsc_device { | |||
389 | struct storvsc_cmd_request reset_request; | 378 | struct storvsc_cmd_request reset_request; |
390 | }; | 379 | }; |
391 | 380 | ||
392 | struct stor_mem_pools { | ||
393 | struct kmem_cache *request_pool; | ||
394 | mempool_t *request_mempool; | ||
395 | }; | ||
396 | |||
397 | struct hv_host_device { | 381 | struct hv_host_device { |
398 | struct hv_device *dev; | 382 | struct hv_device *dev; |
399 | unsigned int port; | 383 | unsigned int port; |
@@ -426,21 +410,42 @@ done: | |||
426 | kfree(wrk); | 410 | kfree(wrk); |
427 | } | 411 | } |
428 | 412 | ||
429 | static void storvsc_bus_scan(struct work_struct *work) | 413 | static void storvsc_host_scan(struct work_struct *work) |
430 | { | 414 | { |
431 | struct storvsc_scan_work *wrk; | 415 | struct storvsc_scan_work *wrk; |
432 | int id, order_id; | 416 | struct Scsi_Host *host; |
417 | struct scsi_device *sdev; | ||
418 | unsigned long flags; | ||
433 | 419 | ||
434 | wrk = container_of(work, struct storvsc_scan_work, work); | 420 | wrk = container_of(work, struct storvsc_scan_work, work); |
435 | for (id = 0; id < wrk->host->max_id; ++id) { | 421 | host = wrk->host; |
436 | if (wrk->host->reverse_ordering) | 422 | |
437 | order_id = wrk->host->max_id - id - 1; | 423 | /* |
438 | else | 424 | * Before scanning the host, first check to see if any of the |
439 | order_id = id; | 425 | * currently known devices have been hot removed. We issue a |
440 | 426 | * "unit ready" command against all currently known devices. | |
441 | scsi_scan_target(&wrk->host->shost_gendev, 0, | 427 | * This I/O will result in an error for devices that have been |
442 | order_id, SCAN_WILD_CARD, 1); | 428 | * removed. As part of handling the I/O error, we remove the device. |
429 | * | ||
430 | * When a LUN is added or removed, the host sends us a signal to | ||
431 | * scan the host. Thus we are forced to discover the LUNs that | ||
432 | * may have been removed this way. | ||
433 | */ | ||
434 | mutex_lock(&host->scan_mutex); | ||
435 | spin_lock_irqsave(host->host_lock, flags); | ||
436 | list_for_each_entry(sdev, &host->__devices, siblings) { | ||
437 | spin_unlock_irqrestore(host->host_lock, flags); | ||
438 | scsi_test_unit_ready(sdev, 1, 1, NULL); | ||
439 | spin_lock_irqsave(host->host_lock, flags); | ||
440 | continue; | ||
443 | } | 441 | } |
442 | spin_unlock_irqrestore(host->host_lock, flags); | ||
443 | mutex_unlock(&host->scan_mutex); | ||
444 | /* | ||
445 | * Now scan the host to discover LUNs that may have been added. | ||
446 | */ | ||
447 | scsi_scan_host(host); | ||
448 | |||
444 | kfree(wrk); | 449 | kfree(wrk); |
445 | } | 450 | } |
446 | 451 | ||
@@ -1070,10 +1075,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
1070 | { | 1075 | { |
1071 | struct scsi_cmnd *scmnd = cmd_request->cmd; | 1076 | struct scsi_cmnd *scmnd = cmd_request->cmd; |
1072 | struct hv_host_device *host_dev = shost_priv(scmnd->device->host); | 1077 | struct hv_host_device *host_dev = shost_priv(scmnd->device->host); |
1073 | void (*scsi_done_fn)(struct scsi_cmnd *); | ||
1074 | struct scsi_sense_hdr sense_hdr; | 1078 | struct scsi_sense_hdr sense_hdr; |
1075 | struct vmscsi_request *vm_srb; | 1079 | struct vmscsi_request *vm_srb; |
1076 | struct stor_mem_pools *memp = scmnd->device->hostdata; | ||
1077 | struct Scsi_Host *host; | 1080 | struct Scsi_Host *host; |
1078 | struct storvsc_device *stor_dev; | 1081 | struct storvsc_device *stor_dev; |
1079 | struct hv_device *dev = host_dev->dev; | 1082 | struct hv_device *dev = host_dev->dev; |
@@ -1109,14 +1112,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
1109 | cmd_request->data_buffer.len - | 1112 | cmd_request->data_buffer.len - |
1110 | vm_srb->data_transfer_length); | 1113 | vm_srb->data_transfer_length); |
1111 | 1114 | ||
1112 | scsi_done_fn = scmnd->scsi_done; | 1115 | scmnd->scsi_done(scmnd); |
1113 | |||
1114 | scmnd->host_scribble = NULL; | ||
1115 | scmnd->scsi_done = NULL; | ||
1116 | |||
1117 | scsi_done_fn(scmnd); | ||
1118 | |||
1119 | mempool_free(cmd_request, memp->request_mempool); | ||
1120 | } | 1116 | } |
1121 | 1117 | ||
1122 | static void storvsc_on_io_completion(struct hv_device *device, | 1118 | static void storvsc_on_io_completion(struct hv_device *device, |
@@ -1160,7 +1156,7 @@ static void storvsc_on_io_completion(struct hv_device *device, | |||
1160 | SRB_STATUS_AUTOSENSE_VALID) { | 1156 | SRB_STATUS_AUTOSENSE_VALID) { |
1161 | /* autosense data available */ | 1157 | /* autosense data available */ |
1162 | 1158 | ||
1163 | memcpy(request->sense_buffer, | 1159 | memcpy(request->cmd->sense_buffer, |
1164 | vstor_packet->vm_srb.sense_data, | 1160 | vstor_packet->vm_srb.sense_data, |
1165 | vstor_packet->vm_srb.sense_info_length); | 1161 | vstor_packet->vm_srb.sense_info_length); |
1166 | 1162 | ||
@@ -1198,7 +1194,7 @@ static void storvsc_on_receive(struct hv_device *device, | |||
1198 | if (!work) | 1194 | if (!work) |
1199 | return; | 1195 | return; |
1200 | 1196 | ||
1201 | INIT_WORK(&work->work, storvsc_bus_scan); | 1197 | INIT_WORK(&work->work, storvsc_host_scan); |
1202 | work->host = stor_device->host; | 1198 | work->host = stor_device->host; |
1203 | schedule_work(&work->work); | 1199 | schedule_work(&work->work); |
1204 | break; | 1200 | break; |
@@ -1378,55 +1374,6 @@ static int storvsc_do_io(struct hv_device *device, | |||
1378 | return ret; | 1374 | return ret; |
1379 | } | 1375 | } |
1380 | 1376 | ||
1381 | static int storvsc_device_alloc(struct scsi_device *sdevice) | ||
1382 | { | ||
1383 | struct stor_mem_pools *memp; | ||
1384 | int number = STORVSC_MIN_BUF_NR; | ||
1385 | |||
1386 | memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL); | ||
1387 | if (!memp) | ||
1388 | return -ENOMEM; | ||
1389 | |||
1390 | memp->request_pool = | ||
1391 | kmem_cache_create(dev_name(&sdevice->sdev_dev), | ||
1392 | sizeof(struct storvsc_cmd_request), 0, | ||
1393 | SLAB_HWCACHE_ALIGN, NULL); | ||
1394 | |||
1395 | if (!memp->request_pool) | ||
1396 | goto err0; | ||
1397 | |||
1398 | memp->request_mempool = mempool_create(number, mempool_alloc_slab, | ||
1399 | mempool_free_slab, | ||
1400 | memp->request_pool); | ||
1401 | |||
1402 | if (!memp->request_mempool) | ||
1403 | goto err1; | ||
1404 | |||
1405 | sdevice->hostdata = memp; | ||
1406 | |||
1407 | return 0; | ||
1408 | |||
1409 | err1: | ||
1410 | kmem_cache_destroy(memp->request_pool); | ||
1411 | |||
1412 | err0: | ||
1413 | kfree(memp); | ||
1414 | return -ENOMEM; | ||
1415 | } | ||
1416 | |||
1417 | static void storvsc_device_destroy(struct scsi_device *sdevice) | ||
1418 | { | ||
1419 | struct stor_mem_pools *memp = sdevice->hostdata; | ||
1420 | |||
1421 | if (!memp) | ||
1422 | return; | ||
1423 | |||
1424 | mempool_destroy(memp->request_mempool); | ||
1425 | kmem_cache_destroy(memp->request_pool); | ||
1426 | kfree(memp); | ||
1427 | sdevice->hostdata = NULL; | ||
1428 | } | ||
1429 | |||
1430 | static int storvsc_device_configure(struct scsi_device *sdevice) | 1377 | static int storvsc_device_configure(struct scsi_device *sdevice) |
1431 | { | 1378 | { |
1432 | scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); | 1379 | scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); |
@@ -1447,6 +1394,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice) | |||
1447 | */ | 1394 | */ |
1448 | sdevice->sdev_bflags |= msft_blist_flags; | 1395 | sdevice->sdev_bflags |= msft_blist_flags; |
1449 | 1396 | ||
1397 | /* | ||
1398 | * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 | ||
1399 | * if the device is a MSFT virtual device. | ||
1400 | */ | ||
1401 | if (!strncmp(sdevice->vendor, "Msft", 4)) { | ||
1402 | switch (vmbus_proto_version) { | ||
1403 | case VERSION_WIN8: | ||
1404 | case VERSION_WIN8_1: | ||
1405 | sdevice->scsi_level = SCSI_SPC_3; | ||
1406 | break; | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1450 | return 0; | 1410 | return 0; |
1451 | } | 1411 | } |
1452 | 1412 | ||
@@ -1561,13 +1521,11 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1561 | int ret; | 1521 | int ret; |
1562 | struct hv_host_device *host_dev = shost_priv(host); | 1522 | struct hv_host_device *host_dev = shost_priv(host); |
1563 | struct hv_device *dev = host_dev->dev; | 1523 | struct hv_device *dev = host_dev->dev; |
1564 | struct storvsc_cmd_request *cmd_request; | 1524 | struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); |
1565 | unsigned int request_size = 0; | ||
1566 | int i; | 1525 | int i; |
1567 | struct scatterlist *sgl; | 1526 | struct scatterlist *sgl; |
1568 | unsigned int sg_count = 0; | 1527 | unsigned int sg_count = 0; |
1569 | struct vmscsi_request *vm_srb; | 1528 | struct vmscsi_request *vm_srb; |
1570 | struct stor_mem_pools *memp = scmnd->device->hostdata; | ||
1571 | 1529 | ||
1572 | if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { | 1530 | if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { |
1573 | /* | 1531 | /* |
@@ -1584,25 +1542,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1584 | } | 1542 | } |
1585 | } | 1543 | } |
1586 | 1544 | ||
1587 | request_size = sizeof(struct storvsc_cmd_request); | ||
1588 | |||
1589 | cmd_request = mempool_alloc(memp->request_mempool, | ||
1590 | GFP_ATOMIC); | ||
1591 | |||
1592 | /* | ||
1593 | * We might be invoked in an interrupt context; hence | ||
1594 | * mempool_alloc() can fail. | ||
1595 | */ | ||
1596 | if (!cmd_request) | ||
1597 | return SCSI_MLQUEUE_DEVICE_BUSY; | ||
1598 | |||
1599 | memset(cmd_request, 0, sizeof(struct storvsc_cmd_request)); | ||
1600 | |||
1601 | /* Setup the cmd request */ | 1545 | /* Setup the cmd request */ |
1602 | cmd_request->cmd = scmnd; | 1546 | cmd_request->cmd = scmnd; |
1603 | 1547 | ||
1604 | scmnd->host_scribble = (unsigned char *)cmd_request; | ||
1605 | |||
1606 | vm_srb = &cmd_request->vstor_packet.vm_srb; | 1548 | vm_srb = &cmd_request->vstor_packet.vm_srb; |
1607 | vm_srb->win8_extension.time_out_value = 60; | 1549 | vm_srb->win8_extension.time_out_value = 60; |
1608 | 1550 | ||
@@ -1637,9 +1579,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1637 | 1579 | ||
1638 | memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); | 1580 | memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); |
1639 | 1581 | ||
1640 | cmd_request->sense_buffer = scmnd->sense_buffer; | ||
1641 | |||
1642 | |||
1643 | cmd_request->data_buffer.len = scsi_bufflen(scmnd); | 1582 | cmd_request->data_buffer.len = scsi_bufflen(scmnd); |
1644 | if (scsi_sg_count(scmnd)) { | 1583 | if (scsi_sg_count(scmnd)) { |
1645 | sgl = (struct scatterlist *)scsi_sglist(scmnd); | 1584 | sgl = (struct scatterlist *)scsi_sglist(scmnd); |
@@ -1651,10 +1590,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1651 | create_bounce_buffer(sgl, scsi_sg_count(scmnd), | 1590 | create_bounce_buffer(sgl, scsi_sg_count(scmnd), |
1652 | scsi_bufflen(scmnd), | 1591 | scsi_bufflen(scmnd), |
1653 | vm_srb->data_in); | 1592 | vm_srb->data_in); |
1654 | if (!cmd_request->bounce_sgl) { | 1593 | if (!cmd_request->bounce_sgl) |
1655 | ret = SCSI_MLQUEUE_HOST_BUSY; | 1594 | return SCSI_MLQUEUE_HOST_BUSY; |
1656 | goto queue_error; | ||
1657 | } | ||
1658 | 1595 | ||
1659 | cmd_request->bounce_sgl_count = | 1596 | cmd_request->bounce_sgl_count = |
1660 | ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> | 1597 | ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> |
@@ -1692,27 +1629,21 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1692 | destroy_bounce_buffer(cmd_request->bounce_sgl, | 1629 | destroy_bounce_buffer(cmd_request->bounce_sgl, |
1693 | cmd_request->bounce_sgl_count); | 1630 | cmd_request->bounce_sgl_count); |
1694 | 1631 | ||
1695 | ret = SCSI_MLQUEUE_DEVICE_BUSY; | 1632 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1696 | goto queue_error; | ||
1697 | } | 1633 | } |
1698 | 1634 | ||
1699 | return 0; | 1635 | return 0; |
1700 | |||
1701 | queue_error: | ||
1702 | mempool_free(cmd_request, memp->request_mempool); | ||
1703 | scmnd->host_scribble = NULL; | ||
1704 | return ret; | ||
1705 | } | 1636 | } |
1706 | 1637 | ||
1707 | static struct scsi_host_template scsi_driver = { | 1638 | static struct scsi_host_template scsi_driver = { |
1708 | .module = THIS_MODULE, | 1639 | .module = THIS_MODULE, |
1709 | .name = "storvsc_host_t", | 1640 | .name = "storvsc_host_t", |
1641 | .cmd_size = sizeof(struct storvsc_cmd_request), | ||
1710 | .bios_param = storvsc_get_chs, | 1642 | .bios_param = storvsc_get_chs, |
1711 | .queuecommand = storvsc_queuecommand, | 1643 | .queuecommand = storvsc_queuecommand, |
1712 | .eh_host_reset_handler = storvsc_host_reset_handler, | 1644 | .eh_host_reset_handler = storvsc_host_reset_handler, |
1645 | .proc_name = "storvsc_host", | ||
1713 | .eh_timed_out = storvsc_eh_timed_out, | 1646 | .eh_timed_out = storvsc_eh_timed_out, |
1714 | .slave_alloc = storvsc_device_alloc, | ||
1715 | .slave_destroy = storvsc_device_destroy, | ||
1716 | .slave_configure = storvsc_device_configure, | 1647 | .slave_configure = storvsc_device_configure, |
1717 | .cmd_per_lun = 255, | 1648 | .cmd_per_lun = 255, |
1718 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, | 1649 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, |
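The template change above is the heart of this storvsc cleanup: with .cmd_size set, the SCSI midlayer allocates the driver's per-command storvsc_cmd_request together with each scsi_cmnd, which is why the per-LUN mempool, the slave_alloc/slave_destroy hooks and the host_scribble bookkeeping can all go away. A minimal sketch of the pattern, with example_* names that are purely illustrative and not part of this patch:

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* hypothetical per-command driver data, sized via .cmd_size below */
struct example_cmd_priv {
	unsigned char opcode;
};

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	/* storage allocated by the midlayer alongside the scsi_cmnd */
	struct example_cmd_priv *priv = scsi_cmd_priv(scmnd);

	priv->opcode = scmnd->cmnd[0];	/* stash per-command driver state */
	/* issue the I/O; the completion path later calls scmnd->scsi_done(scmnd) */
	return 0;
}

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.queuecommand	= example_queuecommand,
	.cmd_size	= sizeof(struct example_cmd_priv),
};

storvsc_queuecommand() above follows the same shape: scsi_cmd_priv(scmnd) replaces the mempool_alloc()/host_scribble pair, and storvsc_command_completion() simply calls scmnd->scsi_done() without freeing anything.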
@@ -1760,6 +1691,9 @@ static int storvsc_probe(struct hv_device *device, | |||
1760 | bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); | 1691 | bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); |
1761 | int target = 0; | 1692 | int target = 0; |
1762 | struct storvsc_device *stor_device; | 1693 | struct storvsc_device *stor_device; |
1694 | int max_luns_per_target; | ||
1695 | int max_targets; | ||
1696 | int max_channels; | ||
1763 | 1697 | ||
1764 | /* | 1698 | /* |
1765 | * Based on the windows host we are running on, | 1699 | * Based on the windows host we are running on, |
@@ -1773,12 +1707,18 @@ static int storvsc_probe(struct hv_device *device, | |||
1773 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); | 1707 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); |
1774 | vmstor_current_major = VMSTOR_WIN7_MAJOR; | 1708 | vmstor_current_major = VMSTOR_WIN7_MAJOR; |
1775 | vmstor_current_minor = VMSTOR_WIN7_MINOR; | 1709 | vmstor_current_minor = VMSTOR_WIN7_MINOR; |
1710 | max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET; | ||
1711 | max_targets = STORVSC_IDE_MAX_TARGETS; | ||
1712 | max_channels = STORVSC_IDE_MAX_CHANNELS; | ||
1776 | break; | 1713 | break; |
1777 | default: | 1714 | default: |
1778 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; | 1715 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; |
1779 | vmscsi_size_delta = 0; | 1716 | vmscsi_size_delta = 0; |
1780 | vmstor_current_major = VMSTOR_WIN8_MAJOR; | 1717 | vmstor_current_major = VMSTOR_WIN8_MAJOR; |
1781 | vmstor_current_minor = VMSTOR_WIN8_MINOR; | 1718 | vmstor_current_minor = VMSTOR_WIN8_MINOR; |
1719 | max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; | ||
1720 | max_targets = STORVSC_MAX_TARGETS; | ||
1721 | max_channels = STORVSC_MAX_CHANNELS; | ||
1782 | break; | 1722 | break; |
1783 | } | 1723 | } |
1784 | 1724 | ||
@@ -1826,9 +1766,9 @@ static int storvsc_probe(struct hv_device *device, | |||
1826 | break; | 1766 | break; |
1827 | 1767 | ||
1828 | case SCSI_GUID: | 1768 | case SCSI_GUID: |
1829 | host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; | 1769 | host->max_lun = max_luns_per_target; |
1830 | host->max_id = STORVSC_MAX_TARGETS; | 1770 | host->max_id = max_targets; |
1831 | host->max_channel = STORVSC_MAX_CHANNELS - 1; | 1771 | host->max_channel = max_channels - 1; |
1832 | break; | 1772 | break; |
1833 | 1773 | ||
1834 | default: | 1774 | default: |
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 6e07b2afddeb..8a1f4b355416 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig | |||
@@ -70,3 +70,16 @@ config SCSI_UFSHCD_PLATFORM | |||
70 | If you have a controller with this interface, say Y or M here. | 70 | If you have a controller with this interface, say Y or M here. |
71 | 71 | ||
72 | If unsure, say N. | 72 | If unsure, say N. |
73 | |||
74 | config SCSI_UFS_QCOM | ||
75 | bool "QCOM specific hooks to UFS controller platform driver" | ||
76 | depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM | ||
77 | select PHY_QCOM_UFS | ||
78 | help | ||
79 | This selects the QCOM specific additions to the UFSHCD platform driver. | ||
80 | The UFS host on QCOM chipsets needs some vendor specific configuration | ||
81 | before accessing the hardware, which includes PHY configuration and | ||
82 | vendor specific registers. | ||
83 | |||
84 | Select this if you have a UFS controller on a QCOM chipset. | ||
85 | If unsure, say N. | ||
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 1e5bd48457d6..8303bcce7a23 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | # UFSHCD makefile | 1 | # UFSHCD makefile |
2 | obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o | ||
2 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o | 3 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o |
3 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o | 4 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o |
4 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o | 5 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o |
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c new file mode 100644 index 000000000000..9217af9bf734 --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom.c | |||
@@ -0,0 +1,1004 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/time.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/phy/phy.h> | ||
19 | |||
20 | #include <linux/phy/phy-qcom-ufs.h> | ||
21 | #include "ufshcd.h" | ||
22 | #include "unipro.h" | ||
23 | #include "ufs-qcom.h" | ||
24 | #include "ufshci.h" | ||
25 | |||
26 | static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; | ||
27 | |||
28 | static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result); | ||
29 | static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, | ||
30 | const char *speed_mode); | ||
31 | static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); | ||
32 | |||
33 | static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) | ||
34 | { | ||
35 | int err = 0; | ||
36 | |||
37 | err = ufshcd_dme_get(hba, | ||
38 | UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes); | ||
39 | if (err) | ||
40 | dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", | ||
41 | __func__, err); | ||
42 | |||
43 | return err; | ||
44 | } | ||
45 | |||
46 | static int ufs_qcom_host_clk_get(struct device *dev, | ||
47 | const char *name, struct clk **clk_out) | ||
48 | { | ||
49 | struct clk *clk; | ||
50 | int err = 0; | ||
51 | |||
52 | clk = devm_clk_get(dev, name); | ||
53 | if (IS_ERR(clk)) { | ||
54 | err = PTR_ERR(clk); | ||
55 | dev_err(dev, "%s: failed to get %s err %d", | ||
56 | __func__, name, err); | ||
57 | } else { | ||
58 | *clk_out = clk; | ||
59 | } | ||
60 | |||
61 | return err; | ||
62 | } | ||
63 | |||
64 | static int ufs_qcom_host_clk_enable(struct device *dev, | ||
65 | const char *name, struct clk *clk) | ||
66 | { | ||
67 | int err = 0; | ||
68 | |||
69 | err = clk_prepare_enable(clk); | ||
70 | if (err) | ||
71 | dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err); | ||
72 | |||
73 | return err; | ||
74 | } | ||
75 | |||
76 | static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) | ||
77 | { | ||
78 | if (!host->is_lane_clks_enabled) | ||
79 | return; | ||
80 | |||
81 | clk_disable_unprepare(host->tx_l1_sync_clk); | ||
82 | clk_disable_unprepare(host->tx_l0_sync_clk); | ||
83 | clk_disable_unprepare(host->rx_l1_sync_clk); | ||
84 | clk_disable_unprepare(host->rx_l0_sync_clk); | ||
85 | |||
86 | host->is_lane_clks_enabled = false; | ||
87 | } | ||
88 | |||
89 | static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) | ||
90 | { | ||
91 | int err = 0; | ||
92 | struct device *dev = host->hba->dev; | ||
93 | |||
94 | if (host->is_lane_clks_enabled) | ||
95 | return 0; | ||
96 | |||
97 | err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk", | ||
98 | host->rx_l0_sync_clk); | ||
99 | if (err) | ||
100 | goto out; | ||
101 | |||
102 | err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk", | ||
103 | host->tx_l0_sync_clk); | ||
104 | if (err) | ||
105 | goto disable_rx_l0; | ||
106 | |||
107 | err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", | ||
108 | host->rx_l1_sync_clk); | ||
109 | if (err) | ||
110 | goto disable_tx_l0; | ||
111 | |||
112 | err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", | ||
113 | host->tx_l1_sync_clk); | ||
114 | if (err) | ||
115 | goto disable_rx_l1; | ||
116 | |||
117 | host->is_lane_clks_enabled = true; | ||
118 | goto out; | ||
119 | |||
120 | disable_rx_l1: | ||
121 | clk_disable_unprepare(host->rx_l1_sync_clk); | ||
122 | disable_tx_l0: | ||
123 | clk_disable_unprepare(host->tx_l0_sync_clk); | ||
124 | disable_rx_l0: | ||
125 | clk_disable_unprepare(host->rx_l0_sync_clk); | ||
126 | out: | ||
127 | return err; | ||
128 | } | ||
129 | |||
130 | static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) | ||
131 | { | ||
132 | int err = 0; | ||
133 | struct device *dev = host->hba->dev; | ||
134 | |||
135 | err = ufs_qcom_host_clk_get(dev, | ||
136 | "rx_lane0_sync_clk", &host->rx_l0_sync_clk); | ||
137 | if (err) | ||
138 | goto out; | ||
139 | |||
140 | err = ufs_qcom_host_clk_get(dev, | ||
141 | "tx_lane0_sync_clk", &host->tx_l0_sync_clk); | ||
142 | if (err) | ||
143 | goto out; | ||
144 | |||
145 | err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", | ||
146 | &host->rx_l1_sync_clk); | ||
147 | if (err) | ||
148 | goto out; | ||
149 | |||
150 | err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", | ||
151 | &host->tx_l1_sync_clk); | ||
152 | out: | ||
153 | return err; | ||
154 | } | ||
155 | |||
156 | static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) | ||
157 | { | ||
158 | struct ufs_qcom_host *host = hba->priv; | ||
159 | struct phy *phy = host->generic_phy; | ||
160 | u32 tx_lanes; | ||
161 | int err = 0; | ||
162 | |||
163 | err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); | ||
164 | if (err) | ||
165 | goto out; | ||
166 | |||
167 | err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); | ||
168 | if (err) | ||
169 | dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", | ||
170 | __func__); | ||
171 | |||
172 | out: | ||
173 | return err; | ||
174 | } | ||
175 | |||
176 | static int ufs_qcom_check_hibern8(struct ufs_hba *hba) | ||
177 | { | ||
178 | int err; | ||
179 | u32 tx_fsm_val = 0; | ||
180 | unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); | ||
181 | |||
182 | do { | ||
183 | err = ufshcd_dme_get(hba, | ||
184 | UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); | ||
185 | if (err || tx_fsm_val == TX_FSM_HIBERN8) | ||
186 | break; | ||
187 | |||
188 | /* sleep for max. 200us */ | ||
189 | usleep_range(100, 200); | ||
190 | } while (time_before(jiffies, timeout)); | ||
191 | |||
192 | /* | ||
193 | * we might have scheduled out for long during polling so | ||
194 | * check the state again. | ||
195 | */ | ||
196 | if (time_after(jiffies, timeout)) | ||
197 | err = ufshcd_dme_get(hba, | ||
198 | UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); | ||
199 | |||
200 | if (err) { | ||
201 | dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", | ||
202 | __func__, err); | ||
203 | } else if (tx_fsm_val != TX_FSM_HIBERN8) { | ||
204 | err = tx_fsm_val; | ||
205 | dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", | ||
206 | __func__, err); | ||
207 | } | ||
208 | |||
209 | return err; | ||
210 | } | ||
211 | |||
212 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) | ||
213 | { | ||
214 | struct ufs_qcom_host *host = hba->priv; | ||
215 | struct phy *phy = host->generic_phy; | ||
216 | int ret = 0; | ||
217 | u8 major; | ||
218 | u16 minor, step; | ||
219 | bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) | ||
220 | ? true : false; | ||
221 | |||
222 | /* Assert PHY reset and apply PHY calibration values */ | ||
223 | ufs_qcom_assert_reset(hba); | ||
224 | /* provide 1ms delay to let the reset pulse propagate */ | ||
225 | usleep_range(1000, 1100); | ||
226 | |||
227 | ufs_qcom_get_controller_revision(hba, &major, &minor, &step); | ||
228 | ufs_qcom_phy_save_controller_version(phy, major, minor, step); | ||
229 | ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); | ||
230 | if (ret) { | ||
231 | dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", | ||
232 | __func__, ret); | ||
233 | goto out; | ||
234 | } | ||
235 | |||
236 | /* De-assert PHY reset and start serdes */ | ||
237 | ufs_qcom_deassert_reset(hba); | ||
238 | |||
239 | /* | ||
240 | * after reset deassertion, phy will need all ref clocks, | ||
241 | * voltage, current to settle down before starting serdes. | ||
242 | */ | ||
243 | usleep_range(1000, 1100); | ||
244 | ret = ufs_qcom_phy_start_serdes(phy); | ||
245 | if (ret) { | ||
246 | dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n", | ||
247 | __func__, ret); | ||
248 | goto out; | ||
249 | } | ||
250 | |||
251 | ret = ufs_qcom_phy_is_pcs_ready(phy); | ||
252 | if (ret) | ||
253 | dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", | ||
254 | __func__, ret); | ||
255 | |||
256 | out: | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * The UTP controller has a number of internal clock gating cells (CGCs). | ||
262 | * Internal hardware sub-modules within the UTP controller control the CGCs. | ||
263 | * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved | ||
264 | * in a specific operation. The UTP controller CGCs are disabled by default, | ||
265 | * and this function enables them (after every UFS link startup) to reduce | ||
266 | * power leakage. | ||
267 | */ | ||
268 | static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) | ||
269 | { | ||
270 | ufshcd_writel(hba, | ||
271 | ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, | ||
272 | REG_UFS_CFG2); | ||
273 | |||
274 | /* Ensure that HW clock gating is enabled before next operations */ | ||
275 | mb(); | ||
276 | } | ||
277 | |||
278 | static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status) | ||
279 | { | ||
280 | struct ufs_qcom_host *host = hba->priv; | ||
281 | int err = 0; | ||
282 | |||
283 | switch (status) { | ||
284 | case PRE_CHANGE: | ||
285 | ufs_qcom_power_up_sequence(hba); | ||
286 | /* | ||
287 | * The PHY PLL output is the source of tx/rx lane symbol | ||
288 | * clocks, hence, enable the lane clocks only after PHY | ||
289 | * is initialized. | ||
290 | */ | ||
291 | err = ufs_qcom_enable_lane_clks(host); | ||
292 | break; | ||
293 | case POST_CHANGE: | ||
294 | /* check if UFS PHY moved from DISABLED to HIBERN8 */ | ||
295 | err = ufs_qcom_check_hibern8(hba); | ||
296 | ufs_qcom_enable_hw_clk_gating(hba); | ||
297 | |||
298 | break; | ||
299 | default: | ||
300 | dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); | ||
301 | err = -EINVAL; | ||
302 | break; | ||
303 | } | ||
304 | return err; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * Returns the configured core_clk rate on success and 0 | ||
309 | * in case of a failure | ||
310 | */ | ||
311 | static unsigned long | ||
312 | ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) | ||
313 | { | ||
314 | struct ufs_clk_info *clki; | ||
315 | u32 core_clk_period_in_ns; | ||
316 | u32 tx_clk_cycles_per_us = 0; | ||
317 | unsigned long core_clk_rate = 0; | ||
318 | u32 core_clk_cycles_per_us = 0; | ||
319 | |||
320 | static u32 pwm_fr_table[][2] = { | ||
321 | {UFS_PWM_G1, 0x1}, | ||
322 | {UFS_PWM_G2, 0x1}, | ||
323 | {UFS_PWM_G3, 0x1}, | ||
324 | {UFS_PWM_G4, 0x1}, | ||
325 | }; | ||
326 | |||
327 | static u32 hs_fr_table_rA[][2] = { | ||
328 | {UFS_HS_G1, 0x1F}, | ||
329 | {UFS_HS_G2, 0x3e}, | ||
330 | }; | ||
331 | |||
332 | static u32 hs_fr_table_rB[][2] = { | ||
333 | {UFS_HS_G1, 0x24}, | ||
334 | {UFS_HS_G2, 0x49}, | ||
335 | }; | ||
336 | |||
337 | if (gear == 0) { | ||
338 | dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); | ||
339 | goto out_error; | ||
340 | } | ||
341 | |||
342 | list_for_each_entry(clki, &hba->clk_list_head, list) { | ||
343 | if (!strcmp(clki->name, "core_clk")) | ||
344 | core_clk_rate = clk_get_rate(clki->clk); | ||
345 | } | ||
346 | |||
347 | /* If frequency is smaller than 1MHz, set to 1MHz */ | ||
348 | if (core_clk_rate < DEFAULT_CLK_RATE_HZ) | ||
349 | core_clk_rate = DEFAULT_CLK_RATE_HZ; | ||
350 | |||
351 | core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; | ||
352 | ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); | ||
353 | |||
354 | core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; | ||
355 | core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; | ||
356 | core_clk_period_in_ns &= MASK_CLK_NS_REG; | ||
357 | |||
358 | switch (hs) { | ||
359 | case FASTAUTO_MODE: | ||
360 | case FAST_MODE: | ||
361 | if (rate == PA_HS_MODE_A) { | ||
362 | if (gear > ARRAY_SIZE(hs_fr_table_rA)) { | ||
363 | dev_err(hba->dev, | ||
364 | "%s: index %d exceeds table size %zu\n", | ||
365 | __func__, gear, | ||
366 | ARRAY_SIZE(hs_fr_table_rA)); | ||
367 | goto out_error; | ||
368 | } | ||
369 | tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; | ||
370 | } else if (rate == PA_HS_MODE_B) { | ||
371 | if (gear > ARRAY_SIZE(hs_fr_table_rB)) { | ||
372 | dev_err(hba->dev, | ||
373 | "%s: index %d exceeds table size %zu\n", | ||
374 | __func__, gear, | ||
375 | ARRAY_SIZE(hs_fr_table_rB)); | ||
376 | goto out_error; | ||
377 | } | ||
378 | tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; | ||
379 | } else { | ||
380 | dev_err(hba->dev, "%s: invalid rate = %d\n", | ||
381 | __func__, rate); | ||
382 | goto out_error; | ||
383 | } | ||
384 | break; | ||
385 | case SLOWAUTO_MODE: | ||
386 | case SLOW_MODE: | ||
387 | if (gear > ARRAY_SIZE(pwm_fr_table)) { | ||
388 | dev_err(hba->dev, | ||
389 | "%s: index %d exceeds table size %zu\n", | ||
390 | __func__, gear, | ||
391 | ARRAY_SIZE(pwm_fr_table)); | ||
392 | goto out_error; | ||
393 | } | ||
394 | tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; | ||
395 | break; | ||
396 | case UNCHANGED: | ||
397 | default: | ||
398 | dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); | ||
399 | goto out_error; | ||
400 | } | ||
401 | |||
402 | /* the two fields of this register shall be written at once */ | ||
403 | ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, | ||
404 | REG_UFS_TX_SYMBOL_CLK_NS_US); | ||
405 | goto out; | ||
406 | |||
407 | out_error: | ||
408 | core_clk_rate = 0; | ||
409 | out: | ||
410 | return core_clk_rate; | ||
411 | } | ||
412 | |||
413 | static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status) | ||
414 | { | ||
415 | unsigned long core_clk_rate = 0; | ||
416 | u32 core_clk_cycles_per_100ms; | ||
417 | |||
418 | switch (status) { | ||
419 | case PRE_CHANGE: | ||
420 | core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1, | ||
421 | SLOWAUTO_MODE, 0); | ||
422 | if (!core_clk_rate) { | ||
423 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", | ||
424 | __func__); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | core_clk_cycles_per_100ms = | ||
428 | (core_clk_rate / MSEC_PER_SEC) * 100; | ||
429 | ufshcd_writel(hba, core_clk_cycles_per_100ms, | ||
430 | REG_UFS_PA_LINK_STARTUP_TIMER); | ||
431 | break; | ||
432 | case POST_CHANGE: | ||
433 | ufs_qcom_link_startup_post_change(hba); | ||
434 | break; | ||
435 | default: | ||
436 | break; | ||
437 | } | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
443 | { | ||
444 | struct ufs_qcom_host *host = hba->priv; | ||
445 | struct phy *phy = host->generic_phy; | ||
446 | int ret = 0; | ||
447 | |||
448 | if (ufs_qcom_is_link_off(hba)) { | ||
449 | /* | ||
450 | * Disable the tx/rx lane symbol clocks before PHY is | ||
451 | * powered down as the PLL source should be disabled | ||
452 | * after downstream clocks are disabled. | ||
453 | */ | ||
454 | ufs_qcom_disable_lane_clks(host); | ||
455 | phy_power_off(phy); | ||
456 | |||
457 | /* Assert PHY soft reset */ | ||
458 | ufs_qcom_assert_reset(hba); | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * If UniPro link is not active, PHY ref_clk, main PHY analog power | ||
464 | * rail and low noise analog power rail for PLL can be switched off. | ||
465 | */ | ||
466 | if (!ufs_qcom_is_link_active(hba)) | ||
467 | phy_power_off(phy); | ||
468 | |||
469 | out: | ||
470 | return ret; | ||
471 | } | ||
472 | |||
473 | static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
474 | { | ||
475 | struct ufs_qcom_host *host = hba->priv; | ||
476 | struct phy *phy = host->generic_phy; | ||
477 | int err; | ||
478 | |||
479 | err = phy_power_on(phy); | ||
480 | if (err) { | ||
481 | dev_err(hba->dev, "%s: failed enabling regs, err = %d\n", | ||
482 | __func__, err); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | hba->is_sys_suspended = false; | ||
487 | |||
488 | out: | ||
489 | return err; | ||
490 | } | ||
491 | |||
492 | struct ufs_qcom_dev_params { | ||
493 | u32 pwm_rx_gear; /* pwm rx gear to work in */ | ||
494 | u32 pwm_tx_gear; /* pwm tx gear to work in */ | ||
495 | u32 hs_rx_gear; /* hs rx gear to work in */ | ||
496 | u32 hs_tx_gear; /* hs tx gear to work in */ | ||
497 | u32 rx_lanes; /* number of rx lanes */ | ||
498 | u32 tx_lanes; /* number of tx lanes */ | ||
499 | u32 rx_pwr_pwm; /* rx pwm working pwr */ | ||
500 | u32 tx_pwr_pwm; /* tx pwm working pwr */ | ||
501 | u32 rx_pwr_hs; /* rx hs working pwr */ | ||
502 | u32 tx_pwr_hs; /* tx hs working pwr */ | ||
503 | u32 hs_rate; /* rate A/B to work in HS */ | ||
504 | u32 desired_working_mode; | ||
505 | }; | ||
506 | |||
507 | static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, | ||
508 | struct ufs_pa_layer_attr *dev_max, | ||
509 | struct ufs_pa_layer_attr *agreed_pwr) | ||
510 | { | ||
511 | int min_qcom_gear; | ||
512 | int min_dev_gear; | ||
513 | bool is_dev_sup_hs = false; | ||
514 | bool is_qcom_max_hs = false; | ||
515 | |||
516 | if (dev_max->pwr_rx == FAST_MODE) | ||
517 | is_dev_sup_hs = true; | ||
518 | |||
519 | if (qcom_param->desired_working_mode == FAST) { | ||
520 | is_qcom_max_hs = true; | ||
521 | min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear, | ||
522 | qcom_param->hs_tx_gear); | ||
523 | } else { | ||
524 | min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear, | ||
525 | qcom_param->pwm_tx_gear); | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * device doesn't support HS but qcom_param->desired_working_mode is | ||
530 | * HS, thus device and qcom_param don't agree | ||
531 | */ | ||
532 | if (!is_dev_sup_hs && is_qcom_max_hs) { | ||
533 | pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n", | ||
534 | __func__); | ||
535 | return -ENOTSUPP; | ||
536 | } else if (is_dev_sup_hs && is_qcom_max_hs) { | ||
537 | /* | ||
538 | * since device supports HS, it supports FAST_MODE. | ||
539 | * since qcom_param->desired_working_mode is also HS | ||
540 | * then final decision (FAST/FASTAUTO) is done according | ||
541 | * to qcom_params as it is the restricting factor | ||
542 | */ | ||
543 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
544 | qcom_param->rx_pwr_hs; | ||
545 | } else { | ||
546 | /* | ||
547 | * here qcom_param->desired_working_mode is PWM. | ||
548 | * it doesn't matter whether device supports HS or PWM, | ||
549 | * in both cases qcom_param->desired_working_mode will | ||
550 | * determine the mode | ||
551 | */ | ||
552 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
553 | qcom_param->rx_pwr_pwm; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * we would like tx to work in the minimum number of lanes | ||
558 | * between device capability and vendor preferences. | ||
559 | * the same decision will be made for rx | ||
560 | */ | ||
561 | agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx, | ||
562 | qcom_param->tx_lanes); | ||
563 | agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx, | ||
564 | qcom_param->rx_lanes); | ||
565 | |||
566 | /* device maximum gear is the minimum between device rx and tx gears */ | ||
567 | min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); | ||
568 | |||
569 | /* | ||
570 | * if both device capabilities and vendor pre-defined preferences are | ||
571 | * both HS or both PWM then set the minimum gear to be the chosen | ||
572 | * working gear. | ||
573 | * if one is PWM and one is HS then the one that is PWM gets to decide | ||
574 | * what the gear is, as it is the one that also decided previously what | ||
575 | * pwr the device will be configured to. | ||
576 | */ | ||
577 | if ((is_dev_sup_hs && is_qcom_max_hs) || | ||
578 | (!is_dev_sup_hs && !is_qcom_max_hs)) | ||
579 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = | ||
580 | min_t(u32, min_dev_gear, min_qcom_gear); | ||
581 | else if (!is_dev_sup_hs) | ||
582 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear; | ||
583 | else | ||
584 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear; | ||
585 | |||
586 | agreed_pwr->hs_rate = qcom_param->hs_rate; | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) | ||
591 | { | ||
592 | int vote; | ||
593 | int err = 0; | ||
594 | char mode[BUS_VECTOR_NAME_LEN]; | ||
595 | |||
596 | ufs_qcom_get_speed_mode(&host->dev_req_params, mode); | ||
597 | |||
598 | vote = ufs_qcom_get_bus_vote(host, mode); | ||
599 | if (vote >= 0) | ||
600 | err = ufs_qcom_set_bus_vote(host, vote); | ||
601 | else | ||
602 | err = vote; | ||
603 | |||
604 | if (err) | ||
605 | dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); | ||
606 | else | ||
607 | host->bus_vote.saved_vote = vote; | ||
608 | return err; | ||
609 | } | ||
610 | |||
611 | static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, | ||
612 | bool status, | ||
613 | struct ufs_pa_layer_attr *dev_max_params, | ||
614 | struct ufs_pa_layer_attr *dev_req_params) | ||
615 | { | ||
616 | u32 val; | ||
617 | struct ufs_qcom_host *host = hba->priv; | ||
618 | struct phy *phy = host->generic_phy; | ||
619 | struct ufs_qcom_dev_params ufs_qcom_cap; | ||
620 | int ret = 0; | ||
621 | int res = 0; | ||
622 | |||
623 | if (!dev_req_params) { | ||
624 | pr_err("%s: incoming dev_req_params is NULL\n", __func__); | ||
625 | ret = -EINVAL; | ||
626 | goto out; | ||
627 | } | ||
628 | |||
629 | switch (status) { | ||
630 | case PRE_CHANGE: | ||
631 | ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX; | ||
632 | ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX; | ||
633 | ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX; | ||
634 | ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX; | ||
635 | ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX; | ||
636 | ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX; | ||
637 | ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM; | ||
638 | ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM; | ||
639 | ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS; | ||
640 | ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS; | ||
641 | ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE; | ||
642 | ufs_qcom_cap.desired_working_mode = | ||
643 | UFS_QCOM_LIMIT_DESIRED_MODE; | ||
644 | |||
645 | ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap, | ||
646 | dev_max_params, | ||
647 | dev_req_params); | ||
648 | if (ret) { | ||
649 | pr_err("%s: failed to determine capabilities\n", | ||
650 | __func__); | ||
651 | goto out; | ||
652 | } | ||
653 | |||
654 | break; | ||
655 | case POST_CHANGE: | ||
656 | if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, | ||
657 | dev_req_params->pwr_rx, | ||
658 | dev_req_params->hs_rate)) { | ||
659 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", | ||
660 | __func__); | ||
661 | /* | ||
662 | * we return error code at the end of the routine, | ||
663 | * but continue to configure UFS_PHY_TX_LANE_ENABLE | ||
664 | * and bus voting as usual | ||
665 | */ | ||
666 | ret = -EINVAL; | ||
667 | } | ||
668 | |||
669 | val = ~(MAX_U32 << dev_req_params->lane_tx); | ||
670 | res = ufs_qcom_phy_set_tx_lane_enable(phy, val); | ||
671 | if (res) { | ||
672 | dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n", | ||
673 | __func__, res); | ||
674 | ret = res; | ||
675 | } | ||
676 | |||
677 | /* cache the power mode parameters to use internally */ | ||
678 | memcpy(&host->dev_req_params, | ||
679 | dev_req_params, sizeof(*dev_req_params)); | ||
680 | ufs_qcom_update_bus_bw_vote(host); | ||
681 | break; | ||
682 | default: | ||
683 | ret = -EINVAL; | ||
684 | break; | ||
685 | } | ||
686 | out: | ||
687 | return ret; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks | ||
692 | * @hba: host controller instance | ||
693 | * | ||
694 | * The QCOM UFS host controller might have some non-standard behaviours | ||
695 | * (quirks) beyond what the UFSHCI specification requires. Advertise all such | ||
696 | * quirks to the standard UFS host controller driver so that it takes them | ||
697 | * into account. | ||
698 | */ | ||
699 | static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) | ||
700 | { | ||
701 | u8 major; | ||
702 | u16 minor, step; | ||
703 | |||
704 | ufs_qcom_get_controller_revision(hba, &major, &minor, &step); | ||
705 | |||
706 | /* | ||
707 | * TBD | ||
708 | * here we should be advertising controller quirks according to | ||
709 | * controller version. | ||
710 | */ | ||
711 | } | ||
712 | |||
713 | static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, | ||
714 | const char *speed_mode) | ||
715 | { | ||
716 | struct device *dev = host->hba->dev; | ||
717 | struct device_node *np = dev->of_node; | ||
718 | int err; | ||
719 | const char *key = "qcom,bus-vector-names"; | ||
720 | |||
721 | if (!speed_mode) { | ||
722 | err = -EINVAL; | ||
723 | goto out; | ||
724 | } | ||
725 | |||
726 | if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) | ||
727 | err = of_property_match_string(np, key, "MAX"); | ||
728 | else | ||
729 | err = of_property_match_string(np, key, speed_mode); | ||
730 | |||
731 | out: | ||
732 | if (err < 0) | ||
733 | dev_err(dev, "%s: Invalid %s mode %d\n", | ||
734 | __func__, speed_mode, err); | ||
735 | return err; | ||
736 | } | ||
737 | |||
738 | static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) | ||
739 | { | ||
740 | int err = 0; | ||
741 | |||
742 | if (vote != host->bus_vote.curr_vote) | ||
743 | host->bus_vote.curr_vote = vote; | ||
744 | |||
745 | return err; | ||
746 | } | ||
747 | |||
748 | static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) | ||
749 | { | ||
750 | int gear = max_t(u32, p->gear_rx, p->gear_tx); | ||
751 | int lanes = max_t(u32, p->lane_rx, p->lane_tx); | ||
752 | int pwr; | ||
753 | |||
754 | /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ | ||
755 | if (!gear) | ||
756 | gear = 1; | ||
757 | |||
758 | if (!lanes) | ||
759 | lanes = 1; | ||
760 | |||
761 | if (!p->pwr_rx && !p->pwr_tx) { | ||
762 | pwr = SLOWAUTO_MODE; | ||
763 | snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); | ||
764 | } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || | ||
765 | p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { | ||
766 | pwr = FAST_MODE; | ||
767 | snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", | ||
768 | p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes); | ||
769 | } else { | ||
770 | pwr = SLOW_MODE; | ||
771 | snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", | ||
772 | "PWM", gear, lanes); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) | ||
777 | { | ||
778 | struct ufs_qcom_host *host = hba->priv; | ||
779 | int err = 0; | ||
780 | int vote = 0; | ||
781 | |||
782 | /* | ||
783 | * In case ufs_qcom_init() is not yet done, simply ignore. | ||
784 | * This ufs_qcom_setup_clocks() shall be called from | ||
785 | * ufs_qcom_init() after init is done. | ||
786 | */ | ||
787 | if (!host) | ||
788 | return 0; | ||
789 | |||
790 | if (on) { | ||
791 | err = ufs_qcom_phy_enable_iface_clk(host->generic_phy); | ||
792 | if (err) | ||
793 | goto out; | ||
794 | |||
795 | err = ufs_qcom_phy_enable_ref_clk(host->generic_phy); | ||
796 | if (err) { | ||
797 | dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n", | ||
798 | __func__, err); | ||
799 | ufs_qcom_phy_disable_iface_clk(host->generic_phy); | ||
800 | goto out; | ||
801 | } | ||
802 | /* enable the device ref clock */ | ||
803 | ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy); | ||
804 | vote = host->bus_vote.saved_vote; | ||
805 | if (vote == host->bus_vote.min_bw_vote) | ||
806 | ufs_qcom_update_bus_bw_vote(host); | ||
807 | } else { | ||
808 | /* M-PHY RMMI interface clocks can be turned off */ | ||
809 | ufs_qcom_phy_disable_iface_clk(host->generic_phy); | ||
810 | if (!ufs_qcom_is_link_active(hba)) { | ||
811 | /* turn off UFS local PHY ref_clk */ | ||
812 | ufs_qcom_phy_disable_ref_clk(host->generic_phy); | ||
813 | /* disable device ref_clk */ | ||
814 | ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy); | ||
815 | } | ||
816 | vote = host->bus_vote.min_bw_vote; | ||
817 | } | ||
818 | |||
819 | err = ufs_qcom_set_bus_vote(host, vote); | ||
820 | if (err) | ||
821 | dev_err(hba->dev, "%s: set bus vote failed %d\n", | ||
822 | __func__, err); | ||
823 | |||
824 | out: | ||
825 | return err; | ||
826 | } | ||
827 | |||
828 | static ssize_t | ||
829 | show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, | ||
830 | char *buf) | ||
831 | { | ||
832 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
833 | struct ufs_qcom_host *host = hba->priv; | ||
834 | |||
835 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
836 | host->bus_vote.is_max_bw_needed); | ||
837 | } | ||
838 | |||
839 | static ssize_t | ||
840 | store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, | ||
841 | const char *buf, size_t count) | ||
842 | { | ||
843 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
844 | struct ufs_qcom_host *host = hba->priv; | ||
845 | uint32_t value; | ||
846 | |||
847 | if (!kstrtou32(buf, 0, &value)) { | ||
848 | host->bus_vote.is_max_bw_needed = !!value; | ||
849 | ufs_qcom_update_bus_bw_vote(host); | ||
850 | } | ||
851 | |||
852 | return count; | ||
853 | } | ||
854 | |||
855 | static int ufs_qcom_bus_register(struct ufs_qcom_host *host) | ||
856 | { | ||
857 | int err; | ||
858 | struct device *dev = host->hba->dev; | ||
859 | struct device_node *np = dev->of_node; | ||
860 | |||
861 | err = of_property_count_strings(np, "qcom,bus-vector-names"); | ||
862 | if (err < 0) { | ||
863 | dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", | ||
864 | __func__, err); | ||
865 | goto out; | ||
866 | } | ||
867 | |||
868 | /* cache the vote index for minimum and maximum bandwidth */ | ||
869 | host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); | ||
870 | host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); | ||
871 | |||
872 | host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; | ||
873 | host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; | ||
874 | sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); | ||
875 | host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; | ||
876 | host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; | ||
877 | err = device_create_file(dev, &host->bus_vote.max_bus_bw); | ||
878 | out: | ||
879 | return err; | ||
880 | } | ||
881 | |||
882 | #define ANDROID_BOOT_DEV_MAX 30 | ||
883 | static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; | ||
884 | static int get_android_boot_dev(char *str) | ||
885 | { | ||
886 | strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX); | ||
887 | return 1; | ||
888 | } | ||
889 | __setup("androidboot.bootdevice=", get_android_boot_dev); | ||
890 | |||
891 | /** | ||
892 | * ufs_qcom_init - bind phy with controller | ||
893 | * @hba: host controller instance | ||
894 | * | ||
895 | * Binds PHY with controller and powers up PHY enabling clocks | ||
896 | * and regulators. | ||
897 | * | ||
898 | * Returns -EPROBE_DEFER if binding fails, returns negative error | ||
899 | * on phy power up failure and returns zero on success. | ||
900 | */ | ||
901 | static int ufs_qcom_init(struct ufs_hba *hba) | ||
902 | { | ||
903 | int err; | ||
904 | struct device *dev = hba->dev; | ||
905 | struct ufs_qcom_host *host; | ||
906 | |||
907 | if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) | ||
908 | return -ENODEV; | ||
909 | |||
910 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); | ||
911 | if (!host) { | ||
912 | err = -ENOMEM; | ||
913 | dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); | ||
914 | goto out; | ||
915 | } | ||
916 | |||
917 | host->hba = hba; | ||
918 | hba->priv = (void *)host; | ||
919 | |||
920 | host->generic_phy = devm_phy_get(dev, "ufsphy"); | ||
921 | |||
922 | if (IS_ERR(host->generic_phy)) { | ||
923 | err = PTR_ERR(host->generic_phy); | ||
924 | dev_err(dev, "%s: PHY get failed %d\n", __func__, err); | ||
925 | goto out; | ||
926 | } | ||
927 | |||
928 | err = ufs_qcom_bus_register(host); | ||
929 | if (err) | ||
930 | goto out_host_free; | ||
931 | |||
932 | phy_init(host->generic_phy); | ||
933 | err = phy_power_on(host->generic_phy); | ||
934 | if (err) | ||
935 | goto out_unregister_bus; | ||
936 | |||
937 | err = ufs_qcom_init_lane_clks(host); | ||
938 | if (err) | ||
939 | goto out_disable_phy; | ||
940 | |||
941 | ufs_qcom_advertise_quirks(hba); | ||
942 | |||
943 | hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING; | ||
944 | hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; | ||
945 | |||
946 | ufs_qcom_setup_clocks(hba, true); | ||
947 | |||
948 | if (hba->dev->id < MAX_UFS_QCOM_HOSTS) | ||
949 | ufs_qcom_hosts[hba->dev->id] = host; | ||
950 | |||
951 | goto out; | ||
952 | |||
953 | out_disable_phy: | ||
954 | phy_power_off(host->generic_phy); | ||
955 | out_unregister_bus: | ||
956 | phy_exit(host->generic_phy); | ||
957 | out_host_free: | ||
958 | devm_kfree(dev, host); | ||
959 | hba->priv = NULL; | ||
960 | out: | ||
961 | return err; | ||
962 | } | ||
963 | |||
964 | static void ufs_qcom_exit(struct ufs_hba *hba) | ||
965 | { | ||
966 | struct ufs_qcom_host *host = hba->priv; | ||
967 | |||
968 | ufs_qcom_disable_lane_clks(host); | ||
969 | phy_power_off(host->generic_phy); | ||
970 | } | ||
971 | |||
972 | static | ||
973 | void ufs_qcom_clk_scale_notify(struct ufs_hba *hba) | ||
974 | { | ||
975 | struct ufs_qcom_host *host = hba->priv; | ||
976 | struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; | ||
977 | |||
978 | if (!dev_req_params) | ||
979 | return; | ||
980 | |||
981 | ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, | ||
982 | dev_req_params->pwr_rx, | ||
983 | dev_req_params->hs_rate); | ||
984 | } | ||
985 | |||
986 | /** | ||
987 | * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations | ||
988 | * | ||
989 | * The variant operations configure the necessary controller and PHY | ||
990 | * handshake during initialization. | ||
991 | */ | ||
992 | static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { | ||
993 | .name = "qcom", | ||
994 | .init = ufs_qcom_init, | ||
995 | .exit = ufs_qcom_exit, | ||
996 | .clk_scale_notify = ufs_qcom_clk_scale_notify, | ||
997 | .setup_clocks = ufs_qcom_setup_clocks, | ||
998 | .hce_enable_notify = ufs_qcom_hce_enable_notify, | ||
999 | .link_startup_notify = ufs_qcom_link_startup_notify, | ||
1000 | .pwr_change_notify = ufs_qcom_pwr_change_notify, | ||
1001 | .suspend = ufs_qcom_suspend, | ||
1002 | .resume = ufs_qcom_resume, | ||
1003 | }; | ||
1004 | EXPORT_SYMBOL(ufs_hba_qcom_vops); | ||
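As a worked example of the timer programming in ufs_qcom_cfg_timers() above, assume core_clk runs at 200 MHz (an illustrative rate, not something this patch mandates) and the link is brought up in PWM G1 / SLOWAUTO_MODE, as ufs_qcom_link_startup_notify() requests on PRE_CHANGE:

/*
 * Worked example, assuming core_clk_rate = 200000000 and gear = UFS_PWM_G1:
 *
 *   core_clk_cycles_per_us = 200000000 / USEC_PER_SEC = 200
 *       -> written to REG_UFS_SYS1CLK_1US
 *   core_clk_period_in_ns  = NSEC_PER_SEC / 200000000 = 5
 *       (5 << OFFSET_CLK_NS_REG) & MASK_CLK_NS_REG = 0x1400
 *   tx_clk_cycles_per_us   = pwm_fr_table[gear - 1][1] = 0x1
 *       -> 0x1400 | 0x1 = 0x1401 written to REG_UFS_TX_SYMBOL_CLK_NS_US
 *
 * ufs_qcom_link_startup_notify() then derives the link startup timeout:
 *   core_clk_cycles_per_100ms = (200000000 / MSEC_PER_SEC) * 100 = 20000000
 *       -> written to REG_UFS_PA_LINK_STARTUP_TIMER
 */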
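The negotiation in ufs_qcom_get_pwr_dev_param() above is easiest to follow with concrete numbers; the device capabilities below are invented for illustration, while the vendor side uses the UFS_QCOM_LIMIT_* values that ufs_qcom_pwr_change_notify() fills in on PRE_CHANGE:

/*
 * Assume the device advertises gear_rx/gear_tx = UFS_HS_G3, lane_rx/lane_tx = 2
 * and pwr_rx = FAST_MODE, while the vendor limits are HS-G2, 2 lanes,
 * FAST_MODE, rate B and desired_working_mode = FAST:
 *
 *   both sides support HS   -> agreed pwr_rx = pwr_tx = FAST_MODE
 *   lanes: min(2, 2)        -> 2 rx lanes, 2 tx lanes
 *   gear:  min(min(G3, G3), min(G2, G2)) -> UFS_HS_G2
 *   hs_rate                 -> PA_HS_MODE_B
 */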
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h new file mode 100644 index 000000000000..9a6febd007df --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef UFS_QCOM_H_ | ||
15 | #define UFS_QCOM_H_ | ||
16 | |||
17 | #define MAX_UFS_QCOM_HOSTS 1 | ||
18 | #define MAX_U32 (~(u32)0) | ||
19 | #define MPHY_TX_FSM_STATE 0x41 | ||
20 | #define TX_FSM_HIBERN8 0x1 | ||
21 | #define HBRN8_POLL_TOUT_MS 100 | ||
22 | #define DEFAULT_CLK_RATE_HZ 1000000 | ||
23 | #define BUS_VECTOR_NAME_LEN 32 | ||
24 | |||
25 | #define UFS_HW_VER_MAJOR_SHFT (28) | ||
26 | #define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT) | ||
27 | #define UFS_HW_VER_MINOR_SHFT (16) | ||
28 | #define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT) | ||
29 | #define UFS_HW_VER_STEP_SHFT (0) | ||
30 | #define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT) | ||
31 | |||
32 | /* vendor specific pre-defined parameters */ | ||
33 | #define SLOW 1 | ||
34 | #define FAST 2 | ||
35 | |||
36 | #define UFS_QCOM_LIMIT_NUM_LANES_RX 2 | ||
37 | #define UFS_QCOM_LIMIT_NUM_LANES_TX 2 | ||
38 | #define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G2 | ||
39 | #define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G2 | ||
40 | #define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4 | ||
41 | #define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4 | ||
42 | #define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE | ||
43 | #define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE | ||
44 | #define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE | ||
45 | #define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE | ||
46 | #define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B | ||
47 | #define UFS_QCOM_LIMIT_DESIRED_MODE FAST | ||
48 | |||
49 | /* QCOM UFS host controller vendor specific registers */ | ||
50 | enum { | ||
51 | REG_UFS_SYS1CLK_1US = 0xC0, | ||
52 | REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4, | ||
53 | REG_UFS_LOCAL_PORT_ID_REG = 0xC8, | ||
54 | REG_UFS_PA_ERR_CODE = 0xCC, | ||
55 | REG_UFS_RETRY_TIMER_REG = 0xD0, | ||
56 | REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8, | ||
57 | REG_UFS_CFG1 = 0xDC, | ||
58 | REG_UFS_CFG2 = 0xE0, | ||
59 | REG_UFS_HW_VERSION = 0xE4, | ||
60 | |||
61 | UFS_DBG_RD_REG_UAWM = 0x100, | ||
62 | UFS_DBG_RD_REG_UARM = 0x200, | ||
63 | UFS_DBG_RD_REG_TXUC = 0x300, | ||
64 | UFS_DBG_RD_REG_RXUC = 0x400, | ||
65 | UFS_DBG_RD_REG_DFC = 0x500, | ||
66 | UFS_DBG_RD_REG_TRLUT = 0x600, | ||
67 | UFS_DBG_RD_REG_TMRLUT = 0x700, | ||
68 | UFS_UFS_DBG_RD_REG_OCSC = 0x800, | ||
69 | |||
70 | UFS_UFS_DBG_RD_DESC_RAM = 0x1500, | ||
71 | UFS_UFS_DBG_RD_PRDT_RAM = 0x1700, | ||
72 | UFS_UFS_DBG_RD_RESP_RAM = 0x1800, | ||
73 | UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, | ||
74 | }; | ||
75 | |||
76 | /* bit definitions for REG_UFS_CFG2 register */ | ||
77 | #define UAWM_HW_CGC_EN (1 << 0) | ||
78 | #define UARM_HW_CGC_EN (1 << 1) | ||
79 | #define TXUC_HW_CGC_EN (1 << 2) | ||
80 | #define RXUC_HW_CGC_EN (1 << 3) | ||
81 | #define DFC_HW_CGC_EN (1 << 4) | ||
82 | #define TRLUT_HW_CGC_EN (1 << 5) | ||
83 | #define TMRLUT_HW_CGC_EN (1 << 6) | ||
84 | #define OCSC_HW_CGC_EN (1 << 7) | ||
85 | |||
86 | #define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\ | ||
87 | TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\ | ||
88 | DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\ | ||
89 | TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN) | ||
90 | |||
91 | /* bit offset */ | ||
92 | enum { | ||
93 | OFFSET_UFS_PHY_SOFT_RESET = 1, | ||
94 | OFFSET_CLK_NS_REG = 10, | ||
95 | }; | ||
96 | |||
97 | /* bit masks */ | ||
98 | enum { | ||
99 | MASK_UFS_PHY_SOFT_RESET = 0x2, | ||
100 | MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF, | ||
101 | MASK_CLK_NS_REG = 0xFFFC00, | ||
102 | }; | ||
103 | |||
104 | enum ufs_qcom_phy_init_type { | ||
105 | UFS_PHY_INIT_FULL, | ||
106 | UFS_PHY_INIT_CFG_RESTORE, | ||
107 | }; | ||
108 | |||
109 | static inline void | ||
110 | ufs_qcom_get_controller_revision(struct ufs_hba *hba, | ||
111 | u8 *major, u16 *minor, u16 *step) | ||
112 | { | ||
113 | u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); | ||
114 | |||
115 | *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT; | ||
116 | *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT; | ||
117 | *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT; | ||
118 | } | ||
119 | |||
120 | static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) | ||
121 | { | ||
122 | ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, | ||
123 | 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); | ||
124 | |||
125 | /* | ||
126 | * Make sure assertion of ufs phy reset is written to | ||
127 | * register before returning | ||
128 | */ | ||
129 | mb(); | ||
130 | } | ||
131 | |||
132 | static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) | ||
133 | { | ||
134 | ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, | ||
135 | 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); | ||
136 | |||
137 | /* | ||
138 | * Make sure de-assertion of ufs phy reset is written to | ||
139 | * register before returning | ||
140 | */ | ||
141 | mb(); | ||
142 | } | ||
143 | |||
144 | struct ufs_qcom_bus_vote { | ||
145 | uint32_t client_handle; | ||
146 | uint32_t curr_vote; | ||
147 | int min_bw_vote; | ||
148 | int max_bw_vote; | ||
149 | int saved_vote; | ||
150 | bool is_max_bw_needed; | ||
151 | struct device_attribute max_bus_bw; | ||
152 | }; | ||
153 | |||
154 | struct ufs_qcom_host { | ||
155 | struct phy *generic_phy; | ||
156 | struct ufs_hba *hba; | ||
157 | struct ufs_qcom_bus_vote bus_vote; | ||
158 | struct ufs_pa_layer_attr dev_req_params; | ||
159 | struct clk *rx_l0_sync_clk; | ||
160 | struct clk *tx_l0_sync_clk; | ||
161 | struct clk *rx_l1_sync_clk; | ||
162 | struct clk *tx_l1_sync_clk; | ||
163 | bool is_lane_clks_enabled; | ||
164 | }; | ||
165 | |||
166 | #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) | ||
167 | #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) | ||
168 | #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) | ||
169 | |||
170 | #endif /* UFS_QCOM_H_ */ | ||
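The ufs_qcom_get_controller_revision() helper above does nothing more than apply the mask/shift pairs from the top of this header to the raw REG_UFS_HW_VERSION value. A minimal user-space sketch of the same decode, with the major-field shift of 28 taken as an assumption (its #define sits just above the quoted lines):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: major in bits 31:28, minor in 27:16, step in 15:0. */
    #define HW_VER_MAJOR_SHFT 28
    #define HW_VER_MAJOR_MASK (0x000FU << HW_VER_MAJOR_SHFT)
    #define HW_VER_MINOR_SHFT 16
    #define HW_VER_MINOR_MASK (0x0FFFU << HW_VER_MINOR_SHFT)
    #define HW_VER_STEP_MASK  0xFFFFU

    int main(void)
    {
            uint32_t ver = 0x20010003;      /* hypothetical raw register value */
            unsigned major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT;
            unsigned minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT;
            unsigned step  = ver & HW_VER_STEP_MASK;

            printf("UFS controller v%u.%u step %u\n", major, minor, step);
            return 0;
    }

For the sample value this prints "UFS controller v2.1 step 3".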
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2e4614b9dddf..5d60a868830d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -4714,10 +4714,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |||
4714 | sdev_printk(KERN_WARNING, sdp, | 4714 | sdev_printk(KERN_WARNING, sdp, |
4715 | "START_STOP failed for power mode: %d, result %x\n", | 4715 | "START_STOP failed for power mode: %d, result %x\n", |
4716 | pwr_mode, ret); | 4716 | pwr_mode, ret); |
4717 | if (driver_byte(ret) & DRIVER_SENSE) { | 4717 | if (driver_byte(ret) & DRIVER_SENSE) |
4718 | scsi_show_sense_hdr(sdp, NULL, &sshdr); | 4718 | scsi_print_sense_hdr(sdp, NULL, &sshdr); |
4719 | scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq); | ||
4720 | } | ||
4721 | } | 4719 | } |
4722 | 4720 | ||
4723 | if (!ret) | 4721 | if (!ret) |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index c52bb5dfaedb..f164f24a4a55 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
@@ -950,6 +950,12 @@ static int virtscsi_probe(struct virtio_device *vdev) | |||
950 | u32 num_queues; | 950 | u32 num_queues; |
951 | struct scsi_host_template *hostt; | 951 | struct scsi_host_template *hostt; |
952 | 952 | ||
953 | if (!vdev->config->get) { | ||
954 | dev_err(&vdev->dev, "%s failure: config access disabled\n", | ||
955 | __func__); | ||
956 | return -EINVAL; | ||
957 | } | ||
958 | |||
953 | /* We need to know how many queues before we allocate. */ | 959 | /* We need to know how many queues before we allocate. */ |
954 | num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; | 960 | num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; |
955 | 961 | ||
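The check added to virtscsi_probe() is a defensive guard: a virtio transport may come up with config-space access disabled, leaving the ->get hook in its virtio_config_ops NULL, and the virtscsi_config_get() call a few lines later would then dereference it. A stripped-down sketch of the same NULL-hook guard (example_probe() is a placeholder name, not part of the patch):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    static int example_probe(struct virtio_device *vdev)
    {
            /* Refuse to bind when the transport cannot read config space. */
            if (!vdev->config->get) {
                    dev_err(&vdev->dev, "%s failure: config access disabled\n",
                            __func__);
                    return -EINVAL;
            }

            /* Only past this point is reading device configuration safe. */
            return 0;
    }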
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index c0506de4f3b6..9e09da412b92 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -2143,22 +2143,22 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2143 | seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" | 2143 | seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" |
2144 | " dma_mode=%02x fast=%d", | 2144 | " dma_mode=%02x fast=%d", |
2145 | hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); | 2145 | hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); |
2146 | seq_printf(m, "\nsync_xfer[] = "); | 2146 | seq_puts(m, "\nsync_xfer[] = "); |
2147 | for (x = 0; x < 7; x++) | 2147 | for (x = 0; x < 7; x++) |
2148 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); | 2148 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); |
2149 | seq_printf(m, "\nsync_stat[] = "); | 2149 | seq_puts(m, "\nsync_stat[] = "); |
2150 | for (x = 0; x < 7; x++) | 2150 | for (x = 0; x < 7; x++) |
2151 | seq_printf(m, "\t%02x", hd->sync_stat[x]); | 2151 | seq_printf(m, "\t%02x", hd->sync_stat[x]); |
2152 | } | 2152 | } |
2153 | #ifdef PROC_STATISTICS | 2153 | #ifdef PROC_STATISTICS |
2154 | if (hd->proc & PR_STATISTICS) { | 2154 | if (hd->proc & PR_STATISTICS) { |
2155 | seq_printf(m, "\ncommands issued: "); | 2155 | seq_puts(m, "\ncommands issued: "); |
2156 | for (x = 0; x < 7; x++) | 2156 | for (x = 0; x < 7; x++) |
2157 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); | 2157 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); |
2158 | seq_printf(m, "\ndisconnects allowed:"); | 2158 | seq_puts(m, "\ndisconnects allowed:"); |
2159 | for (x = 0; x < 7; x++) | 2159 | for (x = 0; x < 7; x++) |
2160 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); | 2160 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); |
2161 | seq_printf(m, "\ndisconnects done: "); | 2161 | seq_puts(m, "\ndisconnects done: "); |
2162 | for (x = 0; x < 7; x++) | 2162 | for (x = 0; x < 7; x++) |
2163 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); | 2163 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); |
2164 | seq_printf(m, | 2164 | seq_printf(m, |
@@ -2167,7 +2167,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2167 | } | 2167 | } |
2168 | #endif | 2168 | #endif |
2169 | if (hd->proc & PR_CONNECTED) { | 2169 | if (hd->proc & PR_CONNECTED) { |
2170 | seq_printf(m, "\nconnected: "); | 2170 | seq_puts(m, "\nconnected: "); |
2171 | if (hd->connected) { | 2171 | if (hd->connected) { |
2172 | cmd = (struct scsi_cmnd *) hd->connected; | 2172 | cmd = (struct scsi_cmnd *) hd->connected; |
2173 | seq_printf(m, " %d:%llu(%02x)", | 2173 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2175,7 +2175,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2175 | } | 2175 | } |
2176 | } | 2176 | } |
2177 | if (hd->proc & PR_INPUTQ) { | 2177 | if (hd->proc & PR_INPUTQ) { |
2178 | seq_printf(m, "\ninput_Q: "); | 2178 | seq_puts(m, "\ninput_Q: "); |
2179 | cmd = (struct scsi_cmnd *) hd->input_Q; | 2179 | cmd = (struct scsi_cmnd *) hd->input_Q; |
2180 | while (cmd) { | 2180 | while (cmd) { |
2181 | seq_printf(m, " %d:%llu(%02x)", | 2181 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2184,7 +2184,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2184 | } | 2184 | } |
2185 | } | 2185 | } |
2186 | if (hd->proc & PR_DISCQ) { | 2186 | if (hd->proc & PR_DISCQ) { |
2187 | seq_printf(m, "\ndisconnected_Q:"); | 2187 | seq_puts(m, "\ndisconnected_Q:"); |
2188 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; | 2188 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; |
2189 | while (cmd) { | 2189 | while (cmd) { |
2190 | seq_printf(m, " %d:%llu(%02x)", | 2190 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2192,7 +2192,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2192 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 2192 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
2193 | } | 2193 | } |
2194 | } | 2194 | } |
2195 | seq_printf(m, "\n"); | 2195 | seq_putc(m, '\n'); |
2196 | spin_unlock_irq(&hd->lock); | 2196 | spin_unlock_irq(&hd->lock); |
2197 | #endif /* PROC_INTERFACE */ | 2197 | #endif /* PROC_INTERFACE */ |
2198 | return 0; | 2198 | return 0; |
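The wd33c93 hunks above follow one mechanical rule: a seq_printf() whose format string contains no conversion specifiers becomes seq_puts(), and a bare "\n" becomes seq_putc(), so constant strings skip the vsnprintf formatting pass. A condensed sketch of the rule (example_show() is a placeholder):

    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "fixed text\n");            /* was: seq_printf(m, "fixed text\n") */
            seq_printf(m, "value = %d\n", 42);      /* format specifiers still need seq_printf() */
            seq_putc(m, '\n');                      /* was: seq_printf(m, "\n") */
            return 0;
    }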
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index f94d73611ab4..0c0f17b9a3eb 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1295,9 +1295,6 @@ static void wd7000_revision(Adapter * host) | |||
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | 1297 | ||
1298 | #undef SPRINTF | ||
1299 | #define SPRINTF(args...) { seq_printf(m, ## args); } | ||
1300 | |||
1301 | static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length) | 1298 | static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length) |
1302 | { | 1299 | { |
1303 | dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length); | 1300 | dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length); |
@@ -1320,43 +1317,43 @@ static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1320 | #endif | 1317 | #endif |
1321 | 1318 | ||
1322 | spin_lock_irqsave(host->host_lock, flags); | 1319 | spin_lock_irqsave(host->host_lock, flags); |
1323 | SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2); | 1320 | seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2); |
1324 | SPRINTF(" IO base: 0x%x\n", adapter->iobase); | 1321 | seq_printf(m, " IO base: 0x%x\n", adapter->iobase); |
1325 | SPRINTF(" IRQ: %d\n", adapter->irq); | 1322 | seq_printf(m, " IRQ: %d\n", adapter->irq); |
1326 | SPRINTF(" DMA channel: %d\n", adapter->dma); | 1323 | seq_printf(m, " DMA channel: %d\n", adapter->dma); |
1327 | SPRINTF(" Interrupts: %d\n", adapter->int_counter); | 1324 | seq_printf(m, " Interrupts: %d\n", adapter->int_counter); |
1328 | SPRINTF(" BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125); | 1325 | seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125); |
1329 | SPRINTF(" BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125); | 1326 | seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125); |
1330 | 1327 | ||
1331 | #ifdef WD7000_DEBUG | 1328 | #ifdef WD7000_DEBUG |
1332 | ogmbs = adapter->mb.ogmb; | 1329 | ogmbs = adapter->mb.ogmb; |
1333 | icmbs = adapter->mb.icmb; | 1330 | icmbs = adapter->mb.icmb; |
1334 | 1331 | ||
1335 | SPRINTF("\nControl port value: 0x%x\n", adapter->control); | 1332 | seq_printf(m, "\nControl port value: 0x%x\n", adapter->control); |
1336 | SPRINTF("Incoming mailbox:\n"); | 1333 | seq_puts(m, "Incoming mailbox:\n"); |
1337 | SPRINTF(" size: %d\n", ICMB_CNT); | 1334 | seq_printf(m, " size: %d\n", ICMB_CNT); |
1338 | SPRINTF(" queued messages: "); | 1335 | seq_puts(m, " queued messages: "); |
1339 | 1336 | ||
1340 | for (i = count = 0; i < ICMB_CNT; i++) | 1337 | for (i = count = 0; i < ICMB_CNT; i++) |
1341 | if (icmbs[i].status) { | 1338 | if (icmbs[i].status) { |
1342 | count++; | 1339 | count++; |
1343 | SPRINTF("0x%x ", i); | 1340 | seq_printf(m, "0x%x ", i); |
1344 | } | 1341 | } |
1345 | 1342 | ||
1346 | SPRINTF(count ? "\n" : "none\n"); | 1343 | seq_puts(m, count ? "\n" : "none\n"); |
1347 | 1344 | ||
1348 | SPRINTF("Outgoing mailbox:\n"); | 1345 | seq_puts(m, "Outgoing mailbox:\n"); |
1349 | SPRINTF(" size: %d\n", OGMB_CNT); | 1346 | seq_printf(m, " size: %d\n", OGMB_CNT); |
1350 | SPRINTF(" next message: 0x%x\n", adapter->next_ogmb); | 1347 | seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb); |
1351 | SPRINTF(" queued messages: "); | 1348 | seq_puts(m, " queued messages: "); |
1352 | 1349 | ||
1353 | for (i = count = 0; i < OGMB_CNT; i++) | 1350 | for (i = count = 0; i < OGMB_CNT; i++) |
1354 | if (ogmbs[i].status) { | 1351 | if (ogmbs[i].status) { |
1355 | count++; | 1352 | count++; |
1356 | SPRINTF("0x%x ", i); | 1353 | seq_printf(m, "0x%x ", i); |
1357 | } | 1354 | } |
1358 | 1355 | ||
1359 | SPRINTF(count ? "\n" : "none\n"); | 1356 | seq_puts(m, count ? "\n" : "none\n"); |
1360 | #endif | 1357 | #endif |
1361 | 1358 | ||
1362 | spin_unlock_irqrestore(host->host_lock, flags); | 1359 | spin_unlock_irqrestore(host->host_lock, flags); |
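Besides the same seq_puts()/seq_printf() substitutions, the wd7000 hunk also drops the driver-local SPRINTF() wrapper, so every output call now names the seq_file handle explicitly instead of picking up `m` from the enclosing scope through a macro. Roughly (a sketch abbreviating the real statements):

    /* before: the macro silently captured the local variable 'm' */
    #define SPRINTF(args...) { seq_printf(m, ## args); }
            SPRINTF(" IRQ: %d\n", adapter->irq);

    /* after: a plain call with no hidden dependency on the caller's scope */
            seq_printf(m, " IRQ: %d\n", adapter->irq);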
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c index 7702664d7ed3..289ad016d925 100644 --- a/drivers/scsi/wd719x.c +++ b/drivers/scsi/wd719x.c | |||
@@ -870,6 +870,7 @@ fail_free_params: | |||
870 | } | 870 | } |
871 | 871 | ||
872 | static struct scsi_host_template wd719x_template = { | 872 | static struct scsi_host_template wd719x_template = { |
873 | .module = THIS_MODULE, | ||
873 | .name = "Western Digital 719x", | 874 | .name = "Western Digital 719x", |
874 | .queuecommand = wd719x_queuecommand, | 875 | .queuecommand = wd719x_queuecommand, |
875 | .eh_abort_handler = wd719x_abort, | 876 | .eh_abort_handler = wd719x_abort, |
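The wd719x change fills in the .module field of the host template; the SCSI midlayer uses it to take a reference on the owning module while any host registered through the template is live, so the driver cannot be unloaded out from under an active host. A skeletal template showing the field in context (the example_* handlers are hypothetical stand-ins):

    #include <linux/module.h>
    #include <scsi/scsi_host.h>

    static int example_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
    static int example_abort(struct scsi_cmnd *cmd);

    static struct scsi_host_template example_template = {
            .module           = THIS_MODULE,  /* pins the module while hosts exist */
            .name             = "example HBA",
            .queuecommand     = example_queuecommand,
            .eh_abort_handler = example_abort,
            .can_queue        = 1,
            .this_id          = -1,
    };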