diff options
Diffstat (limited to 'drivers/scsi')
209 files changed, 24904 insertions, 5496 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 36c21b19e5d7..3bf75924741f 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
| @@ -186,8 +186,12 @@ static ssize_t twa_show_stats(struct device *dev, | |||
| 186 | } /* End twa_show_stats() */ | 186 | } /* End twa_show_stats() */ |
| 187 | 187 | ||
| 188 | /* This function will set a devices queue depth */ | 188 | /* This function will set a devices queue depth */ |
| 189 | static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth) | 189 | static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth, |
| 190 | int reason) | ||
| 190 | { | 191 | { |
| 192 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 193 | return -EOPNOTSUPP; | ||
| 194 | |||
| 191 | if (queue_depth > TW_Q_LENGTH-2) | 195 | if (queue_depth > TW_Q_LENGTH-2) |
| 192 | queue_depth = TW_Q_LENGTH-2; | 196 | queue_depth = TW_Q_LENGTH-2; |
| 193 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | 197 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); |
| @@ -732,7 +736,7 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int | |||
| 732 | break; | 736 | break; |
| 733 | case TW_IOCTL_GET_COMPATIBILITY_INFO: | 737 | case TW_IOCTL_GET_COMPATIBILITY_INFO: |
| 734 | tw_ioctl->driver_command.status = 0; | 738 | tw_ioctl->driver_command.status = 0; |
| 735 | /* Copy compatiblity struct into ioctl data buffer */ | 739 | /* Copy compatibility struct into ioctl data buffer */ |
| 736 | tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; | 740 | tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; |
| 737 | memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); | 741 | memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); |
| 738 | break; | 742 | break; |
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c new file mode 100644 index 000000000000..4d314d740de4 --- /dev/null +++ b/drivers/scsi/3w-sas.c | |||
| @@ -0,0 +1,1924 @@ | |||
| 1 | /* | ||
| 2 | 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. | ||
| 3 | |||
| 4 | Written By: Adam Radford <linuxraid@lsi.com> | ||
| 5 | |||
| 6 | Copyright (C) 2009 LSI Corporation. | ||
| 7 | |||
| 8 | This program is free software; you can redistribute it and/or modify | ||
| 9 | it under the terms of the GNU General Public License as published by | ||
| 10 | the Free Software Foundation; version 2 of the License. | ||
| 11 | |||
| 12 | This program is distributed in the hope that it will be useful, | ||
| 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | GNU General Public License for more details. | ||
| 16 | |||
| 17 | NO WARRANTY | ||
| 18 | THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 19 | CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
| 20 | LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
| 21 | MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
| 22 | solely responsible for determining the appropriateness of using and | ||
| 23 | distributing the Program and assumes all risks associated with its | ||
| 24 | exercise of rights under this Agreement, including but not limited to | ||
| 25 | the risks and costs of program errors, damage to or loss of data, | ||
| 26 | programs or equipment, and unavailability or interruption of operations. | ||
| 27 | |||
| 28 | DISCLAIMER OF LIABILITY | ||
| 29 | NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
| 30 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 31 | DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
| 32 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
| 33 | TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
| 34 | USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
| 35 | HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
| 36 | |||
| 37 | You should have received a copy of the GNU General Public License | ||
| 38 | along with this program; if not, write to the Free Software | ||
| 39 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 40 | |||
| 41 | Controllers supported by this driver: | ||
| 42 | |||
| 43 | LSI 3ware 9750 6Gb/s SAS/SATA-RAID | ||
| 44 | |||
| 45 | Bugs/Comments/Suggestions should be mailed to: | ||
| 46 | linuxraid@lsi.com | ||
| 47 | |||
| 48 | For more information, goto: | ||
| 49 | http://www.lsi.com | ||
| 50 | |||
| 51 | History | ||
| 52 | ------- | ||
| 53 | 3.26.02.000 - Initial driver release. | ||
| 54 | */ | ||
| 55 | |||
| 56 | #include <linux/module.h> | ||
| 57 | #include <linux/reboot.h> | ||
| 58 | #include <linux/spinlock.h> | ||
| 59 | #include <linux/interrupt.h> | ||
| 60 | #include <linux/moduleparam.h> | ||
| 61 | #include <linux/errno.h> | ||
| 62 | #include <linux/types.h> | ||
| 63 | #include <linux/delay.h> | ||
| 64 | #include <linux/pci.h> | ||
| 65 | #include <linux/time.h> | ||
| 66 | #include <linux/mutex.h> | ||
| 67 | #include <linux/smp_lock.h> | ||
| 68 | #include <asm/io.h> | ||
| 69 | #include <asm/irq.h> | ||
| 70 | #include <asm/uaccess.h> | ||
| 71 | #include <scsi/scsi.h> | ||
| 72 | #include <scsi/scsi_host.h> | ||
| 73 | #include <scsi/scsi_tcq.h> | ||
| 74 | #include <scsi/scsi_cmnd.h> | ||
| 75 | #include "3w-sas.h" | ||
| 76 | |||
| 77 | /* Globals */ | ||
| 78 | #define TW_DRIVER_VERSION "3.26.02.000" | ||
| 79 | static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT]; | ||
| 80 | static unsigned int twl_device_extension_count; | ||
| 81 | static int twl_major = -1; | ||
| 82 | extern struct timezone sys_tz; | ||
| 83 | |||
| 84 | /* Module parameters */ | ||
| 85 | MODULE_AUTHOR ("LSI"); | ||
| 86 | MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver"); | ||
| 87 | MODULE_LICENSE("GPL"); | ||
| 88 | MODULE_VERSION(TW_DRIVER_VERSION); | ||
| 89 | |||
| 90 | static int use_msi; | ||
| 91 | module_param(use_msi, int, S_IRUGO); | ||
| 92 | MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0"); | ||
| 93 | |||
| 94 | /* Function prototypes */ | ||
| 95 | static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset); | ||
| 96 | |||
| 97 | /* Functions */ | ||
| 98 | |||
| 99 | /* This function returns AENs through sysfs */ | ||
| 100 | static ssize_t twl_sysfs_aen_read(struct kobject *kobj, | ||
| 101 | struct bin_attribute *bin_attr, | ||
| 102 | char *outbuf, loff_t offset, size_t count) | ||
| 103 | { | ||
| 104 | struct device *dev = container_of(kobj, struct device, kobj); | ||
| 105 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 106 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; | ||
| 107 | unsigned long flags = 0; | ||
| 108 | ssize_t ret; | ||
| 109 | |||
| 110 | if (!capable(CAP_SYS_ADMIN)) | ||
| 111 | return -EACCES; | ||
| 112 | |||
| 113 | spin_lock_irqsave(tw_dev->host->host_lock, flags); | ||
| 114 | ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH); | ||
| 115 | spin_unlock_irqrestore(tw_dev->host->host_lock, flags); | ||
| 116 | |||
| 117 | return ret; | ||
| 118 | } /* End twl_sysfs_aen_read() */ | ||
| 119 | |||
| 120 | /* aen_read sysfs attribute initializer */ | ||
| 121 | static struct bin_attribute twl_sysfs_aen_read_attr = { | ||
| 122 | .attr = { | ||
| 123 | .name = "3ware_aen_read", | ||
| 124 | .mode = S_IRUSR, | ||
| 125 | }, | ||
| 126 | .size = 0, | ||
| 127 | .read = twl_sysfs_aen_read | ||
| 128 | }; | ||
| 129 | |||
| 130 | /* This function returns driver compatibility info through sysfs */ | ||
| 131 | static ssize_t twl_sysfs_compat_info(struct kobject *kobj, | ||
| 132 | struct bin_attribute *bin_attr, | ||
| 133 | char *outbuf, loff_t offset, size_t count) | ||
| 134 | { | ||
| 135 | struct device *dev = container_of(kobj, struct device, kobj); | ||
| 136 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 137 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; | ||
| 138 | unsigned long flags = 0; | ||
| 139 | ssize_t ret; | ||
| 140 | |||
| 141 | if (!capable(CAP_SYS_ADMIN)) | ||
| 142 | return -EACCES; | ||
| 143 | |||
| 144 | spin_lock_irqsave(tw_dev->host->host_lock, flags); | ||
| 145 | ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); | ||
| 146 | spin_unlock_irqrestore(tw_dev->host->host_lock, flags); | ||
| 147 | |||
| 148 | return ret; | ||
| 149 | } /* End twl_sysfs_compat_info() */ | ||
| 150 | |||
| 151 | /* compat_info sysfs attribute initializer */ | ||
| 152 | static struct bin_attribute twl_sysfs_compat_info_attr = { | ||
| 153 | .attr = { | ||
| 154 | .name = "3ware_compat_info", | ||
| 155 | .mode = S_IRUSR, | ||
| 156 | }, | ||
| 157 | .size = 0, | ||
| 158 | .read = twl_sysfs_compat_info | ||
| 159 | }; | ||
| 160 | |||
| 161 | /* Show some statistics about the card */ | ||
| 162 | static ssize_t twl_show_stats(struct device *dev, | ||
| 163 | struct device_attribute *attr, char *buf) | ||
| 164 | { | ||
| 165 | struct Scsi_Host *host = class_to_shost(dev); | ||
| 166 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; | ||
| 167 | unsigned long flags = 0; | ||
| 168 | ssize_t len; | ||
| 169 | |||
| 170 | spin_lock_irqsave(tw_dev->host->host_lock, flags); | ||
| 171 | len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n" | ||
| 172 | "Current commands posted: %4d\n" | ||
| 173 | "Max commands posted: %4d\n" | ||
| 174 | "Last sgl length: %4d\n" | ||
| 175 | "Max sgl length: %4d\n" | ||
| 176 | "Last sector count: %4d\n" | ||
| 177 | "Max sector count: %4d\n" | ||
| 178 | "SCSI Host Resets: %4d\n" | ||
| 179 | "AEN's: %4d\n", | ||
| 180 | TW_DRIVER_VERSION, | ||
| 181 | tw_dev->posted_request_count, | ||
| 182 | tw_dev->max_posted_request_count, | ||
| 183 | tw_dev->sgl_entries, | ||
| 184 | tw_dev->max_sgl_entries, | ||
| 185 | tw_dev->sector_count, | ||
| 186 | tw_dev->max_sector_count, | ||
| 187 | tw_dev->num_resets, | ||
| 188 | tw_dev->aen_count); | ||
| 189 | spin_unlock_irqrestore(tw_dev->host->host_lock, flags); | ||
| 190 | return len; | ||
| 191 | } /* End twl_show_stats() */ | ||
| 192 | |||
| 193 | /* This function will set a devices queue depth */ | ||
| 194 | static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth, | ||
| 195 | int reason) | ||
| 196 | { | ||
| 197 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 198 | return -EOPNOTSUPP; | ||
| 199 | |||
| 200 | if (queue_depth > TW_Q_LENGTH-2) | ||
| 201 | queue_depth = TW_Q_LENGTH-2; | ||
| 202 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | ||
| 203 | return queue_depth; | ||
| 204 | } /* End twl_change_queue_depth() */ | ||
| 205 | |||
| 206 | /* stats sysfs attribute initializer */ | ||
| 207 | static struct device_attribute twl_host_stats_attr = { | ||
| 208 | .attr = { | ||
| 209 | .name = "3ware_stats", | ||
| 210 | .mode = S_IRUGO, | ||
| 211 | }, | ||
| 212 | .show = twl_show_stats | ||
| 213 | }; | ||
| 214 | |||
| 215 | /* Host attributes initializer */ | ||
| 216 | static struct device_attribute *twl_host_attrs[] = { | ||
| 217 | &twl_host_stats_attr, | ||
| 218 | NULL, | ||
| 219 | }; | ||
| 220 | |||
| 221 | /* This function will look up an AEN severity string */ | ||
| 222 | static char *twl_aen_severity_lookup(unsigned char severity_code) | ||
| 223 | { | ||
| 224 | char *retval = NULL; | ||
| 225 | |||
| 226 | if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || | ||
| 227 | (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) | ||
| 228 | goto out; | ||
| 229 | |||
| 230 | retval = twl_aen_severity_table[severity_code]; | ||
| 231 | out: | ||
| 232 | return retval; | ||
| 233 | } /* End twl_aen_severity_lookup() */ | ||
| 234 | |||
| 235 | /* This function will queue an event */ | ||
| 236 | static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) | ||
| 237 | { | ||
| 238 | u32 local_time; | ||
| 239 | struct timeval time; | ||
| 240 | TW_Event *event; | ||
| 241 | unsigned short aen; | ||
| 242 | char host[16]; | ||
| 243 | char *error_str; | ||
| 244 | |||
| 245 | tw_dev->aen_count++; | ||
| 246 | |||
| 247 | /* Fill out event info */ | ||
| 248 | event = tw_dev->event_queue[tw_dev->error_index]; | ||
| 249 | |||
| 250 | host[0] = '\0'; | ||
| 251 | if (tw_dev->host) | ||
| 252 | sprintf(host, " scsi%d:", tw_dev->host->host_no); | ||
| 253 | |||
| 254 | aen = le16_to_cpu(header->status_block.error); | ||
| 255 | memset(event, 0, sizeof(TW_Event)); | ||
| 256 | |||
| 257 | event->severity = TW_SEV_OUT(header->status_block.severity__reserved); | ||
| 258 | do_gettimeofday(&time); | ||
| 259 | local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60)); | ||
| 260 | event->time_stamp_sec = local_time; | ||
| 261 | event->aen_code = aen; | ||
| 262 | event->retrieved = TW_AEN_NOT_RETRIEVED; | ||
| 263 | event->sequence_id = tw_dev->error_sequence_id; | ||
| 264 | tw_dev->error_sequence_id++; | ||
| 265 | |||
| 266 | /* Check for embedded error string */ | ||
| 267 | error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); | ||
| 268 | |||
| 269 | header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; | ||
| 270 | event->parameter_len = strlen(header->err_specific_desc); | ||
| 271 | memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str)); | ||
| 272 | if (event->severity != TW_AEN_SEVERITY_DEBUG) | ||
| 273 | printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", | ||
| 274 | host, | ||
| 275 | twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), | ||
| 276 | TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str, | ||
| 277 | header->err_specific_desc); | ||
| 278 | else | ||
| 279 | tw_dev->aen_count--; | ||
| 280 | |||
| 281 | tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; | ||
| 282 | } /* End twl_aen_queue_event() */ | ||
| 283 | |||
| 284 | /* This function will attempt to post a command packet to the board */ | ||
| 285 | static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) | ||
| 286 | { | ||
| 287 | dma_addr_t command_que_value; | ||
| 288 | |||
| 289 | command_que_value = tw_dev->command_packet_phys[request_id]; | ||
| 290 | command_que_value += TW_COMMAND_OFFSET; | ||
| 291 | |||
| 292 | /* First write upper 4 bytes */ | ||
| 293 | writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev)); | ||
| 294 | /* Then the lower 4 bytes */ | ||
| 295 | writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev)); | ||
| 296 | |||
| 297 | tw_dev->state[request_id] = TW_S_POSTED; | ||
| 298 | tw_dev->posted_request_count++; | ||
| 299 | if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) | ||
| 300 | tw_dev->max_posted_request_count = tw_dev->posted_request_count; | ||
| 301 | |||
| 302 | return 0; | ||
| 303 | } /* End twl_post_command_packet() */ | ||
| 304 | |||
| 305 | /* This function will perform a pci-dma mapping for a scatter gather list */ | ||
| 306 | static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) | ||
| 307 | { | ||
| 308 | int use_sg; | ||
| 309 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
| 310 | |||
| 311 | use_sg = scsi_dma_map(cmd); | ||
| 312 | if (!use_sg) | ||
| 313 | return 0; | ||
| 314 | else if (use_sg < 0) { | ||
| 315 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); | ||
| 316 | return 0; | ||
| 317 | } | ||
| 318 | |||
| 319 | cmd->SCp.phase = TW_PHASE_SGLIST; | ||
| 320 | cmd->SCp.have_data_in = use_sg; | ||
| 321 | |||
| 322 | return use_sg; | ||
| 323 | } /* End twl_map_scsi_sg_data() */ | ||
| 324 | |||
| 325 | /* This function hands scsi cdb's to the firmware */ | ||
| 326 | static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) | ||
| 327 | { | ||
| 328 | TW_Command_Full *full_command_packet; | ||
| 329 | TW_Command_Apache *command_packet; | ||
| 330 | int i, sg_count; | ||
| 331 | struct scsi_cmnd *srb = NULL; | ||
| 332 | struct scatterlist *sglist = NULL, *sg; | ||
| 333 | int retval = 1; | ||
| 334 | |||
| 335 | if (tw_dev->srb[request_id]) { | ||
| 336 | srb = tw_dev->srb[request_id]; | ||
| 337 | if (scsi_sglist(srb)) | ||
| 338 | sglist = scsi_sglist(srb); | ||
| 339 | } | ||
| 340 | |||
| 341 | /* Initialize command packet */ | ||
| 342 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 343 | full_command_packet->header.header_desc.size_header = 128; | ||
| 344 | full_command_packet->header.status_block.error = 0; | ||
| 345 | full_command_packet->header.status_block.severity__reserved = 0; | ||
| 346 | |||
| 347 | command_packet = &full_command_packet->command.newcommand; | ||
| 348 | command_packet->status = 0; | ||
| 349 | command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); | ||
| 350 | |||
| 351 | /* We forced 16 byte cdb use earlier */ | ||
| 352 | if (!cdb) | ||
| 353 | memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); | ||
| 354 | else | ||
| 355 | memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); | ||
| 356 | |||
| 357 | if (srb) { | ||
| 358 | command_packet->unit = srb->device->id; | ||
| 359 | command_packet->request_id__lunl = | ||
| 360 | cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id)); | ||
| 361 | } else { | ||
| 362 | command_packet->request_id__lunl = | ||
| 363 | cpu_to_le16(TW_REQ_LUN_IN(0, request_id)); | ||
| 364 | command_packet->unit = 0; | ||
| 365 | } | ||
| 366 | |||
| 367 | command_packet->sgl_offset = 16; | ||
| 368 | |||
| 369 | if (!sglistarg) { | ||
| 370 | /* Map sglist from scsi layer to cmd packet */ | ||
| 371 | if (scsi_sg_count(srb)) { | ||
| 372 | sg_count = twl_map_scsi_sg_data(tw_dev, request_id); | ||
| 373 | if (sg_count == 0) | ||
| 374 | goto out; | ||
| 375 | |||
| 376 | scsi_for_each_sg(srb, sg, sg_count, i) { | ||
| 377 | command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); | ||
| 378 | command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg)); | ||
| 379 | } | ||
| 380 | command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]))); | ||
| 381 | } | ||
| 382 | } else { | ||
| 383 | /* Internal cdb post */ | ||
| 384 | for (i = 0; i < use_sg; i++) { | ||
| 385 | command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address); | ||
| 386 | command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length); | ||
| 387 | } | ||
| 388 | command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg)); | ||
| 389 | } | ||
| 390 | |||
| 391 | /* Update some stats */ | ||
| 392 | if (srb) { | ||
| 393 | tw_dev->sector_count = scsi_bufflen(srb) / 512; | ||
| 394 | if (tw_dev->sector_count > tw_dev->max_sector_count) | ||
| 395 | tw_dev->max_sector_count = tw_dev->sector_count; | ||
| 396 | tw_dev->sgl_entries = scsi_sg_count(srb); | ||
| 397 | if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) | ||
| 398 | tw_dev->max_sgl_entries = tw_dev->sgl_entries; | ||
| 399 | } | ||
| 400 | |||
| 401 | /* Now post the command to the board */ | ||
| 402 | retval = twl_post_command_packet(tw_dev, request_id); | ||
| 403 | |||
| 404 | out: | ||
| 405 | return retval; | ||
| 406 | } /* End twl_scsiop_execute_scsi() */ | ||
| 407 | |||
| 408 | /* This function will read the aen queue from the isr */ | ||
| 409 | static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) | ||
| 410 | { | ||
| 411 | char cdb[TW_MAX_CDB_LEN]; | ||
| 412 | TW_SG_Entry_ISO sglist[1]; | ||
| 413 | TW_Command_Full *full_command_packet; | ||
| 414 | int retval = 1; | ||
| 415 | |||
| 416 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 417 | memset(full_command_packet, 0, sizeof(TW_Command_Full)); | ||
| 418 | |||
| 419 | /* Initialize cdb */ | ||
| 420 | memset(&cdb, 0, TW_MAX_CDB_LEN); | ||
| 421 | cdb[0] = REQUEST_SENSE; /* opcode */ | ||
| 422 | cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ | ||
| 423 | |||
| 424 | /* Initialize sglist */ | ||
| 425 | memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); | ||
| 426 | sglist[0].length = TW_SECTOR_SIZE; | ||
| 427 | sglist[0].address = tw_dev->generic_buffer_phys[request_id]; | ||
| 428 | |||
| 429 | /* Mark internal command */ | ||
| 430 | tw_dev->srb[request_id] = NULL; | ||
| 431 | |||
| 432 | /* Now post the command packet */ | ||
| 433 | if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { | ||
| 434 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue"); | ||
| 435 | goto out; | ||
| 436 | } | ||
| 437 | retval = 0; | ||
| 438 | out: | ||
| 439 | return retval; | ||
| 440 | } /* End twl_aen_read_queue() */ | ||
| 441 | |||
| 442 | /* This function will sync firmware time with the host time */ | ||
| 443 | static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) | ||
| 444 | { | ||
| 445 | u32 schedulertime; | ||
| 446 | struct timeval utc; | ||
| 447 | TW_Command_Full *full_command_packet; | ||
| 448 | TW_Command *command_packet; | ||
| 449 | TW_Param_Apache *param; | ||
| 450 | u32 local_time; | ||
| 451 | |||
| 452 | /* Fill out the command packet */ | ||
| 453 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 454 | memset(full_command_packet, 0, sizeof(TW_Command_Full)); | ||
| 455 | command_packet = &full_command_packet->command.oldcommand; | ||
| 456 | command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); | ||
| 457 | command_packet->request_id = request_id; | ||
| 458 | command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); | ||
| 459 | command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); | ||
| 460 | command_packet->size = TW_COMMAND_SIZE; | ||
| 461 | command_packet->byte6_offset.parameter_count = cpu_to_le16(1); | ||
| 462 | |||
| 463 | /* Setup the param */ | ||
| 464 | param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; | ||
| 465 | memset(param, 0, TW_SECTOR_SIZE); | ||
| 466 | param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ | ||
| 467 | param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ | ||
| 468 | param->parameter_size_bytes = cpu_to_le16(4); | ||
| 469 | |||
| 470 | /* Convert system time in UTC to local time seconds since last | ||
| 471 | Sunday 12:00AM */ | ||
| 472 | do_gettimeofday(&utc); | ||
| 473 | local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60)); | ||
| 474 | schedulertime = local_time - (3 * 86400); | ||
| 475 | schedulertime = cpu_to_le32(schedulertime % 604800); | ||
| 476 | |||
| 477 | memcpy(param->data, &schedulertime, sizeof(u32)); | ||
| 478 | |||
| 479 | /* Mark internal command */ | ||
| 480 | tw_dev->srb[request_id] = NULL; | ||
| 481 | |||
| 482 | /* Now post the command */ | ||
| 483 | twl_post_command_packet(tw_dev, request_id); | ||
| 484 | } /* End twl_aen_sync_time() */ | ||
| 485 | |||
| 486 | /* This function will assign an available request id */ | ||
| 487 | static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id) | ||
| 488 | { | ||
| 489 | *request_id = tw_dev->free_queue[tw_dev->free_head]; | ||
| 490 | tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; | ||
| 491 | tw_dev->state[*request_id] = TW_S_STARTED; | ||
| 492 | } /* End twl_get_request_id() */ | ||
| 493 | |||
| 494 | /* This function will free a request id */ | ||
| 495 | static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id) | ||
| 496 | { | ||
| 497 | tw_dev->free_queue[tw_dev->free_tail] = request_id; | ||
| 498 | tw_dev->state[request_id] = TW_S_FINISHED; | ||
| 499 | tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; | ||
| 500 | } /* End twl_free_request_id() */ | ||
| 501 | |||
| 502 | /* This function will complete an aen request from the isr */ | ||
| 503 | static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id) | ||
| 504 | { | ||
| 505 | TW_Command_Full *full_command_packet; | ||
| 506 | TW_Command *command_packet; | ||
| 507 | TW_Command_Apache_Header *header; | ||
| 508 | unsigned short aen; | ||
| 509 | int retval = 1; | ||
| 510 | |||
| 511 | header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; | ||
| 512 | tw_dev->posted_request_count--; | ||
| 513 | aen = le16_to_cpu(header->status_block.error); | ||
| 514 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 515 | command_packet = &full_command_packet->command.oldcommand; | ||
| 516 | |||
| 517 | /* First check for internal completion of set param for time sync */ | ||
| 518 | if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { | ||
| 519 | /* Keep reading the queue in case there are more aen's */ | ||
| 520 | if (twl_aen_read_queue(tw_dev, request_id)) | ||
| 521 | goto out2; | ||
| 522 | else { | ||
| 523 | retval = 0; | ||
| 524 | goto out; | ||
| 525 | } | ||
| 526 | } | ||
| 527 | |||
| 528 | switch (aen) { | ||
| 529 | case TW_AEN_QUEUE_EMPTY: | ||
| 530 | /* Quit reading the queue if this is the last one */ | ||
| 531 | break; | ||
| 532 | case TW_AEN_SYNC_TIME_WITH_HOST: | ||
| 533 | twl_aen_sync_time(tw_dev, request_id); | ||
| 534 | retval = 0; | ||
| 535 | goto out; | ||
| 536 | default: | ||
| 537 | twl_aen_queue_event(tw_dev, header); | ||
| 538 | |||
| 539 | /* If there are more aen's, keep reading the queue */ | ||
| 540 | if (twl_aen_read_queue(tw_dev, request_id)) | ||
| 541 | goto out2; | ||
| 542 | else { | ||
| 543 | retval = 0; | ||
| 544 | goto out; | ||
| 545 | } | ||
| 546 | } | ||
| 547 | retval = 0; | ||
| 548 | out2: | ||
| 549 | tw_dev->state[request_id] = TW_S_COMPLETED; | ||
| 550 | twl_free_request_id(tw_dev, request_id); | ||
| 551 | clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); | ||
| 552 | out: | ||
| 553 | return retval; | ||
| 554 | } /* End twl_aen_complete() */ | ||
| 555 | |||
| 556 | /* This function will poll for a response */ | ||
| 557 | static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) | ||
| 558 | { | ||
| 559 | unsigned long before; | ||
| 560 | dma_addr_t mfa; | ||
| 561 | u32 regh, regl; | ||
| 562 | u32 response; | ||
| 563 | int retval = 1; | ||
| 564 | int found = 0; | ||
| 565 | |||
| 566 | before = jiffies; | ||
| 567 | |||
| 568 | while (!found) { | ||
| 569 | if (sizeof(dma_addr_t) > 4) { | ||
| 570 | regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); | ||
| 571 | regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); | ||
| 572 | mfa = ((u64)regh << 32) | regl; | ||
| 573 | } else | ||
| 574 | mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); | ||
| 575 | |||
| 576 | response = (u32)mfa; | ||
| 577 | |||
| 578 | if (TW_RESID_OUT(response) == request_id) | ||
| 579 | found = 1; | ||
| 580 | |||
| 581 | if (time_after(jiffies, before + HZ * seconds)) | ||
| 582 | goto out; | ||
| 583 | |||
| 584 | msleep(50); | ||
| 585 | } | ||
| 586 | retval = 0; | ||
| 587 | out: | ||
| 588 | return retval; | ||
| 589 | } /* End twl_poll_response() */ | ||
| 590 | |||
| 591 | /* This function will drain the aen queue */ | ||
| 592 | static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) | ||
| 593 | { | ||
| 594 | int request_id = 0; | ||
| 595 | char cdb[TW_MAX_CDB_LEN]; | ||
| 596 | TW_SG_Entry_ISO sglist[1]; | ||
| 597 | int finished = 0, count = 0; | ||
| 598 | TW_Command_Full *full_command_packet; | ||
| 599 | TW_Command_Apache_Header *header; | ||
| 600 | unsigned short aen; | ||
| 601 | int first_reset = 0, queue = 0, retval = 1; | ||
| 602 | |||
| 603 | if (no_check_reset) | ||
| 604 | first_reset = 0; | ||
| 605 | else | ||
| 606 | first_reset = 1; | ||
| 607 | |||
| 608 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 609 | memset(full_command_packet, 0, sizeof(TW_Command_Full)); | ||
| 610 | |||
| 611 | /* Initialize cdb */ | ||
| 612 | memset(&cdb, 0, TW_MAX_CDB_LEN); | ||
| 613 | cdb[0] = REQUEST_SENSE; /* opcode */ | ||
| 614 | cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ | ||
| 615 | |||
| 616 | /* Initialize sglist */ | ||
| 617 | memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); | ||
| 618 | sglist[0].length = TW_SECTOR_SIZE; | ||
| 619 | sglist[0].address = tw_dev->generic_buffer_phys[request_id]; | ||
| 620 | |||
| 621 | /* Mark internal command */ | ||
| 622 | tw_dev->srb[request_id] = NULL; | ||
| 623 | |||
| 624 | do { | ||
| 625 | /* Send command to the board */ | ||
| 626 | if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { | ||
| 627 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense"); | ||
| 628 | goto out; | ||
| 629 | } | ||
| 630 | |||
| 631 | /* Now poll for completion */ | ||
| 632 | if (twl_poll_response(tw_dev, request_id, 30)) { | ||
| 633 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue"); | ||
| 634 | tw_dev->posted_request_count--; | ||
| 635 | goto out; | ||
| 636 | } | ||
| 637 | |||
| 638 | tw_dev->posted_request_count--; | ||
| 639 | header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; | ||
| 640 | aen = le16_to_cpu(header->status_block.error); | ||
| 641 | queue = 0; | ||
| 642 | count++; | ||
| 643 | |||
| 644 | switch (aen) { | ||
| 645 | case TW_AEN_QUEUE_EMPTY: | ||
| 646 | if (first_reset != 1) | ||
| 647 | goto out; | ||
| 648 | else | ||
| 649 | finished = 1; | ||
| 650 | break; | ||
| 651 | case TW_AEN_SOFT_RESET: | ||
| 652 | if (first_reset == 0) | ||
| 653 | first_reset = 1; | ||
| 654 | else | ||
| 655 | queue = 1; | ||
| 656 | break; | ||
| 657 | case TW_AEN_SYNC_TIME_WITH_HOST: | ||
| 658 | break; | ||
| 659 | default: | ||
| 660 | queue = 1; | ||
| 661 | } | ||
| 662 | |||
| 663 | /* Now queue an event info */ | ||
| 664 | if (queue) | ||
| 665 | twl_aen_queue_event(tw_dev, header); | ||
| 666 | } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); | ||
| 667 | |||
| 668 | if (count == TW_MAX_AEN_DRAIN) | ||
| 669 | goto out; | ||
| 670 | |||
| 671 | retval = 0; | ||
| 672 | out: | ||
| 673 | tw_dev->state[request_id] = TW_S_INITIAL; | ||
| 674 | return retval; | ||
| 675 | } /* End twl_aen_drain_queue() */ | ||
| 676 | |||
| 677 | /* This function will allocate memory and check if it is correctly aligned */ | ||
| 678 | static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) | ||
| 679 | { | ||
| 680 | int i; | ||
| 681 | dma_addr_t dma_handle; | ||
| 682 | unsigned long *cpu_addr; | ||
| 683 | int retval = 1; | ||
| 684 | |||
| 685 | cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); | ||
| 686 | if (!cpu_addr) { | ||
| 687 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); | ||
| 688 | goto out; | ||
| 689 | } | ||
| 690 | |||
| 691 | memset(cpu_addr, 0, size*TW_Q_LENGTH); | ||
| 692 | |||
| 693 | for (i = 0; i < TW_Q_LENGTH; i++) { | ||
| 694 | switch(which) { | ||
| 695 | case 0: | ||
| 696 | tw_dev->command_packet_phys[i] = dma_handle+(i*size); | ||
| 697 | tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); | ||
| 698 | break; | ||
| 699 | case 1: | ||
| 700 | tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); | ||
| 701 | tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); | ||
| 702 | break; | ||
| 703 | case 2: | ||
| 704 | tw_dev->sense_buffer_phys[i] = dma_handle+(i*size); | ||
| 705 | tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size)); | ||
| 706 | break; | ||
| 707 | } | ||
| 708 | } | ||
| 709 | retval = 0; | ||
| 710 | out: | ||
| 711 | return retval; | ||
| 712 | } /* End twl_allocate_memory() */ | ||
| 713 | |||
/* This function will load the request id and various sgls for ioctls.
   Fills in the request id and the single scatter/gather entry pointing at
   the ioctl data area for either a new-style (EXECUTE_SCSI) or old-style
   command packet, before the packet is posted to the controller. */
static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry_ISO *sgl;
	unsigned int pae = 0;

	/* 32-bit kernel with a 64-bit dma_addr_t: the old-style sgl sits one
	   32-bit word further into the packet and the packet grows by one
	   word -- presumably to make room for the wider address; TODO confirm
	   against the firmware packet layout */
	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		/* New-style command: request id and LUN share packed fields */
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
		if (length) {
			/* Data begins right after the ioctl header; the -1
			   matches the 1-byte placeholder data buffer at the
			   end of TW_Ioctl_Buf_Apache (same -1 used where the
			   buffer is allocated in twl_chrdev_ioctl) */
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
		}
		/* sgl entry count: one entry when there is data, else zero */
		newcommand->sgl_entries__lunh =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sg list: locate the embedded sgl at the end
			   of the packet ('size' counts 32-bit words), adjusted
			   for wide-address configurations via 'pae' */
			sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			sgl->length = TW_CPU_TO_SGL(length);
			/* Packet grows by the same word adjustments */
			oldcommand->size += pae;
			oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
		}
	}
} /* End twl_load_sgl() */
| 749 | |||
/* This function handles ioctl for the character device
   This interface is used by smartmontools open source software.
   Serializes ioctls via tw_dev->ioctl_lock, copies the user's command and
   data into a coherent DMA buffer, posts it as an internal (srb == NULL)
   command, and waits for the ISR to signal completion; a timeout triggers a
   controller reset.  Returns 0 on success or a negative errno. */
static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
	int retval = -EFAULT;	/* default: failed user copies fall through with -EFAULT */
	void __user *argp = (void __user *)arg;

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = -EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = -EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory; the -1 accounts for the 1-byte
	   placeholder data buffer at the end of TW_Ioctl_Buf_Apache */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = -ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl (header + user data) */
	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twl_get_request_id(tw_dev, &request_id);

		/* Flag internal command (no scsi_cmnd attached) */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl so the ISR knows to wake us */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twl_post_command_packet(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete; the ISR sets
		   chrdev_request_id back to TW_IOCTL_CHRDEV_FREE */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x6,
			       cmd);
			retval = -EIO;
			twl_reset_device_extension(tw_dev, 1);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io: release the request id under the lock */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twl_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	default:
		retval = -ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	return retval;
} /* End twl_chrdev_ioctl() */
| 862 | |||
| 863 | /* This function handles open for the character device */ | ||
| 864 | static int twl_chrdev_open(struct inode *inode, struct file *file) | ||
| 865 | { | ||
| 866 | unsigned int minor_number; | ||
| 867 | int retval = -ENODEV; | ||
| 868 | |||
| 869 | if (!capable(CAP_SYS_ADMIN)) { | ||
| 870 | retval = -EACCES; | ||
| 871 | goto out; | ||
| 872 | } | ||
| 873 | |||
| 874 | cycle_kernel_lock(); | ||
| 875 | minor_number = iminor(inode); | ||
| 876 | if (minor_number >= twl_device_extension_count) | ||
| 877 | goto out; | ||
| 878 | retval = 0; | ||
| 879 | out: | ||
| 880 | return retval; | ||
| 881 | } /* End twl_chrdev_open() */ | ||
| 882 | |||
/* File operations struct for character device (one minor per controller,
   used by smartmontools via twl_chrdev_ioctl) */
static const struct file_operations twl_fops = {
	.owner = THIS_MODULE,
	.ioctl = twl_chrdev_ioctl,	/* legacy (BKL-era) ioctl entry point */
	.open = twl_chrdev_open,
	.release = NULL			/* nothing to clean up on close */
};
| 890 | |||
| 891 | /* This function passes sense data from firmware to scsi layer */ | ||
| 892 | static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host) | ||
| 893 | { | ||
| 894 | TW_Command_Apache_Header *header; | ||
| 895 | TW_Command_Full *full_command_packet; | ||
| 896 | unsigned short error; | ||
| 897 | char *error_str; | ||
| 898 | int retval = 1; | ||
| 899 | |||
| 900 | header = tw_dev->sense_buffer_virt[i]; | ||
| 901 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 902 | |||
| 903 | /* Get embedded firmware error string */ | ||
| 904 | error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]); | ||
| 905 | |||
| 906 | /* Don't print error for Logical unit not supported during rollcall */ | ||
| 907 | error = le16_to_cpu(header->status_block.error); | ||
| 908 | if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) { | ||
| 909 | if (print_host) | ||
| 910 | printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", | ||
| 911 | tw_dev->host->host_no, | ||
| 912 | TW_MESSAGE_SOURCE_CONTROLLER_ERROR, | ||
| 913 | header->status_block.error, | ||
| 914 | error_str, | ||
| 915 | header->err_specific_desc); | ||
| 916 | else | ||
| 917 | printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n", | ||
| 918 | TW_MESSAGE_SOURCE_CONTROLLER_ERROR, | ||
| 919 | header->status_block.error, | ||
| 920 | error_str, | ||
| 921 | header->err_specific_desc); | ||
| 922 | } | ||
| 923 | |||
| 924 | if (copy_sense) { | ||
| 925 | memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH); | ||
| 926 | tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); | ||
| 927 | goto out; | ||
| 928 | } | ||
| 929 | out: | ||
| 930 | return retval; | ||
| 931 | } /* End twl_fill_sense() */ | ||
| 932 | |||
| 933 | /* This function will free up device extension resources */ | ||
| 934 | static void twl_free_device_extension(TW_Device_Extension *tw_dev) | ||
| 935 | { | ||
| 936 | if (tw_dev->command_packet_virt[0]) | ||
| 937 | pci_free_consistent(tw_dev->tw_pci_dev, | ||
| 938 | sizeof(TW_Command_Full)*TW_Q_LENGTH, | ||
| 939 | tw_dev->command_packet_virt[0], | ||
| 940 | tw_dev->command_packet_phys[0]); | ||
| 941 | |||
| 942 | if (tw_dev->generic_buffer_virt[0]) | ||
| 943 | pci_free_consistent(tw_dev->tw_pci_dev, | ||
| 944 | TW_SECTOR_SIZE*TW_Q_LENGTH, | ||
| 945 | tw_dev->generic_buffer_virt[0], | ||
| 946 | tw_dev->generic_buffer_phys[0]); | ||
| 947 | |||
| 948 | if (tw_dev->sense_buffer_virt[0]) | ||
| 949 | pci_free_consistent(tw_dev->tw_pci_dev, | ||
| 950 | sizeof(TW_Command_Apache_Header)* | ||
| 951 | TW_Q_LENGTH, | ||
| 952 | tw_dev->sense_buffer_virt[0], | ||
| 953 | tw_dev->sense_buffer_phys[0]); | ||
| 954 | |||
| 955 | kfree(tw_dev->event_queue[0]); | ||
| 956 | } /* End twl_free_device_extension() */ | ||
| 957 | |||
| 958 | /* This function will get parameter table entries from the firmware */ | ||
| 959 | static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) | ||
| 960 | { | ||
| 961 | TW_Command_Full *full_command_packet; | ||
| 962 | TW_Command *command_packet; | ||
| 963 | TW_Param_Apache *param; | ||
| 964 | void *retval = NULL; | ||
| 965 | |||
| 966 | /* Setup the command packet */ | ||
| 967 | full_command_packet = tw_dev->command_packet_virt[request_id]; | ||
| 968 | memset(full_command_packet, 0, sizeof(TW_Command_Full)); | ||
| 969 | command_packet = &full_command_packet->command.oldcommand; | ||
| 970 | |||
| 971 | command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); | ||
| 972 | command_packet->size = TW_COMMAND_SIZE; | ||
| 973 | command_packet->request_id = request_id; | ||
| 974 | command_packet->byte6_offset.block_count = cpu_to_le16(1); | ||
| 975 | |||
| 976 | /* Now setup the param */ | ||
| 977 | param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; | ||
| 978 | memset(param, 0, TW_SECTOR_SIZE); | ||
| 979 | param->table_id = cpu_to_le16(table_id | 0x8000); | ||
| 980 | param->parameter_id = cpu_to_le16(parameter_id); | ||
| 981 | param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); | ||
| 982 | |||
| 983 | command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); | ||
| 984 | command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); | ||
| 985 | |||
| 986 | /* Post the command packet to the board */ | ||
| 987 | twl_post_command_packet(tw_dev, request_id); | ||
| 988 | |||
| 989 | /* Poll for completion */ | ||
| 990 | if (twl_poll_response(tw_dev, request_id, 30)) | ||
| 991 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param") | ||
| 992 | else | ||
| 993 | retval = (void *)&(param->data[0]); | ||
| 994 | |||
| 995 | tw_dev->posted_request_count--; | ||
| 996 | tw_dev->state[request_id] = TW_S_INITIAL; | ||
| 997 | |||
| 998 | return retval; | ||
| 999 | } /* End twl_get_param() */ | ||
| 1000 | |||
/* This function will send an initconnection command to controller.
 *
 * Negotiates message credits with the firmware and, when set_features
 * includes TW_EXTENDED_INIT_CONNECT, exchanges driver/firmware SRL, arch
 * id, branch and build numbers.  The fw_on_ctlr_* and init_connect_result
 * outputs are only written on the extended path.  Uses request id 0 and
 * polls up to 30 seconds for completion.  Returns 0 on success, 1 on
 * failure.
 */
static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	/* Turn on 64-bit sgl support if we need to */
	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	/* Byte-swap only after all feature bits are set (the |= above must
	   operate on the CPU-order value) */
	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twl_post_command_packet(tw_dev, request_id);

	/* Poll for completion */
	if (twl_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
	} else {
		/* Firmware writes its own revision info back into the packet */
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twl_initconnection() */
| 1064 | |||
| 1065 | /* This function will initialize the fields of a device extension */ | ||
| 1066 | static int twl_initialize_device_extension(TW_Device_Extension *tw_dev) | ||
| 1067 | { | ||
| 1068 | int i, retval = 1; | ||
| 1069 | |||
| 1070 | /* Initialize command packet buffers */ | ||
| 1071 | if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { | ||
| 1072 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed"); | ||
| 1073 | goto out; | ||
| 1074 | } | ||
| 1075 | |||
| 1076 | /* Initialize generic buffer */ | ||
| 1077 | if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { | ||
| 1078 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed"); | ||
| 1079 | goto out; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | /* Allocate sense buffers */ | ||
| 1083 | if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) { | ||
| 1084 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed"); | ||
| 1085 | goto out; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | /* Allocate event info space */ | ||
| 1089 | tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); | ||
| 1090 | if (!tw_dev->event_queue[0]) { | ||
| 1091 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed"); | ||
| 1092 | goto out; | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | for (i = 0; i < TW_Q_LENGTH; i++) { | ||
| 1096 | tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); | ||
| 1097 | tw_dev->free_queue[i] = i; | ||
| 1098 | tw_dev->state[i] = TW_S_INITIAL; | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | tw_dev->free_head = TW_Q_START; | ||
| 1102 | tw_dev->free_tail = TW_Q_START; | ||
| 1103 | tw_dev->error_sequence_id = 1; | ||
| 1104 | tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; | ||
| 1105 | |||
| 1106 | mutex_init(&tw_dev->ioctl_lock); | ||
| 1107 | init_waitqueue_head(&tw_dev->ioctl_wqueue); | ||
| 1108 | |||
| 1109 | retval = 0; | ||
| 1110 | out: | ||
| 1111 | return retval; | ||
| 1112 | } /* End twl_initialize_device_extension() */ | ||
| 1113 | |||
| 1114 | /* This function will perform a pci-dma unmap */ | ||
| 1115 | static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) | ||
| 1116 | { | ||
| 1117 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
| 1118 | |||
| 1119 | if (cmd->SCp.phase == TW_PHASE_SGLIST) | ||
| 1120 | scsi_dma_unmap(cmd); | ||
| 1121 | } /* End twl_unmap_scsi_data() */ | ||
| 1122 | |||
/* This function will handle attention interrupts.
 *
 * Reads the outbound doorbell register: a controller error makes us bail
 * with retval 1 (the ISR then masks interrupts); an attention bit kicks
 * off an AEN read via twl_aen_read_queue() unless one is already in flight
 * (guarded by the TW_IN_ATTENTION_LOOP flag).  The doorbell interrupt is
 * always cleared on the way out, on both paths.  Returns 0 on success,
 * 1 on controller error.
 */
static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
{
	int retval = 1;
	u32 request_id, doorbell;

	/* Read doorbell status */
	doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));

	/* Check for controller errors */
	if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
		goto out;
	}

	/* Check if we need to perform an AEN drain */
	if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
		/* test_and_set_bit ensures only one AEN loop runs at a time */
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twl_get_request_id(tw_dev, &request_id);
			if (twl_aen_read_queue(tw_dev, request_id)) {
				/* Read failed: release the request id and the
				   attention-loop flag immediately */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twl_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	retval = 0;
out:
	/* Clear doorbell interrupt */
	TWL_CLEAR_DB_INTERRUPT(tw_dev);

	/* Make sure the clear was flushed by reading it back */
	readl(TWL_HOBDBC_REG_ADDR(tw_dev));

	return retval;
} /* End twl_handle_attention_interrupt() */
| 1160 | |||
/* Interrupt service routine.
 *
 * Under the host lock: validates that the interrupt is ours, bails during
 * reset, dispatches attention interrupts, then drains the outbound response
 * queue.  Each response either matches a posted sense buffer (error path,
 * re-posted to the hardware after processing) or directly encodes a request
 * id; the completed request is then routed to the AEN handler, the chrdev
 * ioctl waiter, or normal scsi completion.
 */
static irqreturn_t twl_interrupt(int irq, void *dev_instance)
{
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int i, handled = 0, error = 0;
	dma_addr_t mfa = 0;
	u32 reg, regl, regh, response, request_id = 0;
	struct scsi_cmnd *cmd;
	TW_Command_Full *full_command_packet;

	spin_lock(tw_dev->host->host_lock);

	/* Read host interrupt status */
	reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
		goto twl_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twl_interrupt_bail;

	/* Attention interrupt */
	if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
		if (twl_handle_attention_interrupt(tw_dev)) {
			TWL_MASK_INTERRUPTS(tw_dev);
			goto twl_interrupt_bail;
		}
	}

	/* Response interrupt: drain until the status bit clears */
	while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
		/* Outbound queue pointer is two 32-bit halves when dma_addr_t
		   is 64-bit; read high then low */
		if (sizeof(dma_addr_t) > 4) {
			regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
			regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
			mfa = ((u64)regh << 32) | regl;
		} else
			mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));

		error = 0;
		response = (u32)mfa;

		/* Check for command packet error: a value that is not a plain
		   request id is the address of one of our sense buffers */
		if (!TW_NOTMFA_OUT(response)) {
			for (i=0;i<TW_Q_LENGTH;i++) {
				if (tw_dev->sense_buffer_phys[i] == mfa) {
					request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
					if (tw_dev->srb[request_id] != NULL)
						error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
					else {
						/* Skip ioctl error prints */
						if (request_id != tw_dev->chrdev_request_id)
							error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
						else
							/* Hand the raw header back to the
							   waiting ioctl instead */
							memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
					}

					/* Now re-post the sense buffer */
					writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
					writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
					break;
				}
			}
		} else
			request_id = TW_RESID_OUT(response);

		full_command_packet = tw_dev->command_packet_virt[request_id];

		/* Check for correct state */
		if (tw_dev->state[request_id] != TW_S_POSTED) {
			if (tw_dev->srb[request_id] != NULL) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
				TWL_MASK_INTERRUPTS(tw_dev);
				goto twl_interrupt_bail;
			}
		}

		/* Check for internal command completion (srb == NULL means
		   either an AEN request or a chrdev ioctl) */
		if (tw_dev->srb[request_id] == NULL) {
			if (request_id != tw_dev->chrdev_request_id) {
				if (twl_aen_complete(tw_dev, request_id))
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
			} else {
				/* Wake the sleeping ioctl in twl_chrdev_ioctl() */
				tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
				wake_up(&tw_dev->ioctl_wqueue);
			}
		} else {
			cmd = tw_dev->srb[request_id];

			/* 'error' was set by twl_fill_sense() above if this
			   completion came in via a sense buffer */
			if (!error)
				cmd->result = (DID_OK << 16);

			/* Report residual bytes for single sgl */
			if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
				if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
					scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
			}

			/* Now complete the io */
			tw_dev->state[request_id] = TW_S_COMPLETED;
			twl_free_request_id(tw_dev, request_id);
			tw_dev->posted_request_count--;
			tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
			twl_unmap_scsi_data(tw_dev, request_id);
		}

		/* Check for another response interrupt */
		reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
	}

twl_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twl_interrupt() */
| 1278 | |||
| 1279 | /* This function will poll for a register change */ | ||
| 1280 | static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds) | ||
| 1281 | { | ||
| 1282 | unsigned long before; | ||
| 1283 | int retval = 1; | ||
| 1284 | u32 reg_value; | ||
| 1285 | |||
| 1286 | reg_value = readl(reg); | ||
| 1287 | before = jiffies; | ||
| 1288 | |||
| 1289 | while ((reg_value & value) != result) { | ||
| 1290 | reg_value = readl(reg); | ||
| 1291 | if (time_after(jiffies, before + HZ * seconds)) | ||
| 1292 | goto out; | ||
| 1293 | msleep(50); | ||
| 1294 | } | ||
| 1295 | retval = 0; | ||
| 1296 | out: | ||
| 1297 | return retval; | ||
| 1298 | } /* End twl_poll_register() */ | ||
| 1299 | |||
/* This function will reset a controller.
 *
 * Runs up to TW_MAX_RESET_TRIES attempts of: optional soft reset (waiting
 * for the controller to go non-ready and then ready again), the
 * init-connection handshake, posting all sense buffers to the outbound
 * queue, and an AEN queue drain.  On success it also fills in the
 * driver/firmware compatibility struct.  Returns 0 on success, 1 after
 * all retries are exhausted.
 */
static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int retval = 1;
	int i = 0;
	u32 status = 0;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;
	int tries = 0;
	int do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		/* Do a soft reset if one is needed */
		if (do_soft_reset) {
			TWL_SOFT_RESET(tw_dev);

			/* Make sure controller is in a good state */
			if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
				tries++;
				continue;
			}
			if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
				tries++;
				continue;
			}
		}

		/* Initconnect */
		if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
				       TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
				       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
				       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
				       &fw_on_ctlr_build, &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Load sense buffers; a write is only counted when the
		   submit-overrun status bit stays clear.
		   NOTE(review): 'i' is not reset between retries, so buffers
		   accepted on a previous attempt are not re-posted after a
		   fresh soft reset -- confirm this is intended */
		while (i < TW_Q_LENGTH) {
			writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
			writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));

			/* Check status for over-run after each write */
			status = readl(TWL_STATUS_REG_ADDR(tw_dev));
			if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
				i++;
		}

		/* Now check status */
		status = readl(TWL_STATUS_REG_ADDR(tw_dev));
		if (status) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Drain the AEN queue */
		if (twl_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Load rest of compatibility struct.
		   NOTE(review): strncpy with strlen(src) copies no NUL
		   terminator; this relies on driver_version having been
		   zeroed beforehand -- verify, or bound by the field size */
		strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
		tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
		tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
		tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
		tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
		tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
		tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
		tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twl_reset_sequence() */
| 1390 | |||
/* This function will reset a device extension.
 *
 * Blocks new scsi requests (only when ioctl_reset is set -- presumably the
 * midlayer already holds things off on the eh path; TODO confirm), masks
 * interrupts, fails all in-flight commands with DID_RESET, reinitializes
 * the free-request queue, and re-runs the full controller reset sequence.
 * Returns 0 on success, 1 if the reset sequence fails.
 */
static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
{
	int i = 0, retval = 1;
	unsigned long flags = 0;

	/* Block SCSI requests while we are resetting */
	if (ioctl_reset)
		scsi_block_requests(tw_dev->host);

	/* TW_IN_RESET makes the ISR bail until we are done */
	set_bit(TW_IN_RESET, &tw_dev->flags);
	TWL_MASK_INTERRUPTS(tw_dev);
	TWL_CLEAR_DB_INTERRUPT(tw_dev);

	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				tw_dev->srb[i]->result = (DID_RESET << 16);
				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
				twl_unmap_scsi_data(tw_dev, i);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twl_reset_sequence(tw_dev, 1))
		goto out;

	TWL_UNMASK_INTERRUPTS(tw_dev);

	clear_bit(TW_IN_RESET, &tw_dev->flags);
	/* Any chrdev ioctl that was in flight is gone now */
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	if (ioctl_reset)
		scsi_unblock_requests(tw_dev->host);
	return retval;
} /* End twl_reset_device_extension() */
| 1445 | |||
| 1446 | /* This funciton returns unit geometry in cylinders/heads/sectors */ | ||
| 1447 | static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) | ||
| 1448 | { | ||
| 1449 | int heads, sectors; | ||
| 1450 | TW_Device_Extension *tw_dev; | ||
| 1451 | |||
| 1452 | tw_dev = (TW_Device_Extension *)sdev->host->hostdata; | ||
| 1453 | |||
| 1454 | if (capacity >= 0x200000) { | ||
| 1455 | heads = 255; | ||
| 1456 | sectors = 63; | ||
| 1457 | } else { | ||
| 1458 | heads = 64; | ||
| 1459 | sectors = 32; | ||
| 1460 | } | ||
| 1461 | |||
| 1462 | geom[0] = heads; | ||
| 1463 | geom[1] = sectors; | ||
| 1464 | geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ | ||
| 1465 | |||
| 1466 | return 0; | ||
| 1467 | } /* End twl_scsi_biosparam() */ | ||
| 1468 | |||
| 1469 | /* This is the new scsi eh reset function */ | ||
| 1470 | static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt) | ||
| 1471 | { | ||
| 1472 | TW_Device_Extension *tw_dev = NULL; | ||
| 1473 | int retval = FAILED; | ||
| 1474 | |||
| 1475 | tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | ||
| 1476 | |||
| 1477 | tw_dev->num_resets++; | ||
| 1478 | |||
| 1479 | sdev_printk(KERN_WARNING, SCpnt->device, | ||
| 1480 | "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", | ||
| 1481 | TW_DRIVER, 0x2c, SCpnt->cmnd[0]); | ||
| 1482 | |||
| 1483 | /* Make sure we are not issuing an ioctl or resetting from ioctl */ | ||
| 1484 | mutex_lock(&tw_dev->ioctl_lock); | ||
| 1485 | |||
| 1486 | /* Now reset the card and some of the device extension data */ | ||
| 1487 | if (twl_reset_device_extension(tw_dev, 0)) { | ||
| 1488 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset"); | ||
| 1489 | goto out; | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | retval = SUCCESS; | ||
| 1493 | out: | ||
| 1494 | mutex_unlock(&tw_dev->ioctl_lock); | ||
| 1495 | return retval; | ||
| 1496 | } /* End twl_scsi_eh_reset() */ | ||
| 1497 | |||
| 1498 | /* This is the main scsi queue function to handle scsi opcodes */ | ||
| 1499 | static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | ||
| 1500 | { | ||
| 1501 | int request_id, retval; | ||
| 1502 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | ||
| 1503 | |||
| 1504 | /* If we are resetting due to timed out ioctl, report as busy */ | ||
| 1505 | if (test_bit(TW_IN_RESET, &tw_dev->flags)) { | ||
| 1506 | retval = SCSI_MLQUEUE_HOST_BUSY; | ||
| 1507 | goto out; | ||
| 1508 | } | ||
| 1509 | |||
| 1510 | /* Save done function into scsi_cmnd struct */ | ||
| 1511 | SCpnt->scsi_done = done; | ||
| 1512 | |||
| 1513 | /* Get a free request id */ | ||
| 1514 | twl_get_request_id(tw_dev, &request_id); | ||
| 1515 | |||
| 1516 | /* Save the scsi command for use by the ISR */ | ||
| 1517 | tw_dev->srb[request_id] = SCpnt; | ||
| 1518 | |||
| 1519 | /* Initialize phase to zero */ | ||
| 1520 | SCpnt->SCp.phase = TW_PHASE_INITIAL; | ||
| 1521 | |||
| 1522 | retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); | ||
| 1523 | if (retval) { | ||
| 1524 | tw_dev->state[request_id] = TW_S_COMPLETED; | ||
| 1525 | twl_free_request_id(tw_dev, request_id); | ||
| 1526 | SCpnt->result = (DID_ERROR << 16); | ||
| 1527 | done(SCpnt); | ||
| 1528 | retval = 0; | ||
| 1529 | } | ||
| 1530 | out: | ||
| 1531 | return retval; | ||
| 1532 | } /* End twl_scsi_queue() */ | ||
| 1533 | |||
| 1534 | /* This function tells the controller to shut down */ | ||
| 1535 | static void __twl_shutdown(TW_Device_Extension *tw_dev) | ||
| 1536 | { | ||
| 1537 | /* Disable interrupts */ | ||
| 1538 | TWL_MASK_INTERRUPTS(tw_dev); | ||
| 1539 | |||
| 1540 | /* Free up the IRQ */ | ||
| 1541 | free_irq(tw_dev->tw_pci_dev->irq, tw_dev); | ||
| 1542 | |||
| 1543 | printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no); | ||
| 1544 | |||
| 1545 | /* Tell the card we are shutting down */ | ||
| 1546 | if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { | ||
| 1547 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed"); | ||
| 1548 | } else { | ||
| 1549 | printk(KERN_WARNING "3w-sas: Shutdown complete.\n"); | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | /* Clear doorbell interrupt just before exit */ | ||
| 1553 | TWL_CLEAR_DB_INTERRUPT(tw_dev); | ||
| 1554 | } /* End __twl_shutdown() */ | ||
| 1555 | |||
| 1556 | /* Wrapper for __twl_shutdown */ | ||
| 1557 | static void twl_shutdown(struct pci_dev *pdev) | ||
| 1558 | { | ||
| 1559 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
| 1560 | TW_Device_Extension *tw_dev; | ||
| 1561 | |||
| 1562 | if (!host) | ||
| 1563 | return; | ||
| 1564 | |||
| 1565 | tw_dev = (TW_Device_Extension *)host->hostdata; | ||
| 1566 | |||
| 1567 | if (tw_dev->online) | ||
| 1568 | __twl_shutdown(tw_dev); | ||
| 1569 | } /* End twl_shutdown() */ | ||
| 1570 | |||
| 1571 | /* This function configures unit settings when a unit is coming on-line */ | ||
| 1572 | static int twl_slave_configure(struct scsi_device *sdev) | ||
| 1573 | { | ||
| 1574 | /* Force 60 second timeout */ | ||
| 1575 | blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); | ||
| 1576 | |||
| 1577 | return 0; | ||
| 1578 | } /* End twl_slave_configure() */ | ||
| 1579 | |||
/* scsi_host_template initializer: hooks this driver's entry points and
 * queueing limits into the SCSI midlayer.  can_queue is TW_Q_LENGTH-2,
 * leaving slots spare relative to the driver's internal queue length. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "3w-sas",
	.queuecommand = twl_scsi_queue,
	.eh_host_reset_handler = twl_scsi_eh_reset,
	.bios_param = twl_scsi_biosparam,
	.change_queue_depth = twl_change_queue_depth,
	.can_queue = TW_Q_LENGTH-2,
	.slave_configure = twl_slave_configure,
	.this_id = -1,
	.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
	.max_sectors = TW_MAX_SECTORS,
	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = twl_host_attrs,
	.emulated = 1
};
| 1598 | |||
/* This function will probe and initialize a card.
 *
 * PCI probe path: enables the device, sets up DMA masks (64-bit with a
 * 32-bit fallback), allocates the Scsi_Host and device extension, maps
 * BAR 1, resets the controller, registers with the SCSI midlayer, wires
 * up MSI/IRQ handling, and finally exposes the sysfs binary files and
 * the "twl" character device.  Unwinds via the goto-cleanup labels on
 * failure.  Returns 0 on success or a negative errno.
 */
static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	int retval = -ENODEV;
	int *ptr_phycount, phycount=0;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
		/* NOTE(review): this jumps to a label that calls
		   pci_disable_device() even though enable failed --
		   looks harmless but worth confirming */
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Prefer full 64-bit DMA; fall back to 32-bit masks */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
			retval = -ENODEV;
			goto out_disable_device;
		}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = shost_priv(host);

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twl_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-sas");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
		goto out_free_device_extension;
	}

	/* Save base address, use region 1 */
	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card until setup completes */
	TWL_MASK_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twl_reset_sequence(tw_dev, 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
		goto out_iounmap;
	}

	/* Set host specific parameters */
	host->max_id = TW_MAX_UNITS;
	host->max_cmd_len = TW_MAX_CDB_LEN;
	host->max_lun = TW_MAX_LUNS;
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
	       host->host_no,
	       (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
	       (u64)pci_resource_start(pdev, 1), pdev->irq);

	/* Query the phy count for the banner message */
	ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
				     TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
	if (ptr_phycount)
		phycount = le32_to_cpu(*(int *)ptr_phycount);

	printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
	       host->host_no,
	       (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       phycount);

	/* Try to enable MSI */
	if (use_msi && !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
		goto out_remove_host;
	}

	twl_device_extension_list[twl_device_extension_count] = tw_dev;
	twl_device_extension_count++;

	/* Re-enable interrupts on the card */
	TWL_UNMASK_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	/* Add sysfs binary files (failures are logged but non-fatal) */
	if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
	if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");

	/* Register the shared character device on the first probe only */
	if (twl_major == -1) {
		if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
	}
	tw_dev->online = 1;
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twl_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
} /* End twl_probe() */
| 1748 | |||
/* This function is called to remove a device.
 *
 * PCI remove path: tears down everything probe set up, in reverse order
 * (sysfs files, SCSI host, character device, controller shutdown, MSI,
 * iomap, regions, device extension).  No-op if probe never completed
 * (host missing or never marked online).
 */
static void twl_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev;

	if (!host)
		return;

	tw_dev = (TW_Device_Extension *)host->hostdata;

	if (!tw_dev->online)
		return;

	/* Remove sysfs binary files */
	sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
	sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);

	scsi_remove_host(tw_dev->host);

	/* Unregister character device (shared by all adapters; the last
	   remove clears twl_major) */
	if (twl_major >= 0) {
		unregister_chrdev(twl_major, "twl");
		twl_major = -1;
	}

	/* Shutdown the card */
	__twl_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twl_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twl_device_extension_count--;
} /* End twl_remove() */
| 1795 | |||
| 1796 | #ifdef CONFIG_PM | ||
/* This function is called on PCI suspend.
 *
 * Masks interrupts, releases the IRQ, tells the firmware the host is
 * disconnecting, then saves PCI state and powers the device down to the
 * state chosen for the given pm_message.  Always returns 0.
 */
static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
	/* Disable interrupts */
	TWL_MASK_INTERRUPTS(tw_dev);

	/* IRQ is re-requested by twl_resume() */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	/* Tell the card we are shutting down */
	if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-sas: Suspend complete.\n");
	}

	/* Clear doorbell interrupt */
	TWL_CLEAR_DB_INTERRUPT(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
} /* End twl_suspend() */
| 1825 | |||
/* This function is called on PCI resume.
 *
 * Restores power and PCI state, re-enables the device, reprograms DMA
 * masks, re-runs the controller reset sequence, re-requests the IRQ and
 * MSI, and unmasks interrupts.  Returns 0 on success or a negative errno.
 */
static int twl_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Same 64-bit-then-32-bit DMA mask setup as in probe */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
			retval = -ENODEV;
			goto out_disable_device;
		}

	/* Initialize the card */
	if (twl_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TWL_UNMASK_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-sas: Resume complete.\n");
	return 0;

out_disable_device:
	/* NOTE(review): the failure path removes the SCSI host entirely;
	   confirm this is intended (the adapter becomes unusable and
	   twl_remove will then see online still set) */
	scsi_remove_host(host);
	pci_disable_device(pdev);

	return retval;
} /* End twl_resume() */
| 1886 | #endif | ||
| 1887 | |||
/* PCI Devices supported by this driver: only the 3ware 9750 family
 * (falls back to a local PCI_DEVICE_ID_3WARE_9750 define in 3w-sas.h
 * when the id is not in pci_ids.h) */
static struct pci_device_id twl_pci_tbl[] __devinitdata = {
	{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
	{ } /* terminator */
};
MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
| 1894 | |||
/* pci_driver initializer: binds probe/remove/shutdown (and, with
 * CONFIG_PM, the legacy suspend/resume hooks) to the id table above */
static struct pci_driver twl_driver = {
	.name = "3w-sas",
	.id_table = twl_pci_tbl,
	.probe = twl_probe,
	.remove = twl_remove,
#ifdef CONFIG_PM
	.suspend = twl_suspend,
	.resume = twl_resume,
#endif
	.shutdown = twl_shutdown
};
| 1907 | |||
/* This function is called on driver initialization: prints the version
 * banner and registers the PCI driver (which triggers twl_probe for each
 * matching device) */
static int __init twl_init(void)
{
	printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twl_driver);
} /* End twl_init() */
| 1915 | |||
/* This function is called on driver exit: unregistering the PCI driver
 * runs twl_remove for every bound adapter */
static void __exit twl_exit(void)
{
	pci_unregister_driver(&twl_driver);
} /* End twl_exit() */

module_init(twl_init);
module_exit(twl_exit);
| 1924 | |||
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h new file mode 100644 index 000000000000..d474892701d4 --- /dev/null +++ b/drivers/scsi/3w-sas.h | |||
| @@ -0,0 +1,396 @@ | |||
| 1 | /* | ||
| 2 | 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. | ||
| 3 | |||
| 4 | Written By: Adam Radford <linuxraid@lsi.com> | ||
| 5 | |||
| 6 | Copyright (C) 2009 LSI Corporation. | ||
| 7 | |||
| 8 | This program is free software; you can redistribute it and/or modify | ||
| 9 | it under the terms of the GNU General Public License as published by | ||
| 10 | the Free Software Foundation; version 2 of the License. | ||
| 11 | |||
| 12 | This program is distributed in the hope that it will be useful, | ||
| 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | GNU General Public License for more details. | ||
| 16 | |||
| 17 | NO WARRANTY | ||
| 18 | THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 19 | CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
| 20 | LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
| 21 | MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
| 22 | solely responsible for determining the appropriateness of using and | ||
| 23 | distributing the Program and assumes all risks associated with its | ||
| 24 | exercise of rights under this Agreement, including but not limited to | ||
| 25 | the risks and costs of program errors, damage to or loss of data, | ||
| 26 | programs or equipment, and unavailability or interruption of operations. | ||
| 27 | |||
| 28 | DISCLAIMER OF LIABILITY | ||
| 29 | NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
| 30 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 31 | DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
| 32 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
| 33 | TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
| 34 | USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
| 35 | HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
| 36 | |||
| 37 | You should have received a copy of the GNU General Public License | ||
| 38 | along with this program; if not, write to the Free Software | ||
| 39 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 40 | |||
| 41 | Bugs/Comments/Suggestions should be mailed to: | ||
| 42 | linuxraid@lsi.com | ||
| 43 | |||
| 44 | For more information, goto: | ||
| 45 | http://www.lsi.com | ||
| 46 | */ | ||
| 47 | |||
| 48 | #ifndef _3W_SAS_H | ||
| 49 | #define _3W_SAS_H | ||
| 50 | |||
/* AEN severity table: NULL-terminated strings, apparently indexed by the
 * TW_AEN_SEVERITY_* codes (ERROR=0x1 -> "ERROR", DEBUG=0x4 -> "DEBUG") */
static char *twl_aen_severity_table[] =
{
	"None", "ERROR", "WARNING", "INFO", "DEBUG", NULL
};
| 56 | |||
| 57 | /* Liberator register offsets */ | ||
| 58 | #define TWL_STATUS 0x0 /* Status */ | ||
| 59 | #define TWL_HIBDB 0x20 /* Inbound doorbell */ | ||
| 60 | #define TWL_HISTAT 0x30 /* Host interrupt status */ | ||
| 61 | #define TWL_HIMASK 0x34 /* Host interrupt mask */ | ||
| 62 | #define TWL_HOBDB 0x9C /* Outbound doorbell */ | ||
| 63 | #define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */ | ||
| 64 | #define TWL_SCRPD3 0xBC /* Scratchpad */ | ||
| 65 | #define TWL_HIBQPL 0xC0 /* Host inbound Q low */ | ||
| 66 | #define TWL_HIBQPH 0xC4 /* Host inbound Q high */ | ||
| 67 | #define TWL_HOBQPL 0xC8 /* Host outbound Q low */ | ||
| 68 | #define TWL_HOBQPH 0xCC /* Host outbound Q high */ | ||
| 69 | #define TWL_HISTATUS_VALID_INTERRUPT 0xC | ||
| 70 | #define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4 | ||
| 71 | #define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8 | ||
| 72 | #define TWL_STATUS_OVERRUN_SUBMIT 0x2000 | ||
| 73 | #define TWL_ISSUE_SOFT_RESET 0x100 | ||
| 74 | #define TWL_CONTROLLER_READY 0x2000 | ||
| 75 | #define TWL_DOORBELL_CONTROLLER_ERROR 0x200000 | ||
| 76 | #define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000 | ||
| 77 | #define TWL_PULL_MODE 0x1 | ||
| 78 | |||
| 79 | /* Command packet opcodes used by the driver */ | ||
| 80 | #define TW_OP_INIT_CONNECTION 0x1 | ||
| 81 | #define TW_OP_GET_PARAM 0x12 | ||
| 82 | #define TW_OP_SET_PARAM 0x13 | ||
| 83 | #define TW_OP_EXECUTE_SCSI 0x10 | ||
| 84 | |||
| 85 | /* Asynchronous Event Notification (AEN) codes used by the driver */ | ||
| 86 | #define TW_AEN_QUEUE_EMPTY 0x0000 | ||
| 87 | #define TW_AEN_SOFT_RESET 0x0001 | ||
| 88 | #define TW_AEN_SYNC_TIME_WITH_HOST 0x031 | ||
| 89 | #define TW_AEN_SEVERITY_ERROR 0x1 | ||
| 90 | #define TW_AEN_SEVERITY_DEBUG 0x4 | ||
| 91 | #define TW_AEN_NOT_RETRIEVED 0x1 | ||
| 92 | |||
| 93 | /* Command state defines */ | ||
| 94 | #define TW_S_INITIAL 0x1 /* Initial state */ | ||
| 95 | #define TW_S_STARTED 0x2 /* Id in use */ | ||
| 96 | #define TW_S_POSTED 0x4 /* Posted to the controller */ | ||
| 97 | #define TW_S_COMPLETED 0x8 /* Completed by isr */ | ||
| 98 | #define TW_S_FINISHED 0x10 /* I/O completely done */ | ||
| 99 | |||
| 100 | /* Compatibility defines */ | ||
| 101 | #define TW_9750_ARCH_ID 10 | ||
| 102 | #define TW_CURRENT_DRIVER_SRL 40 | ||
| 103 | #define TW_CURRENT_DRIVER_BUILD 0 | ||
| 104 | #define TW_CURRENT_DRIVER_BRANCH 0 | ||
| 105 | |||
| 106 | /* Phase defines */ | ||
| 107 | #define TW_PHASE_INITIAL 0 | ||
| 108 | #define TW_PHASE_SGLIST 2 | ||
| 109 | |||
| 110 | /* Misc defines */ | ||
| 111 | #define TW_SECTOR_SIZE 512 | ||
| 112 | #define TW_MAX_UNITS 32 | ||
| 113 | #define TW_INIT_MESSAGE_CREDITS 0x100 | ||
| 114 | #define TW_INIT_COMMAND_PACKET_SIZE 0x3 | ||
| 115 | #define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 | ||
| 116 | #define TW_EXTENDED_INIT_CONNECT 0x2 | ||
| 117 | #define TW_BASE_FW_SRL 24 | ||
| 118 | #define TW_BASE_FW_BRANCH 0 | ||
| 119 | #define TW_BASE_FW_BUILD 1 | ||
| 120 | #define TW_Q_LENGTH 256 | ||
| 121 | #define TW_Q_START 0 | ||
| 122 | #define TW_MAX_SLOT 32 | ||
| 123 | #define TW_MAX_RESET_TRIES 2 | ||
| 124 | #define TW_MAX_CMDS_PER_LUN 254 | ||
| 125 | #define TW_MAX_AEN_DRAIN 255 | ||
| 126 | #define TW_IN_RESET 2 | ||
| 127 | #define TW_USING_MSI 3 | ||
| 128 | #define TW_IN_ATTENTION_LOOP 4 | ||
| 129 | #define TW_MAX_SECTORS 256 | ||
| 130 | #define TW_MAX_CDB_LEN 16 | ||
| 131 | #define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ | ||
| 132 | #define TW_IOCTL_CHRDEV_FREE -1 | ||
| 133 | #define TW_COMMAND_OFFSET 128 /* 128 bytes */ | ||
| 134 | #define TW_VERSION_TABLE 0x0402 | ||
| 135 | #define TW_TIMEKEEP_TABLE 0x040A | ||
| 136 | #define TW_INFORMATION_TABLE 0x0403 | ||
| 137 | #define TW_PARAM_FWVER 3 | ||
| 138 | #define TW_PARAM_FWVER_LENGTH 16 | ||
| 139 | #define TW_PARAM_BIOSVER 4 | ||
| 140 | #define TW_PARAM_BIOSVER_LENGTH 16 | ||
| 141 | #define TW_PARAM_MODEL 8 | ||
| 142 | #define TW_PARAM_MODEL_LENGTH 16 | ||
| 143 | #define TW_PARAM_PHY_SUMMARY_TABLE 1 | ||
| 144 | #define TW_PARAM_PHYCOUNT 2 | ||
| 145 | #define TW_PARAM_PHYCOUNT_LENGTH 1 | ||
| 146 | #define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools | ||
| 147 | #define TW_ALLOCATION_LENGTH 128 | ||
| 148 | #define TW_SENSE_DATA_LENGTH 18 | ||
| 149 | #define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a | ||
| 150 | #define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d | ||
| 151 | #define TW_ERROR_UNIT_OFFLINE 0x128 | ||
| 152 | #define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 | ||
| 153 | #define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 | ||
| 154 | #define TW_DRIVER 6 | ||
| 155 | #ifndef PCI_DEVICE_ID_3WARE_9750 | ||
| 156 | #define PCI_DEVICE_ID_3WARE_9750 0x1010 | ||
| 157 | #endif | ||
| 158 | |||
| 159 | /* Bitmask macros to eliminate bitfields */ | ||
| 160 | |||
| 161 | /* opcode: 5, reserved: 3 */ | ||
| 162 | #define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) | ||
| 163 | #define TW_OP_OUT(x) (x & 0x1f) | ||
| 164 | |||
| 165 | /* opcode: 5, sgloffset: 3 */ | ||
| 166 | #define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) | ||
| 167 | #define TW_SGL_OUT(x) ((x >> 5) & 0x7) | ||
| 168 | |||
| 169 | /* severity: 3, reserved: 5 */ | ||
| 170 | #define TW_SEV_OUT(x) (x & 0x7) | ||
| 171 | |||
| 172 | /* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */ | ||
| 173 | #define TW_RESID_OUT(x) ((x >> 16) & 0xffff) | ||
| 174 | #define TW_NOTMFA_OUT(x) (x & 0x1) | ||
| 175 | |||
| 176 | /* request_id: 12, lun: 4 */ | ||
| 177 | #define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff)) | ||
| 178 | #define TW_LUN_OUT(lun) ((lun >> 12) & 0xf) | ||
| 179 | |||
| 180 | /* Register access macros */ | ||
| 181 | #define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS) | ||
| 182 | #define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL) | ||
| 183 | #define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH) | ||
| 184 | #define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB) | ||
| 185 | #define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC) | ||
| 186 | #define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK) | ||
| 187 | #define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT) | ||
| 188 | #define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH) | ||
| 189 | #define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL) | ||
| 190 | #define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB) | ||
| 191 | #define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3) | ||
| 192 | #define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev))) | ||
| 193 | #define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev))) | ||
| 194 | #define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev))) | ||
| 195 | #define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev))) | ||
| 196 | |||
| 197 | /* Macros */ | ||
| 198 | #define TW_PRINTK(h,a,b,c) { \ | ||
| 199 | if (h) \ | ||
| 200 | printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ | ||
| 201 | else \ | ||
| 202 | printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ | ||
| 203 | } | ||
| 204 | #define TW_MAX_LUNS 16 | ||
| 205 | #define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4) | ||
| 206 | #define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92) | ||
| 207 | #define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94) | ||
| 208 | #define TW_PADDING_LENGTH_LIBERATOR 136 | ||
| 209 | #define TW_PADDING_LENGTH_LIBERATOR_OLD 132 | ||
| 210 | #define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x)) | ||
| 211 | |||
/* All firmware-facing structures below are byte-packed */
#pragma pack(1)

/* SGL entry: address/length pair whose width follows dma_addr_t
 * (see TW_CPU_TO_SGL for the matching endian conversion) */
typedef struct TAG_TW_SG_Entry_ISO {
	dma_addr_t address;
	dma_addr_t length;
} TW_SG_Entry_ISO;
| 219 | |||
/* Old Command Packet with ISO SGL.
 * Byte-packed firmware command layout; the packed byte fields are
 * composed/decomposed with the TW_OPSGL_IN/TW_OP_OUT-style bitmask
 * macros defined above. */
typedef struct TW_Command {
	unsigned char opcode__sgloffset;	/* packed opcode + sgl offset */
	unsigned char size;
	unsigned char request_id;
	unsigned char unit__hostid;
	/* Second DWORD */
	unsigned char status;
	unsigned char flags;
	union {
		unsigned short block_count;	/* for I/O commands */
		unsigned short parameter_count;	/* for get/set param */
	} byte6_offset;
	union {
		struct {
			u32 lba;
			TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
			unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
		} io;
		struct {
			TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
			u32 padding;
			unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
		} param;
	} byte8_offset;
} TW_Command;
| 246 | |||
| 247 | /* New Command Packet with ISO SGL */ | ||
| 248 | typedef struct TAG_TW_Command_Apache { | ||
| 249 | unsigned char opcode__reserved; | ||
| 250 | unsigned char unit; | ||
| 251 | unsigned short request_id__lunl; | ||
| 252 | unsigned char status; | ||
| 253 | unsigned char sgl_offset; | ||
| 254 | unsigned short sgl_entries__lunh; | ||
| 255 | unsigned char cdb[16]; | ||
| 256 | TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH]; | ||
| 257 | unsigned char padding[TW_PADDING_LENGTH_LIBERATOR]; | ||
| 258 | } TW_Command_Apache; | ||
| 259 | |||
| 260 | /* New command packet header */ | ||
| 261 | typedef struct TAG_TW_Command_Apache_Header { | ||
| 262 | unsigned char sense_data[TW_SENSE_DATA_LENGTH]; | ||
| 263 | struct { | ||
| 264 | char reserved[4]; | ||
| 265 | unsigned short error; | ||
| 266 | unsigned char padding; | ||
| 267 | unsigned char severity__reserved; | ||
| 268 | } status_block; | ||
| 269 | unsigned char err_specific_desc[98]; | ||
| 270 | struct { | ||
| 271 | unsigned char size_header; | ||
| 272 | unsigned short request_id; | ||
| 273 | unsigned char size_sense; | ||
| 274 | } header_desc; | ||
| 275 | } TW_Command_Apache_Header; | ||
| 276 | |||
| 277 | /* This struct is a union of the 2 command packets */ | ||
| 278 | typedef struct TAG_TW_Command_Full { | ||
| 279 | TW_Command_Apache_Header header; | ||
| 280 | union { | ||
| 281 | TW_Command oldcommand; | ||
| 282 | TW_Command_Apache newcommand; | ||
| 283 | } command; | ||
| 284 | } TW_Command_Full; | ||
| 285 | |||
| 286 | /* Initconnection structure */ | ||
| 287 | typedef struct TAG_TW_Initconnect { | ||
| 288 | unsigned char opcode__reserved; | ||
| 289 | unsigned char size; | ||
| 290 | unsigned char request_id; | ||
| 291 | unsigned char res2; | ||
| 292 | unsigned char status; | ||
| 293 | unsigned char flags; | ||
| 294 | unsigned short message_credits; | ||
| 295 | u32 features; | ||
| 296 | unsigned short fw_srl; | ||
| 297 | unsigned short fw_arch_id; | ||
| 298 | unsigned short fw_branch; | ||
| 299 | unsigned short fw_build; | ||
| 300 | u32 result; | ||
| 301 | } TW_Initconnect; | ||
| 302 | |||
| 303 | /* Event info structure */ | ||
| 304 | typedef struct TAG_TW_Event | ||
| 305 | { | ||
| 306 | unsigned int sequence_id; | ||
| 307 | unsigned int time_stamp_sec; | ||
| 308 | unsigned short aen_code; | ||
| 309 | unsigned char severity; | ||
| 310 | unsigned char retrieved; | ||
| 311 | unsigned char repeat_count; | ||
| 312 | unsigned char parameter_len; | ||
| 313 | unsigned char parameter_data[98]; | ||
| 314 | } TW_Event; | ||
| 315 | |||
| 316 | typedef struct TAG_TW_Ioctl_Driver_Command { | ||
| 317 | unsigned int control_code; | ||
| 318 | unsigned int status; | ||
| 319 | unsigned int unique_id; | ||
| 320 | unsigned int sequence_id; | ||
| 321 | unsigned int os_specific; | ||
| 322 | unsigned int buffer_length; | ||
| 323 | } TW_Ioctl_Driver_Command; | ||
| 324 | |||
| 325 | typedef struct TAG_TW_Ioctl_Apache { | ||
| 326 | TW_Ioctl_Driver_Command driver_command; | ||
| 327 | char padding[488]; | ||
| 328 | TW_Command_Full firmware_command; | ||
| 329 | char data_buffer[1]; | ||
| 330 | } TW_Ioctl_Buf_Apache; | ||
| 331 | |||
| 332 | /* GetParam descriptor */ | ||
| 333 | typedef struct { | ||
| 334 | unsigned short table_id; | ||
| 335 | unsigned short parameter_id; | ||
| 336 | unsigned short parameter_size_bytes; | ||
| 337 | unsigned short actual_parameter_size_bytes; | ||
| 338 | unsigned char data[1]; | ||
| 339 | } TW_Param_Apache; | ||
| 340 | |||
| 341 | /* Compatibility information structure */ | ||
| 342 | typedef struct TAG_TW_Compatibility_Info | ||
| 343 | { | ||
| 344 | char driver_version[32]; | ||
| 345 | unsigned short working_srl; | ||
| 346 | unsigned short working_branch; | ||
| 347 | unsigned short working_build; | ||
| 348 | unsigned short driver_srl_high; | ||
| 349 | unsigned short driver_branch_high; | ||
| 350 | unsigned short driver_build_high; | ||
| 351 | unsigned short driver_srl_low; | ||
| 352 | unsigned short driver_branch_low; | ||
| 353 | unsigned short driver_build_low; | ||
| 354 | unsigned short fw_on_ctlr_srl; | ||
| 355 | unsigned short fw_on_ctlr_branch; | ||
| 356 | unsigned short fw_on_ctlr_build; | ||
| 357 | } TW_Compatibility_Info; | ||
| 358 | |||
| 359 | #pragma pack() | ||
| 360 | |||
| 361 | typedef struct TAG_TW_Device_Extension { | ||
| 362 | void __iomem *base_addr; | ||
| 363 | unsigned long *generic_buffer_virt[TW_Q_LENGTH]; | ||
| 364 | dma_addr_t generic_buffer_phys[TW_Q_LENGTH]; | ||
| 365 | TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; | ||
| 366 | dma_addr_t command_packet_phys[TW_Q_LENGTH]; | ||
| 367 | TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH]; | ||
| 368 | dma_addr_t sense_buffer_phys[TW_Q_LENGTH]; | ||
| 369 | struct pci_dev *tw_pci_dev; | ||
| 370 | struct scsi_cmnd *srb[TW_Q_LENGTH]; | ||
| 371 | unsigned char free_queue[TW_Q_LENGTH]; | ||
| 372 | unsigned char free_head; | ||
| 373 | unsigned char free_tail; | ||
| 374 | int state[TW_Q_LENGTH]; | ||
| 375 | unsigned int posted_request_count; | ||
| 376 | unsigned int max_posted_request_count; | ||
| 377 | unsigned int max_sgl_entries; | ||
| 378 | unsigned int sgl_entries; | ||
| 379 | unsigned int num_resets; | ||
| 380 | unsigned int sector_count; | ||
| 381 | unsigned int max_sector_count; | ||
| 382 | unsigned int aen_count; | ||
| 383 | struct Scsi_Host *host; | ||
| 384 | long flags; | ||
| 385 | TW_Event *event_queue[TW_Q_LENGTH]; | ||
| 386 | unsigned char error_index; | ||
| 387 | unsigned int error_sequence_id; | ||
| 388 | int chrdev_request_id; | ||
| 389 | wait_queue_head_t ioctl_wqueue; | ||
| 390 | struct mutex ioctl_lock; | ||
| 391 | TW_Compatibility_Info tw_compat_info; | ||
| 392 | char online; | ||
| 393 | } TW_Device_Extension; | ||
| 394 | |||
| 395 | #endif /* _3W_SAS_H */ | ||
| 396 | |||
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index faa0fcfed71e..f65a1e92340c 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | Copyright (C) 1999-2009 3ware Inc. | 9 | Copyright (C) 1999-2009 3ware Inc. |
| 10 | 10 | ||
| 11 | Kernel compatiblity By: Andre Hedrick <andre@suse.com> | 11 | Kernel compatibility By: Andre Hedrick <andre@suse.com> |
| 12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> | 12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> |
| 13 | 13 | ||
| 14 | Further tiny build fixes and trivial hoovering Alan Cox | 14 | Further tiny build fixes and trivial hoovering Alan Cox |
| @@ -521,8 +521,12 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr, | |||
| 521 | } /* End tw_show_stats() */ | 521 | } /* End tw_show_stats() */ |
| 522 | 522 | ||
| 523 | /* This function will set a devices queue depth */ | 523 | /* This function will set a devices queue depth */ |
| 524 | static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth) | 524 | static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth, |
| 525 | int reason) | ||
| 525 | { | 526 | { |
| 527 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 528 | return -EOPNOTSUPP; | ||
| 529 | |||
| 526 | if (queue_depth > TW_Q_LENGTH-2) | 530 | if (queue_depth > TW_Q_LENGTH-2) |
| 527 | queue_depth = TW_Q_LENGTH-2; | 531 | queue_depth = TW_Q_LENGTH-2; |
| 528 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | 532 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); |
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index f5a9addb7050..9f4a911a6d8c 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
| @@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host); | |||
| 175 | STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); | 175 | STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); |
| 176 | STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); | 176 | STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); |
| 177 | STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); | 177 | STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); |
| 178 | static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); | 178 | static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason); |
| 179 | static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); | 179 | static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); |
| 180 | 180 | ||
| 181 | STATIC struct device_attribute *NCR_700_dev_attrs[]; | 181 | STATIC struct device_attribute *NCR_700_dev_attrs[]; |
| @@ -1491,7 +1491,7 @@ NCR_700_intr(int irq, void *dev_id) | |||
| 1491 | unsigned long flags; | 1491 | unsigned long flags; |
| 1492 | int handled = 0; | 1492 | int handled = 0; |
| 1493 | 1493 | ||
| 1494 | /* Use the host lock to serialise acess to the 53c700 | 1494 | /* Use the host lock to serialise access to the 53c700 |
| 1495 | * hardware. Note: In future, we may need to take the queue | 1495 | * hardware. Note: In future, we may need to take the queue |
| 1496 | * lock to enter the done routines. When that happens, we | 1496 | * lock to enter the done routines. When that happens, we |
| 1497 | * need to ensure that for this driver, the host lock and the | 1497 | * need to ensure that for this driver, the host lock and the |
| @@ -2082,8 +2082,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp) | |||
| 2082 | } | 2082 | } |
| 2083 | 2083 | ||
| 2084 | static int | 2084 | static int |
| 2085 | NCR_700_change_queue_depth(struct scsi_device *SDp, int depth) | 2085 | NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason) |
| 2086 | { | 2086 | { |
| 2087 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 2088 | return -EOPNOTSUPP; | ||
| 2089 | |||
| 2087 | if (depth > NCR_700_MAX_TAGS) | 2090 | if (depth > NCR_700_MAX_TAGS) |
| 2088 | depth = NCR_700_MAX_TAGS; | 2091 | depth = NCR_700_MAX_TAGS; |
| 2089 | 2092 | ||
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e11cca4c784c..36900c71a592 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -399,6 +399,17 @@ config SCSI_3W_9XXX | |||
| 399 | Please read the comments at the top of | 399 | Please read the comments at the top of |
| 400 | <file:drivers/scsi/3w-9xxx.c>. | 400 | <file:drivers/scsi/3w-9xxx.c>. |
| 401 | 401 | ||
| 402 | config SCSI_3W_SAS | ||
| 403 | tristate "3ware 97xx SAS/SATA-RAID support" | ||
| 404 | depends on PCI && SCSI | ||
| 405 | help | ||
| 406 | This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards. | ||
| 407 | |||
| 408 | <http://www.lsi.com> | ||
| 409 | |||
| 410 | Please read the comments at the top of | ||
| 411 | <file:drivers/scsi/3w-sas.c>. | ||
| 412 | |||
| 402 | config SCSI_7000FASST | 413 | config SCSI_7000FASST |
| 403 | tristate "7000FASST SCSI support" | 414 | tristate "7000FASST SCSI support" |
| 404 | depends on ISA && SCSI && ISA_DMA_API | 415 | depends on ISA && SCSI && ISA_DMA_API |
| @@ -621,6 +632,14 @@ config SCSI_FLASHPOINT | |||
| 621 | substantial, so users of MultiMaster Host Adapters may not | 632 | substantial, so users of MultiMaster Host Adapters may not |
| 622 | wish to include it. | 633 | wish to include it. |
| 623 | 634 | ||
| 635 | config VMWARE_PVSCSI | ||
| 636 | tristate "VMware PVSCSI driver support" | ||
| 637 | depends on PCI && SCSI && X86 | ||
| 638 | help | ||
| 639 | This driver supports VMware's para virtualized SCSI HBA. | ||
| 640 | To compile this driver as a module, choose M here: the | ||
| 641 | module will be called vmw_pvscsi. | ||
| 642 | |||
| 624 | config LIBFC | 643 | config LIBFC |
| 625 | tristate "LibFC module" | 644 | tristate "LibFC module" |
| 626 | select SCSI_FC_ATTRS | 645 | select SCSI_FC_ATTRS |
| @@ -644,7 +663,7 @@ config FCOE | |||
| 644 | config FCOE_FNIC | 663 | config FCOE_FNIC |
| 645 | tristate "Cisco FNIC Driver" | 664 | tristate "Cisco FNIC Driver" |
| 646 | depends on PCI && X86 | 665 | depends on PCI && X86 |
| 647 | select LIBFC | 666 | select LIBFCOE |
| 648 | help | 667 | help |
| 649 | This is support for the Cisco PCI-Express FCoE HBA. | 668 | This is support for the Cisco PCI-Express FCoE HBA. |
| 650 | 669 | ||
| @@ -1818,6 +1837,14 @@ config SCSI_PMCRAID | |||
| 1818 | ---help--- | 1837 | ---help--- |
| 1819 | This driver supports the PMC SIERRA MaxRAID adapters. | 1838 | This driver supports the PMC SIERRA MaxRAID adapters. |
| 1820 | 1839 | ||
| 1840 | config SCSI_PM8001 | ||
| 1841 | tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver" | ||
| 1842 | depends on PCI && SCSI | ||
| 1843 | select SCSI_SAS_LIBSAS | ||
| 1844 | help | ||
| 1845 | This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip | ||
| 1846 | based host adapters. | ||
| 1847 | |||
| 1821 | config SCSI_SRP | 1848 | config SCSI_SRP |
| 1822 | tristate "SCSI RDMA Protocol helper library" | 1849 | tristate "SCSI RDMA Protocol helper library" |
| 1823 | depends on SCSI && PCI | 1850 | depends on SCSI && PCI |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3ad61db5e3fa..280d3c657d60 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
| @@ -70,6 +70,7 @@ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ | |||
| 70 | obj-$(CONFIG_SCSI_AACRAID) += aacraid/ | 70 | obj-$(CONFIG_SCSI_AACRAID) += aacraid/ |
| 71 | obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o | 71 | obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o |
| 72 | obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ | 72 | obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ |
| 73 | obj-$(CONFIG_SCSI_PM8001) += pm8001/ | ||
| 73 | obj-$(CONFIG_SCSI_IPS) += ips.o | 74 | obj-$(CONFIG_SCSI_IPS) += ips.o |
| 74 | obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o | 75 | obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o |
| 75 | obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o | 76 | obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o |
| @@ -113,6 +114,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o | |||
| 113 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o | 114 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o |
| 114 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o | 115 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o |
| 115 | obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o | 116 | obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o |
| 117 | obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o | ||
| 116 | obj-$(CONFIG_SCSI_PPA) += ppa.o | 118 | obj-$(CONFIG_SCSI_PPA) += ppa.o |
| 117 | obj-$(CONFIG_SCSI_IMM) += imm.o | 119 | obj-$(CONFIG_SCSI_IMM) += imm.o |
| 118 | obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o | 120 | obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o |
| @@ -133,6 +135,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ | |||
| 133 | obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ | 135 | obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ |
| 134 | obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ | 136 | obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ |
| 135 | obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o | 137 | obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o |
| 138 | obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o | ||
| 136 | 139 | ||
| 137 | obj-$(CONFIG_ARM) += arm/ | 140 | obj-$(CONFIG_ARM) += arm/ |
| 138 | 141 | ||
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index cdbdec9f4fb2..83986ed86556 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -526,10 +526,10 @@ struct aac_driver_ident | |||
| 526 | 526 | ||
| 527 | /* | 527 | /* |
| 528 | * The adapter interface specs all queues to be located in the same | 528 | * The adapter interface specs all queues to be located in the same |
| 529 | * physically contigous block. The host structure that defines the | 529 | * physically contiguous block. The host structure that defines the |
| 530 | * commuication queues will assume they are each a separate physically | 530 | * commuication queues will assume they are each a separate physically |
| 531 | * contigous memory region that will support them all being one big | 531 | * contiguous memory region that will support them all being one big |
| 532 | * contigous block. | 532 | * contiguous block. |
| 533 | * There is a command and response queue for each level and direction of | 533 | * There is a command and response queue for each level and direction of |
| 534 | * commuication. These regions are accessed by both the host and adapter. | 534 | * commuication. These regions are accessed by both the host and adapter. |
| 535 | */ | 535 | */ |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index d598eba630d0..666d5151d628 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
| @@ -226,7 +226,7 @@ static int aac_comm_init(struct aac_dev * dev) | |||
| 226 | spin_lock_init(&dev->fib_lock); | 226 | spin_lock_init(&dev->fib_lock); |
| 227 | 227 | ||
| 228 | /* | 228 | /* |
| 229 | * Allocate the physically contigous space for the commuication | 229 | * Allocate the physically contiguous space for the commuication |
| 230 | * queue headers. | 230 | * queue headers. |
| 231 | */ | 231 | */ |
| 232 | 232 | ||
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 9b97c3e016fe..e9373a2d14fa 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
| @@ -472,8 +472,12 @@ static int aac_slave_configure(struct scsi_device *sdev) | |||
| 472 | * total capacity and the queue depth supported by the target device. | 472 | * total capacity and the queue depth supported by the target device. |
| 473 | */ | 473 | */ |
| 474 | 474 | ||
| 475 | static int aac_change_queue_depth(struct scsi_device *sdev, int depth) | 475 | static int aac_change_queue_depth(struct scsi_device *sdev, int depth, |
| 476 | int reason) | ||
| 476 | { | 477 | { |
| 478 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 479 | return -EOPNOTSUPP; | ||
| 480 | |||
| 477 | if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && | 481 | if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && |
| 478 | (sdev_channel(sdev) == CONTAINER_CHANNEL)) { | 482 | (sdev_channel(sdev) == CONTAINER_CHANNEL)) { |
| 479 | struct scsi_device * dev; | 483 | struct scsi_device * dev; |
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index b756041f0b26..22626abdb630 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
| @@ -7969,7 +7969,7 @@ static int advansys_reset(struct scsi_cmnd *scp) | |||
| 7969 | ASC_DBG(1, "before AscInitAsc1000Driver()\n"); | 7969 | ASC_DBG(1, "before AscInitAsc1000Driver()\n"); |
| 7970 | status = AscInitAsc1000Driver(asc_dvc); | 7970 | status = AscInitAsc1000Driver(asc_dvc); |
| 7971 | 7971 | ||
| 7972 | /* Refer to ASC_IERR_* defintions for meaning of 'err_code'. */ | 7972 | /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ |
| 7973 | if (asc_dvc->err_code) { | 7973 | if (asc_dvc->err_code) { |
| 7974 | scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " | 7974 | scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " |
| 7975 | "0x%x\n", asc_dvc->err_code); | 7975 | "0x%x\n", asc_dvc->err_code); |
diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq index 58bc17591b54..2fb78e35a9e5 100644 --- a/drivers/scsi/aic7xxx/aic79xx.seq +++ b/drivers/scsi/aic7xxx/aic79xx.seq | |||
| @@ -217,7 +217,7 @@ BEGIN_CRITICAL; | |||
| 217 | scbdma_tohost_done: | 217 | scbdma_tohost_done: |
| 218 | test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; | 218 | test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; |
| 219 | /* | 219 | /* |
| 220 | * An SCB has been succesfully uploaded to the host. | 220 | * An SCB has been successfully uploaded to the host. |
| 221 | * If the SCB was uploaded for some reason other than | 221 | * If the SCB was uploaded for some reason other than |
| 222 | * bad SCSI status (currently only for underruns), we | 222 | * bad SCSI status (currently only for underruns), we |
| 223 | * queue the SCB for normal completion. Otherwise, we | 223 | * queue the SCB for normal completion. Otherwise, we |
| @@ -1281,7 +1281,7 @@ END_CRITICAL; | |||
| 1281 | * Is it a disconnect message? Set a flag in the SCB to remind us | 1281 | * Is it a disconnect message? Set a flag in the SCB to remind us |
| 1282 | * and await the bus going free. If this is an untagged transaction | 1282 | * and await the bus going free. If this is an untagged transaction |
| 1283 | * store the SCB id for it in our untagged target table for lookup on | 1283 | * store the SCB id for it in our untagged target table for lookup on |
| 1284 | * a reselction. | 1284 | * a reselection. |
| 1285 | */ | 1285 | */ |
| 1286 | mesgin_disconnect: | 1286 | mesgin_disconnect: |
| 1287 | /* | 1287 | /* |
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 63b521d615f2..4d419c155ce9 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
| @@ -2487,7 +2487,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) | |||
| 2487 | /* | 2487 | /* |
| 2488 | * Although the driver does not care about the | 2488 | * Although the driver does not care about the |
| 2489 | * 'Selection in Progress' status bit, the busy | 2489 | * 'Selection in Progress' status bit, the busy |
| 2490 | * LED does. SELINGO is only cleared by a sucessfull | 2490 | * LED does. SELINGO is only cleared by a successfull |
| 2491 | * selection, so we must manually clear it to insure | 2491 | * selection, so we must manually clear it to insure |
| 2492 | * the LED turns off just incase no future successful | 2492 | * the LED turns off just incase no future successful |
| 2493 | * selections occur (e.g. no devices on the bus). | 2493 | * selections occur (e.g. no devices on the bus). |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 75b23317bd26..1222a7ac698a 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
| @@ -2335,7 +2335,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd) | |||
| 2335 | /* | 2335 | /* |
| 2336 | * The sequencer will never re-reference the | 2336 | * The sequencer will never re-reference the |
| 2337 | * in-core SCB. To make sure we are notified | 2337 | * in-core SCB. To make sure we are notified |
| 2338 | * during reslection, set the MK_MESSAGE flag in | 2338 | * during reselection, set the MK_MESSAGE flag in |
| 2339 | * the card's copy of the SCB. | 2339 | * the card's copy of the SCB. |
| 2340 | */ | 2340 | */ |
| 2341 | ahd_outb(ahd, SCB_CONTROL, | 2341 | ahd_outb(ahd, SCB_CONTROL, |
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq index 15196390e28d..5a4cfc954a9f 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.seq +++ b/drivers/scsi/aic7xxx/aic7xxx.seq | |||
| @@ -1693,7 +1693,7 @@ if ((ahc->flags & AHC_INITIATORROLE) != 0) { | |||
| 1693 | * Is it a disconnect message? Set a flag in the SCB to remind us | 1693 | * Is it a disconnect message? Set a flag in the SCB to remind us |
| 1694 | * and await the bus going free. If this is an untagged transaction | 1694 | * and await the bus going free. If this is an untagged transaction |
| 1695 | * store the SCB id for it in our untagged target table for lookup on | 1695 | * store the SCB id for it in our untagged target table for lookup on |
| 1696 | * a reselction. | 1696 | * a reselection. |
| 1697 | */ | 1697 | */ |
| 1698 | mesgin_disconnect: | 1698 | mesgin_disconnect: |
| 1699 | /* | 1699 | /* |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 8dfb59d58992..45aa728a76b2 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c | |||
| @@ -1733,7 +1733,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) | |||
| 1733 | /* | 1733 | /* |
| 1734 | * Although the driver does not care about the | 1734 | * Although the driver does not care about the |
| 1735 | * 'Selection in Progress' status bit, the busy | 1735 | * 'Selection in Progress' status bit, the busy |
| 1736 | * LED does. SELINGO is only cleared by a sucessfull | 1736 | * LED does. SELINGO is only cleared by a successfull |
| 1737 | * selection, so we must manually clear it to insure | 1737 | * selection, so we must manually clear it to insure |
| 1738 | * the LED turns off just incase no future successful | 1738 | * the LED turns off just incase no future successful |
| 1739 | * selections occur (e.g. no devices on the bus). | 1739 | * selections occur (e.g. no devices on the bus). |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index fd2b9785ff4f..8cb05dc8e6a1 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
| @@ -2290,7 +2290,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) | |||
| 2290 | * In the non-paging case, the sequencer will | 2290 | * In the non-paging case, the sequencer will |
| 2291 | * never re-reference the in-core SCB. | 2291 | * never re-reference the in-core SCB. |
| 2292 | * To make sure we are notified during | 2292 | * To make sure we are notified during |
| 2293 | * reslection, set the MK_MESSAGE flag in | 2293 | * reselection, set the MK_MESSAGE flag in |
| 2294 | * the card's copy of the SCB. | 2294 | * the card's copy of the SCB. |
| 2295 | */ | 2295 | */ |
| 2296 | if ((ahc->flags & AHC_PAGESCBS) == 0) { | 2296 | if ((ahc->flags & AHC_PAGESCBS) == 0) { |
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h index a43e8cdf4ee4..28aaf349c111 100644 --- a/drivers/scsi/aic94xx/aic94xx_reg_def.h +++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Aic94xx SAS/SATA driver hardware registers defintions. | 2 | * Aic94xx SAS/SATA driver hardware registers definitions. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2004 Adaptec, Inc. All rights reserved. | 4 | * Copyright (C) 2004 Adaptec, Inc. All rights reserved. |
| 5 | * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com> | 5 | * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com> |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 80aac01b5a6f..47d5d19f8c92 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
| @@ -98,8 +98,11 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); | |||
| 98 | static const char *arcmsr_info(struct Scsi_Host *); | 98 | static const char *arcmsr_info(struct Scsi_Host *); |
| 99 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); | 99 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); |
| 100 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, | 100 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, |
| 101 | int queue_depth) | 101 | int queue_depth, int reason) |
| 102 | { | 102 | { |
| 103 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 104 | return -EOPNOTSUPP; | ||
| 105 | |||
| 103 | if (queue_depth > ARCMSR_MAX_CMD_PERLUN) | 106 | if (queue_depth > ARCMSR_MAX_CMD_PERLUN) |
| 104 | queue_depth = ARCMSR_MAX_CMD_PERLUN; | 107 | queue_depth = ARCMSR_MAX_CMD_PERLUN; |
| 105 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | 108 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); |
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index b36020dcf012..a93a5040f087 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h | |||
| @@ -20,8 +20,10 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
| 22 | #include <linux/if_vlan.h> | 22 | #include <linux/if_vlan.h> |
| 23 | 23 | #include <linux/blk-iopoll.h> | |
| 24 | #define FW_VER_LEN 32 | 24 | #define FW_VER_LEN 32 |
| 25 | #define MCC_Q_LEN 128 | ||
| 26 | #define MCC_CQ_LEN 256 | ||
| 25 | 27 | ||
| 26 | struct be_dma_mem { | 28 | struct be_dma_mem { |
| 27 | void *va; | 29 | void *va; |
| @@ -74,18 +76,14 @@ static inline void queue_tail_inc(struct be_queue_info *q) | |||
| 74 | 76 | ||
| 75 | struct be_eq_obj { | 77 | struct be_eq_obj { |
| 76 | struct be_queue_info q; | 78 | struct be_queue_info q; |
| 77 | char desc[32]; | 79 | struct beiscsi_hba *phba; |
| 78 | 80 | struct be_queue_info *cq; | |
| 79 | /* Adaptive interrupt coalescing (AIC) info */ | 81 | struct blk_iopoll iopoll; |
| 80 | bool enable_aic; | ||
| 81 | u16 min_eqd; /* in usecs */ | ||
| 82 | u16 max_eqd; /* in usecs */ | ||
| 83 | u16 cur_eqd; /* in usecs */ | ||
| 84 | }; | 82 | }; |
| 85 | 83 | ||
| 86 | struct be_mcc_obj { | 84 | struct be_mcc_obj { |
| 87 | struct be_queue_info *q; | 85 | struct be_queue_info q; |
| 88 | struct be_queue_info *cq; | 86 | struct be_queue_info cq; |
| 89 | }; | 87 | }; |
| 90 | 88 | ||
| 91 | struct be_ctrl_info { | 89 | struct be_ctrl_info { |
| @@ -176,8 +174,4 @@ static inline void swap_dws(void *wrb, int len) | |||
| 176 | } while (len); | 174 | } while (len); |
| 177 | #endif /* __BIG_ENDIAN */ | 175 | #endif /* __BIG_ENDIAN */ |
| 178 | } | 176 | } |
| 179 | |||
| 180 | extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, | ||
| 181 | u16 num_popped); | ||
| 182 | |||
| 183 | #endif /* BEISCSI_H */ | 177 | #endif /* BEISCSI_H */ |
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 08007b6e42df..698a527d6cca 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c | |||
| @@ -19,6 +19,16 @@ | |||
| 19 | #include "be_mgmt.h" | 19 | #include "be_mgmt.h" |
| 20 | #include "be_main.h" | 20 | #include "be_main.h" |
| 21 | 21 | ||
| 22 | static void be_mcc_notify(struct beiscsi_hba *phba) | ||
| 23 | { | ||
| 24 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; | ||
| 25 | u32 val = 0; | ||
| 26 | |||
| 27 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | ||
| 28 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | ||
| 29 | iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); | ||
| 30 | } | ||
| 31 | |||
| 22 | static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) | 32 | static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) |
| 23 | { | 33 | { |
| 24 | if (compl->flags != 0) { | 34 | if (compl->flags != 0) { |
| @@ -54,13 +64,56 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, | |||
| 54 | return 0; | 64 | return 0; |
| 55 | } | 65 | } |
| 56 | 66 | ||
| 67 | |||
| 57 | static inline bool is_link_state_evt(u32 trailer) | 68 | static inline bool is_link_state_evt(u32 trailer) |
| 58 | { | 69 | { |
| 59 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | 70 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & |
| 60 | ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE); | 71 | ASYNC_TRAILER_EVENT_CODE_MASK) == |
| 72 | ASYNC_EVENT_CODE_LINK_STATE); | ||
| 73 | } | ||
| 74 | |||
| 75 | static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) | ||
| 76 | { | ||
| 77 | struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq; | ||
| 78 | struct be_mcc_compl *compl = queue_tail_node(mcc_cq); | ||
| 79 | |||
| 80 | if (be_mcc_compl_is_new(compl)) { | ||
| 81 | queue_tail_inc(mcc_cq); | ||
| 82 | return compl; | ||
| 83 | } | ||
| 84 | return NULL; | ||
| 85 | } | ||
| 86 | |||
| 87 | static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) | ||
| 88 | { | ||
| 89 | iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); | ||
| 90 | } | ||
| 91 | |||
| 92 | static void beiscsi_async_link_state_process(struct beiscsi_hba *phba, | ||
| 93 | struct be_async_event_link_state *evt) | ||
| 94 | { | ||
| 95 | switch (evt->port_link_status) { | ||
| 96 | case ASYNC_EVENT_LINK_DOWN: | ||
| 97 | SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", | ||
| 98 | evt->physical_port); | ||
| 99 | phba->state |= BE_ADAPTER_LINK_DOWN; | ||
| 100 | break; | ||
| 101 | case ASYNC_EVENT_LINK_UP: | ||
| 102 | phba->state = BE_ADAPTER_UP; | ||
| 103 | SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", | ||
| 104 | evt->physical_port); | ||
| 105 | iscsi_host_for_each_session(phba->shost, | ||
| 106 | be2iscsi_fail_session); | ||
| 107 | break; | ||
| 108 | default: | ||
| 109 | SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" | ||
| 110 | "Physical Port %d \n", | ||
| 111 | evt->port_link_status, | ||
| 112 | evt->physical_port); | ||
| 113 | } | ||
| 61 | } | 114 | } |
| 62 | 115 | ||
| 63 | void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, | 116 | static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm, |
| 64 | u16 num_popped) | 117 | u16 num_popped) |
| 65 | { | 118 | { |
| 66 | u32 val = 0; | 119 | u32 val = 0; |
| @@ -68,7 +121,66 @@ void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, | |||
| 68 | if (arm) | 121 | if (arm) |
| 69 | val |= 1 << DB_CQ_REARM_SHIFT; | 122 | val |= 1 << DB_CQ_REARM_SHIFT; |
| 70 | val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; | 123 | val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; |
| 71 | iowrite32(val, ctrl->db + DB_CQ_OFFSET); | 124 | iowrite32(val, phba->db_va + DB_CQ_OFFSET); |
| 125 | } | ||
| 126 | |||
| 127 | |||
| 128 | int beiscsi_process_mcc(struct beiscsi_hba *phba) | ||
| 129 | { | ||
| 130 | struct be_mcc_compl *compl; | ||
| 131 | int num = 0, status = 0; | ||
| 132 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 133 | |||
| 134 | spin_lock_bh(&phba->ctrl.mcc_cq_lock); | ||
| 135 | while ((compl = be_mcc_compl_get(phba))) { | ||
| 136 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | ||
| 137 | /* Interpret flags as an async trailer */ | ||
| 138 | BUG_ON(!is_link_state_evt(compl->flags)); | ||
| 139 | |||
| 140 | /* Interpret compl as a async link evt */ | ||
| 141 | beiscsi_async_link_state_process(phba, | ||
| 142 | (struct be_async_event_link_state *) compl); | ||
| 143 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { | ||
| 144 | status = be_mcc_compl_process(ctrl, compl); | ||
| 145 | atomic_dec(&phba->ctrl.mcc_obj.q.used); | ||
| 146 | } | ||
| 147 | be_mcc_compl_use(compl); | ||
| 148 | num++; | ||
| 149 | } | ||
| 150 | |||
| 151 | if (num) | ||
| 152 | beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num); | ||
| 153 | |||
| 154 | spin_unlock_bh(&phba->ctrl.mcc_cq_lock); | ||
| 155 | return status; | ||
| 156 | } | ||
| 157 | |||
| 158 | /* Wait till no more pending mcc requests are present */ | ||
| 159 | static int be_mcc_wait_compl(struct beiscsi_hba *phba) | ||
| 160 | { | ||
| 161 | #define mcc_timeout 120000 /* 5s timeout */ | ||
| 162 | int i, status; | ||
| 163 | for (i = 0; i < mcc_timeout; i++) { | ||
| 164 | status = beiscsi_process_mcc(phba); | ||
| 165 | if (status) | ||
| 166 | return status; | ||
| 167 | |||
| 168 | if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0) | ||
| 169 | break; | ||
| 170 | udelay(100); | ||
| 171 | } | ||
| 172 | if (i == mcc_timeout) { | ||
| 173 | dev_err(&phba->pcidev->dev, "mccq poll timed out\n"); | ||
| 174 | return -1; | ||
| 175 | } | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | /* Notify MCC requests and wait for completion */ | ||
| 180 | int be_mcc_notify_wait(struct beiscsi_hba *phba) | ||
| 181 | { | ||
| 182 | be_mcc_notify(phba); | ||
| 183 | return be_mcc_wait_compl(phba); | ||
| 72 | } | 184 | } |
| 73 | 185 | ||
| 74 | static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) | 186 | static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) |
| @@ -142,6 +254,52 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) | |||
| 142 | return 0; | 254 | return 0; |
| 143 | } | 255 | } |
| 144 | 256 | ||
| 257 | /* | ||
| 258 | * Insert the mailbox address into the doorbell in two steps | ||
| 259 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs | ||
| 260 | */ | ||
| 261 | static int be_mbox_notify_wait(struct beiscsi_hba *phba) | ||
| 262 | { | ||
| 263 | int status; | ||
| 264 | u32 val = 0; | ||
| 265 | void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET; | ||
| 266 | struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem; | ||
| 267 | struct be_mcc_mailbox *mbox = mbox_mem->va; | ||
| 268 | struct be_mcc_compl *compl = &mbox->compl; | ||
| 269 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 270 | |||
| 271 | val |= MPU_MAILBOX_DB_HI_MASK; | ||
| 272 | /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ | ||
| 273 | val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; | ||
| 274 | iowrite32(val, db); | ||
| 275 | |||
| 276 | /* wait for ready to be set */ | ||
| 277 | status = be_mbox_db_ready_wait(ctrl); | ||
| 278 | if (status != 0) | ||
| 279 | return status; | ||
| 280 | |||
| 281 | val = 0; | ||
| 282 | /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ | ||
| 283 | val |= (u32)(mbox_mem->dma >> 4) << 2; | ||
| 284 | iowrite32(val, db); | ||
| 285 | |||
| 286 | status = be_mbox_db_ready_wait(ctrl); | ||
| 287 | if (status != 0) | ||
| 288 | return status; | ||
| 289 | |||
| 290 | /* A cq entry has been made now */ | ||
| 291 | if (be_mcc_compl_is_new(compl)) { | ||
| 292 | status = be_mcc_compl_process(ctrl, &mbox->compl); | ||
| 293 | be_mcc_compl_use(compl); | ||
| 294 | if (status) | ||
| 295 | return status; | ||
| 296 | } else { | ||
| 297 | dev_err(&phba->pcidev->dev, "invalid mailbox completion\n"); | ||
| 298 | return -1; | ||
| 299 | } | ||
| 300 | return 0; | ||
| 301 | } | ||
| 302 | |||
| 145 | void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | 303 | void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, |
| 146 | bool embedded, u8 sge_cnt) | 304 | bool embedded, u8 sge_cnt) |
| 147 | { | 305 | { |
| @@ -203,6 +361,20 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) | |||
| 203 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; | 361 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; |
| 204 | } | 362 | } |
| 205 | 363 | ||
| 364 | struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba) | ||
| 365 | { | ||
| 366 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; | ||
| 367 | struct be_mcc_wrb *wrb; | ||
| 368 | |||
| 369 | BUG_ON(atomic_read(&mccq->used) >= mccq->len); | ||
| 370 | wrb = queue_head_node(mccq); | ||
| 371 | queue_head_inc(mccq); | ||
| 372 | atomic_inc(&mccq->used); | ||
| 373 | memset(wrb, 0, sizeof(*wrb)); | ||
| 374 | return wrb; | ||
| 375 | } | ||
| 376 | |||
| 377 | |||
| 206 | int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, | 378 | int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, |
| 207 | struct be_queue_info *eq, int eq_delay) | 379 | struct be_queue_info *eq, int eq_delay) |
| 208 | { | 380 | { |
| @@ -212,6 +384,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, | |||
| 212 | struct be_dma_mem *q_mem = &eq->dma_mem; | 384 | struct be_dma_mem *q_mem = &eq->dma_mem; |
| 213 | int status; | 385 | int status; |
| 214 | 386 | ||
| 387 | SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n"); | ||
| 215 | spin_lock(&ctrl->mbox_lock); | 388 | spin_lock(&ctrl->mbox_lock); |
| 216 | memset(wrb, 0, sizeof(*wrb)); | 389 | memset(wrb, 0, sizeof(*wrb)); |
| 217 | 390 | ||
| @@ -249,6 +422,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) | |||
| 249 | int status; | 422 | int status; |
| 250 | u8 *endian_check; | 423 | u8 *endian_check; |
| 251 | 424 | ||
| 425 | SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n"); | ||
| 252 | spin_lock(&ctrl->mbox_lock); | 426 | spin_lock(&ctrl->mbox_lock); |
| 253 | memset(wrb, 0, sizeof(*wrb)); | 427 | memset(wrb, 0, sizeof(*wrb)); |
| 254 | 428 | ||
| @@ -282,6 +456,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
| 282 | void *ctxt = &req->context; | 456 | void *ctxt = &req->context; |
| 283 | int status; | 457 | int status; |
| 284 | 458 | ||
| 459 | SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n"); | ||
| 285 | spin_lock(&ctrl->mbox_lock); | 460 | spin_lock(&ctrl->mbox_lock); |
| 286 | memset(wrb, 0, sizeof(*wrb)); | 461 | memset(wrb, 0, sizeof(*wrb)); |
| 287 | 462 | ||
| @@ -289,7 +464,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
| 289 | 464 | ||
| 290 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 465 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
| 291 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); | 466 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); |
| 292 | |||
| 293 | if (!q_mem->va) | 467 | if (!q_mem->va) |
| 294 | SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n"); | 468 | SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n"); |
| 295 | 469 | ||
| @@ -329,6 +503,53 @@ static u32 be_encoded_q_len(int q_len) | |||
| 329 | len_encoded = 0; | 503 | len_encoded = 0; |
| 330 | return len_encoded; | 504 | return len_encoded; |
| 331 | } | 505 | } |
| 506 | |||
| 507 | int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, | ||
| 508 | struct be_queue_info *mccq, | ||
| 509 | struct be_queue_info *cq) | ||
| 510 | { | ||
| 511 | struct be_mcc_wrb *wrb; | ||
| 512 | struct be_cmd_req_mcc_create *req; | ||
| 513 | struct be_dma_mem *q_mem = &mccq->dma_mem; | ||
| 514 | struct be_ctrl_info *ctrl; | ||
| 515 | void *ctxt; | ||
| 516 | int status; | ||
| 517 | |||
| 518 | spin_lock(&phba->ctrl.mbox_lock); | ||
| 519 | ctrl = &phba->ctrl; | ||
| 520 | wrb = wrb_from_mbox(&ctrl->mbox_mem); | ||
| 521 | req = embedded_payload(wrb); | ||
| 522 | ctxt = &req->context; | ||
| 523 | |||
| 524 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | ||
| 525 | |||
| 526 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
| 527 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | ||
| 528 | |||
| 529 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | ||
| 530 | |||
| 531 | AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, | ||
| 532 | PCI_FUNC(phba->pcidev->devfn)); | ||
| 533 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); | ||
| 534 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | ||
| 535 | be_encoded_q_len(mccq->len)); | ||
| 536 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | ||
| 537 | |||
| 538 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | ||
| 539 | |||
| 540 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | ||
| 541 | |||
| 542 | status = be_mbox_notify_wait(phba); | ||
| 543 | if (!status) { | ||
| 544 | struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); | ||
| 545 | mccq->id = le16_to_cpu(resp->id); | ||
| 546 | mccq->created = true; | ||
| 547 | } | ||
| 548 | spin_unlock(&phba->ctrl.mbox_lock); | ||
| 549 | |||
| 550 | return status; | ||
| 551 | } | ||
| 552 | |||
| 332 | int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | 553 | int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, |
| 333 | int queue_type) | 554 | int queue_type) |
| 334 | { | 555 | { |
| @@ -337,6 +558,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
| 337 | u8 subsys = 0, opcode = 0; | 558 | u8 subsys = 0, opcode = 0; |
| 338 | int status; | 559 | int status; |
| 339 | 560 | ||
| 561 | SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n"); | ||
| 340 | spin_lock(&ctrl->mbox_lock); | 562 | spin_lock(&ctrl->mbox_lock); |
| 341 | memset(wrb, 0, sizeof(*wrb)); | 563 | memset(wrb, 0, sizeof(*wrb)); |
| 342 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 564 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
| @@ -350,6 +572,10 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
| 350 | subsys = CMD_SUBSYSTEM_COMMON; | 572 | subsys = CMD_SUBSYSTEM_COMMON; |
| 351 | opcode = OPCODE_COMMON_CQ_DESTROY; | 573 | opcode = OPCODE_COMMON_CQ_DESTROY; |
| 352 | break; | 574 | break; |
| 575 | case QTYPE_MCCQ: | ||
| 576 | subsys = CMD_SUBSYSTEM_COMMON; | ||
| 577 | opcode = OPCODE_COMMON_MCC_DESTROY; | ||
| 578 | break; | ||
| 353 | case QTYPE_WRBQ: | 579 | case QTYPE_WRBQ: |
| 354 | subsys = CMD_SUBSYSTEM_ISCSI; | 580 | subsys = CMD_SUBSYSTEM_ISCSI; |
| 355 | opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; | 581 | opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; |
| @@ -377,30 +603,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
| 377 | return status; | 603 | return status; |
| 378 | } | 604 | } |
| 379 | 605 | ||
| 380 | int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr) | ||
| 381 | { | ||
| 382 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | ||
| 383 | struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb); | ||
| 384 | int status; | ||
| 385 | |||
| 386 | spin_lock(&ctrl->mbox_lock); | ||
| 387 | memset(wrb, 0, sizeof(*wrb)); | ||
| 388 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | ||
| 389 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | ||
| 390 | OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, | ||
| 391 | sizeof(*req)); | ||
| 392 | |||
| 393 | status = be_mbox_notify(ctrl); | ||
| 394 | if (!status) { | ||
| 395 | struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb); | ||
| 396 | |||
| 397 | memcpy(mac_addr, resp->mac_address, ETH_ALEN); | ||
| 398 | } | ||
| 399 | |||
| 400 | spin_unlock(&ctrl->mbox_lock); | ||
| 401 | return status; | ||
| 402 | } | ||
| 403 | |||
| 404 | int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, | 606 | int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, |
| 405 | struct be_queue_info *cq, | 607 | struct be_queue_info *cq, |
| 406 | struct be_queue_info *dq, int length, | 608 | struct be_queue_info *dq, int length, |
| @@ -412,6 +614,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, | |||
| 412 | void *ctxt = &req->context; | 614 | void *ctxt = &req->context; |
| 413 | int status; | 615 | int status; |
| 414 | 616 | ||
| 617 | SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n"); | ||
| 415 | spin_lock(&ctrl->mbox_lock); | 618 | spin_lock(&ctrl->mbox_lock); |
| 416 | memset(wrb, 0, sizeof(*wrb)); | 619 | memset(wrb, 0, sizeof(*wrb)); |
| 417 | 620 | ||
| @@ -468,8 +671,10 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, | |||
| 468 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | 671 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); |
| 469 | 672 | ||
| 470 | status = be_mbox_notify(ctrl); | 673 | status = be_mbox_notify(ctrl); |
| 471 | if (!status) | 674 | if (!status) { |
| 472 | wrbq->id = le16_to_cpu(resp->cid); | 675 | wrbq->id = le16_to_cpu(resp->cid); |
| 676 | wrbq->created = true; | ||
| 677 | } | ||
| 473 | spin_unlock(&ctrl->mbox_lock); | 678 | spin_unlock(&ctrl->mbox_lock); |
| 474 | return status; | 679 | return status; |
| 475 | } | 680 | } |
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index c20d686cbb43..5de8acb924cb 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h | |||
| @@ -47,6 +47,8 @@ struct be_mcc_wrb { | |||
| 47 | 47 | ||
| 48 | #define CQE_FLAGS_VALID_MASK (1 << 31) | 48 | #define CQE_FLAGS_VALID_MASK (1 << 31) |
| 49 | #define CQE_FLAGS_ASYNC_MASK (1 << 30) | 49 | #define CQE_FLAGS_ASYNC_MASK (1 << 30) |
| 50 | #define CQE_FLAGS_COMPLETED_MASK (1 << 28) | ||
| 51 | #define CQE_FLAGS_CONSUMED_MASK (1 << 27) | ||
| 50 | 52 | ||
| 51 | /* Completion Status */ | 53 | /* Completion Status */ |
| 52 | #define MCC_STATUS_SUCCESS 0x0 | 54 | #define MCC_STATUS_SUCCESS 0x0 |
| @@ -173,7 +175,7 @@ struct be_cmd_req_hdr { | |||
| 173 | u8 domain; /* dword 0 */ | 175 | u8 domain; /* dword 0 */ |
| 174 | u32 timeout; /* dword 1 */ | 176 | u32 timeout; /* dword 1 */ |
| 175 | u32 request_length; /* dword 2 */ | 177 | u32 request_length; /* dword 2 */ |
| 176 | u32 rsvd; /* dword 3 */ | 178 | u32 rsvd0; /* dword 3 */ |
| 177 | }; | 179 | }; |
| 178 | 180 | ||
| 179 | struct be_cmd_resp_hdr { | 181 | struct be_cmd_resp_hdr { |
| @@ -382,7 +384,6 @@ struct be_cmd_req_modify_eq_delay { | |||
| 382 | 384 | ||
| 383 | #define ETH_ALEN 6 | 385 | #define ETH_ALEN 6 |
| 384 | 386 | ||
| 385 | |||
| 386 | struct be_cmd_req_get_mac_addr { | 387 | struct be_cmd_req_get_mac_addr { |
| 387 | struct be_cmd_req_hdr hdr; | 388 | struct be_cmd_req_hdr hdr; |
| 388 | u32 nic_port_count; | 389 | u32 nic_port_count; |
| @@ -417,14 +418,21 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
| 417 | 418 | ||
| 418 | int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | 419 | int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, |
| 419 | int type); | 420 | int type); |
| 421 | int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, | ||
| 422 | struct be_queue_info *mccq, | ||
| 423 | struct be_queue_info *cq); | ||
| 424 | |||
| 420 | int be_poll_mcc(struct be_ctrl_info *ctrl); | 425 | int be_poll_mcc(struct be_ctrl_info *ctrl); |
| 421 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl); | 426 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, |
| 422 | int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr); | 427 | struct beiscsi_hba *phba); |
| 428 | int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr); | ||
| 423 | 429 | ||
| 424 | /*ISCSI Functuions */ | 430 | /*ISCSI Functuions */ |
| 425 | int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); | 431 | int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); |
| 426 | 432 | ||
| 427 | struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); | 433 | struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); |
| 434 | struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); | ||
| 435 | int be_mcc_notify_wait(struct beiscsi_hba *phba); | ||
| 428 | 436 | ||
| 429 | int be_mbox_notify(struct be_ctrl_info *ctrl); | 437 | int be_mbox_notify(struct be_ctrl_info *ctrl); |
| 430 | 438 | ||
| @@ -531,6 +539,23 @@ struct amap_sol_cqe { | |||
| 531 | u8 valid; /* dword 3 */ | 539 | u8 valid; /* dword 3 */ |
| 532 | } __packed; | 540 | } __packed; |
| 533 | 541 | ||
| 542 | #define SOL_ICD_INDEX_MASK 0x0003FFC0 | ||
| 543 | struct amap_sol_cqe_ring { | ||
| 544 | u8 hw_sts[8]; /* dword 0 */ | ||
| 545 | u8 i_sts[8]; /* dword 0 */ | ||
| 546 | u8 i_resp[8]; /* dword 0 */ | ||
| 547 | u8 i_flags[7]; /* dword 0 */ | ||
| 548 | u8 s; /* dword 0 */ | ||
| 549 | u8 i_exp_cmd_sn[32]; /* dword 1 */ | ||
| 550 | u8 code[6]; /* dword 2 */ | ||
| 551 | u8 icd_index[12]; /* dword 2 */ | ||
| 552 | u8 rsvd[6]; /* dword 2 */ | ||
| 553 | u8 i_cmd_wnd[8]; /* dword 2 */ | ||
| 554 | u8 i_res_cnt[31]; /* dword 3 */ | ||
| 555 | u8 valid; /* dword 3 */ | ||
| 556 | } __packed; | ||
| 557 | |||
| 558 | |||
| 534 | 559 | ||
| 535 | /** | 560 | /** |
| 536 | * Post WRB Queue Doorbell Register used by the host Storage | 561 | * Post WRB Queue Doorbell Register used by the host Storage |
| @@ -664,8 +689,8 @@ struct be_fw_cfg { | |||
| 664 | #define OPCODE_COMMON_TCP_UPLOAD 56 | 689 | #define OPCODE_COMMON_TCP_UPLOAD 56 |
| 665 | #define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 | 690 | #define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 |
| 666 | /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ | 691 | /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ |
| 667 | #define CMD_ISCSI_CONNECTION_INVALIDATE 1 | 692 | #define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001 |
| 668 | #define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2 | 693 | #define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002 |
| 669 | #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 | 694 | #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 |
| 670 | 695 | ||
| 671 | #define INI_WR_CMD 1 /* Initiator write command */ | 696 | #define INI_WR_CMD 1 /* Initiator write command */ |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 2fd25442cfaf..d587b0362f18 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
| @@ -67,11 +67,11 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, | |||
| 67 | cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; | 67 | cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, | 70 | cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, |
| 71 | shost, cmds_max, | 71 | shost, cmds_max, |
| 72 | sizeof(*beiscsi_sess), | 72 | sizeof(*beiscsi_sess), |
| 73 | sizeof(*io_task), | 73 | sizeof(*io_task), |
| 74 | initial_cmdsn, ISCSI_MAX_TARGET); | 74 | initial_cmdsn, ISCSI_MAX_TARGET); |
| 75 | if (!cls_session) | 75 | if (!cls_session) |
| 76 | return NULL; | 76 | return NULL; |
| 77 | sess = cls_session->dd_data; | 77 | sess = cls_session->dd_data; |
| @@ -297,7 +297,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
| 297 | 297 | ||
| 298 | switch (param) { | 298 | switch (param) { |
| 299 | case ISCSI_HOST_PARAM_HWADDRESS: | 299 | case ISCSI_HOST_PARAM_HWADDRESS: |
| 300 | be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address); | 300 | be_cmd_get_mac_addr(phba, phba->mac_address); |
| 301 | len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); | 301 | len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); |
| 302 | break; | 302 | break; |
| 303 | default: | 303 | default: |
| @@ -377,16 +377,12 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) | |||
| 377 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; | 377 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; |
| 378 | struct beiscsi_endpoint *beiscsi_ep; | 378 | struct beiscsi_endpoint *beiscsi_ep; |
| 379 | struct beiscsi_offload_params params; | 379 | struct beiscsi_offload_params params; |
| 380 | struct iscsi_session *session = conn->session; | ||
| 381 | struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); | ||
| 382 | struct beiscsi_hba *phba = iscsi_host_priv(shost); | ||
| 383 | 380 | ||
| 384 | memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); | 381 | memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); |
| 385 | beiscsi_ep = beiscsi_conn->ep; | 382 | beiscsi_ep = beiscsi_conn->ep; |
| 386 | if (!beiscsi_ep) | 383 | if (!beiscsi_ep) |
| 387 | SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n"); | 384 | SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n"); |
| 388 | 385 | ||
| 389 | free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle); | ||
| 390 | beiscsi_conn->login_in_progress = 0; | 386 | beiscsi_conn->login_in_progress = 0; |
| 391 | beiscsi_set_params_for_offld(beiscsi_conn, ¶ms); | 387 | beiscsi_set_params_for_offld(beiscsi_conn, ¶ms); |
| 392 | beiscsi_offload_connection(beiscsi_conn, ¶ms); | 388 | beiscsi_offload_connection(beiscsi_conn, ¶ms); |
| @@ -498,6 +494,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
| 498 | SE_DEBUG(DBG_LVL_1, "shost is NULL \n"); | 494 | SE_DEBUG(DBG_LVL_1, "shost is NULL \n"); |
| 499 | return ERR_PTR(ret); | 495 | return ERR_PTR(ret); |
| 500 | } | 496 | } |
| 497 | |||
| 498 | if (phba->state) { | ||
| 499 | ret = -EBUSY; | ||
| 500 | SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n"); | ||
| 501 | return ERR_PTR(ret); | ||
| 502 | } | ||
| 503 | |||
| 501 | ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); | 504 | ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); |
| 502 | if (!ep) { | 505 | if (!ep) { |
| 503 | ret = -ENOMEM; | 506 | ret = -ENOMEM; |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4f1aca346e38..1a557fa77888 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
| @@ -39,7 +39,8 @@ | |||
| 39 | 39 | ||
| 40 | static unsigned int be_iopoll_budget = 10; | 40 | static unsigned int be_iopoll_budget = 10; |
| 41 | static unsigned int be_max_phys_size = 64; | 41 | static unsigned int be_max_phys_size = 64; |
| 42 | static unsigned int enable_msix; | 42 | static unsigned int enable_msix = 1; |
| 43 | static unsigned int ring_mode; | ||
| 43 | 44 | ||
| 44 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | 45 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); |
| 45 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); | 46 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); |
| @@ -58,6 +59,17 @@ static int beiscsi_slave_configure(struct scsi_device *sdev) | |||
| 58 | return 0; | 59 | return 0; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 62 | /*------------------- PCI Driver operations and data ----------------- */ | ||
| 63 | static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { | ||
| 64 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, | ||
| 65 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, | ||
| 66 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, | ||
| 67 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, | ||
| 68 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) }, | ||
| 69 | { 0 } | ||
| 70 | }; | ||
| 71 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | ||
| 72 | |||
| 61 | static struct scsi_host_template beiscsi_sht = { | 73 | static struct scsi_host_template beiscsi_sht = { |
| 62 | .module = THIS_MODULE, | 74 | .module = THIS_MODULE, |
| 63 | .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", | 75 | .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", |
| @@ -76,16 +88,8 @@ static struct scsi_host_template beiscsi_sht = { | |||
| 76 | .cmd_per_lun = BEISCSI_CMD_PER_LUN, | 88 | .cmd_per_lun = BEISCSI_CMD_PER_LUN, |
| 77 | .use_clustering = ENABLE_CLUSTERING, | 89 | .use_clustering = ENABLE_CLUSTERING, |
| 78 | }; | 90 | }; |
| 79 | static struct scsi_transport_template *beiscsi_scsi_transport; | ||
| 80 | 91 | ||
| 81 | /*------------------- PCI Driver operations and data ----------------- */ | 92 | static struct scsi_transport_template *beiscsi_scsi_transport; |
| 82 | static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { | ||
| 83 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, | ||
| 84 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, | ||
| 85 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, | ||
| 86 | { 0 } | ||
| 87 | }; | ||
| 88 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | ||
| 89 | 93 | ||
| 90 | static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) | 94 | static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) |
| 91 | { | 95 | { |
| @@ -104,7 +108,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) | |||
| 104 | shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; | 108 | shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; |
| 105 | shost->max_lun = BEISCSI_NUM_MAX_LUN; | 109 | shost->max_lun = BEISCSI_NUM_MAX_LUN; |
| 106 | shost->transportt = beiscsi_scsi_transport; | 110 | shost->transportt = beiscsi_scsi_transport; |
| 107 | |||
| 108 | phba = iscsi_host_priv(shost); | 111 | phba = iscsi_host_priv(shost); |
| 109 | memset(phba, 0, sizeof(*phba)); | 112 | memset(phba, 0, sizeof(*phba)); |
| 110 | phba->shost = shost; | 113 | phba->shost = shost; |
| @@ -181,6 +184,7 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) | |||
| 181 | return ret; | 184 | return ret; |
| 182 | } | 185 | } |
| 183 | 186 | ||
| 187 | pci_set_master(pcidev); | ||
| 184 | if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { | 188 | if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { |
| 185 | ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); | 189 | ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); |
| 186 | if (ret) { | 190 | if (ret) { |
| @@ -203,7 +207,6 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) | |||
| 203 | status = beiscsi_map_pci_bars(phba, pdev); | 207 | status = beiscsi_map_pci_bars(phba, pdev); |
| 204 | if (status) | 208 | if (status) |
| 205 | return status; | 209 | return status; |
| 206 | |||
| 207 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 210 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
| 208 | mbox_mem_alloc->va = pci_alloc_consistent(pdev, | 211 | mbox_mem_alloc->va = pci_alloc_consistent(pdev, |
| 209 | mbox_mem_alloc->size, | 212 | mbox_mem_alloc->size, |
| @@ -219,6 +222,9 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) | |||
| 219 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 222 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
| 220 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | 223 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); |
| 221 | spin_lock_init(&ctrl->mbox_lock); | 224 | spin_lock_init(&ctrl->mbox_lock); |
| 225 | spin_lock_init(&phba->ctrl.mcc_lock); | ||
| 226 | spin_lock_init(&phba->ctrl.mcc_cq_lock); | ||
| 227 | |||
| 222 | return status; | 228 | return status; |
| 223 | } | 229 | } |
| 224 | 230 | ||
| @@ -268,6 +274,113 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba, | |||
| 268 | } | 274 | } |
| 269 | 275 | ||
| 270 | /** | 276 | /** |
| 277 | * be_isr_mcc - The isr routine of the driver. | ||
| 278 | * @irq: Not used | ||
| 279 | * @dev_id: Pointer to host adapter structure | ||
| 280 | */ | ||
| 281 | static irqreturn_t be_isr_mcc(int irq, void *dev_id) | ||
| 282 | { | ||
| 283 | struct beiscsi_hba *phba; | ||
| 284 | struct be_eq_entry *eqe = NULL; | ||
| 285 | struct be_queue_info *eq; | ||
| 286 | struct be_queue_info *mcc; | ||
| 287 | unsigned int num_eq_processed; | ||
| 288 | struct be_eq_obj *pbe_eq; | ||
| 289 | unsigned long flags; | ||
| 290 | |||
| 291 | pbe_eq = dev_id; | ||
| 292 | eq = &pbe_eq->q; | ||
| 293 | phba = pbe_eq->phba; | ||
| 294 | mcc = &phba->ctrl.mcc_obj.cq; | ||
| 295 | eqe = queue_tail_node(eq); | ||
| 296 | if (!eqe) | ||
| 297 | SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); | ||
| 298 | |||
| 299 | num_eq_processed = 0; | ||
| 300 | |||
| 301 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | ||
| 302 | & EQE_VALID_MASK) { | ||
| 303 | if (((eqe->dw[offsetof(struct amap_eq_entry, | ||
| 304 | resource_id) / 32] & | ||
| 305 | EQE_RESID_MASK) >> 16) == mcc->id) { | ||
| 306 | spin_lock_irqsave(&phba->isr_lock, flags); | ||
| 307 | phba->todo_mcc_cq = 1; | ||
| 308 | spin_unlock_irqrestore(&phba->isr_lock, flags); | ||
| 309 | } | ||
| 310 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | ||
| 311 | queue_tail_inc(eq); | ||
| 312 | eqe = queue_tail_node(eq); | ||
| 313 | num_eq_processed++; | ||
| 314 | } | ||
| 315 | if (phba->todo_mcc_cq) | ||
| 316 | queue_work(phba->wq, &phba->work_cqs); | ||
| 317 | if (num_eq_processed) | ||
| 318 | hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); | ||
| 319 | |||
| 320 | return IRQ_HANDLED; | ||
| 321 | } | ||
| 322 | |||
| 323 | /** | ||
| 324 | * be_isr_msix - The isr routine of the driver. | ||
| 325 | * @irq: Not used | ||
| 326 | * @dev_id: Pointer to host adapter structure | ||
| 327 | */ | ||
| 328 | static irqreturn_t be_isr_msix(int irq, void *dev_id) | ||
| 329 | { | ||
| 330 | struct beiscsi_hba *phba; | ||
| 331 | struct be_eq_entry *eqe = NULL; | ||
| 332 | struct be_queue_info *eq; | ||
| 333 | struct be_queue_info *cq; | ||
| 334 | unsigned int num_eq_processed; | ||
| 335 | struct be_eq_obj *pbe_eq; | ||
| 336 | unsigned long flags; | ||
| 337 | |||
| 338 | pbe_eq = dev_id; | ||
| 339 | eq = &pbe_eq->q; | ||
| 340 | cq = pbe_eq->cq; | ||
| 341 | eqe = queue_tail_node(eq); | ||
| 342 | if (!eqe) | ||
| 343 | SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); | ||
| 344 | |||
| 345 | phba = pbe_eq->phba; | ||
| 346 | num_eq_processed = 0; | ||
| 347 | if (blk_iopoll_enabled) { | ||
| 348 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | ||
| 349 | & EQE_VALID_MASK) { | ||
| 350 | if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) | ||
| 351 | blk_iopoll_sched(&pbe_eq->iopoll); | ||
| 352 | |||
| 353 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | ||
| 354 | queue_tail_inc(eq); | ||
| 355 | eqe = queue_tail_node(eq); | ||
| 356 | num_eq_processed++; | ||
| 357 | } | ||
| 358 | if (num_eq_processed) | ||
| 359 | hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); | ||
| 360 | |||
| 361 | return IRQ_HANDLED; | ||
| 362 | } else { | ||
| 363 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | ||
| 364 | & EQE_VALID_MASK) { | ||
| 365 | spin_lock_irqsave(&phba->isr_lock, flags); | ||
| 366 | phba->todo_cq = 1; | ||
| 367 | spin_unlock_irqrestore(&phba->isr_lock, flags); | ||
| 368 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | ||
| 369 | queue_tail_inc(eq); | ||
| 370 | eqe = queue_tail_node(eq); | ||
| 371 | num_eq_processed++; | ||
| 372 | } | ||
| 373 | if (phba->todo_cq) | ||
| 374 | queue_work(phba->wq, &phba->work_cqs); | ||
| 375 | |||
| 376 | if (num_eq_processed) | ||
| 377 | hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); | ||
| 378 | |||
| 379 | return IRQ_HANDLED; | ||
| 380 | } | ||
| 381 | } | ||
| 382 | |||
| 383 | /** | ||
| 271 | * be_isr - The isr routine of the driver. | 384 | * be_isr - The isr routine of the driver. |
| 272 | * @irq: Not used | 385 | * @irq: Not used |
| 273 | * @dev_id: Pointer to host adapter structure | 386 | * @dev_id: Pointer to host adapter structure |
| @@ -280,48 +393,70 @@ static irqreturn_t be_isr(int irq, void *dev_id) | |||
| 280 | struct be_eq_entry *eqe = NULL; | 393 | struct be_eq_entry *eqe = NULL; |
| 281 | struct be_queue_info *eq; | 394 | struct be_queue_info *eq; |
| 282 | struct be_queue_info *cq; | 395 | struct be_queue_info *cq; |
| 396 | struct be_queue_info *mcc; | ||
| 283 | unsigned long flags, index; | 397 | unsigned long flags, index; |
| 284 | unsigned int num_eq_processed; | 398 | unsigned int num_mcceq_processed, num_ioeq_processed; |
| 285 | struct be_ctrl_info *ctrl; | 399 | struct be_ctrl_info *ctrl; |
| 400 | struct be_eq_obj *pbe_eq; | ||
| 286 | int isr; | 401 | int isr; |
| 287 | 402 | ||
| 288 | phba = dev_id; | 403 | phba = dev_id; |
| 289 | if (!enable_msix) { | 404 | ctrl = &phba->ctrl;; |
| 290 | ctrl = &phba->ctrl;; | 405 | isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + |
| 291 | isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + | 406 | (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); |
| 292 | (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); | 407 | if (!isr) |
| 293 | if (!isr) | 408 | return IRQ_NONE; |
| 294 | return IRQ_NONE; | ||
| 295 | } | ||
| 296 | 409 | ||
| 297 | phwi_ctrlr = phba->phwi_ctrlr; | 410 | phwi_ctrlr = phba->phwi_ctrlr; |
| 298 | phwi_context = phwi_ctrlr->phwi_ctxt; | 411 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 299 | eq = &phwi_context->be_eq.q; | 412 | pbe_eq = &phwi_context->be_eq[0]; |
| 300 | cq = &phwi_context->be_cq; | 413 | |
| 414 | eq = &phwi_context->be_eq[0].q; | ||
| 415 | mcc = &phba->ctrl.mcc_obj.cq; | ||
| 301 | index = 0; | 416 | index = 0; |
| 302 | eqe = queue_tail_node(eq); | 417 | eqe = queue_tail_node(eq); |
| 303 | if (!eqe) | 418 | if (!eqe) |
| 304 | SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); | 419 | SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); |
| 305 | 420 | ||
| 306 | num_eq_processed = 0; | 421 | num_ioeq_processed = 0; |
| 422 | num_mcceq_processed = 0; | ||
| 307 | if (blk_iopoll_enabled) { | 423 | if (blk_iopoll_enabled) { |
| 308 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | 424 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] |
| 309 | & EQE_VALID_MASK) { | 425 | & EQE_VALID_MASK) { |
| 310 | if (!blk_iopoll_sched_prep(&phba->iopoll)) | 426 | if (((eqe->dw[offsetof(struct amap_eq_entry, |
| 311 | blk_iopoll_sched(&phba->iopoll); | 427 | resource_id) / 32] & |
| 312 | 428 | EQE_RESID_MASK) >> 16) == mcc->id) { | |
| 429 | spin_lock_irqsave(&phba->isr_lock, flags); | ||
| 430 | phba->todo_mcc_cq = 1; | ||
| 431 | spin_unlock_irqrestore(&phba->isr_lock, flags); | ||
| 432 | num_mcceq_processed++; | ||
| 433 | } else { | ||
| 434 | if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) | ||
| 435 | blk_iopoll_sched(&pbe_eq->iopoll); | ||
| 436 | num_ioeq_processed++; | ||
| 437 | } | ||
| 313 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | 438 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); |
| 314 | queue_tail_inc(eq); | 439 | queue_tail_inc(eq); |
| 315 | eqe = queue_tail_node(eq); | 440 | eqe = queue_tail_node(eq); |
| 316 | num_eq_processed++; | ||
| 317 | SE_DEBUG(DBG_LVL_8, "Valid EQE\n"); | ||
| 318 | } | 441 | } |
| 319 | if (num_eq_processed) { | 442 | if (num_ioeq_processed || num_mcceq_processed) { |
| 320 | hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1); | 443 | if (phba->todo_mcc_cq) |
| 444 | queue_work(phba->wq, &phba->work_cqs); | ||
| 445 | |||
| 446 | if ((num_mcceq_processed) && (!num_ioeq_processed)) | ||
| 447 | hwi_ring_eq_db(phba, eq->id, 0, | ||
| 448 | (num_ioeq_processed + | ||
| 449 | num_mcceq_processed) , 1, 1); | ||
| 450 | else | ||
| 451 | hwi_ring_eq_db(phba, eq->id, 0, | ||
| 452 | (num_ioeq_processed + | ||
| 453 | num_mcceq_processed), 0, 1); | ||
| 454 | |||
| 321 | return IRQ_HANDLED; | 455 | return IRQ_HANDLED; |
| 322 | } else | 456 | } else |
| 323 | return IRQ_NONE; | 457 | return IRQ_NONE; |
| 324 | } else { | 458 | } else { |
| 459 | cq = &phwi_context->be_cq[0]; | ||
| 325 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | 460 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] |
| 326 | & EQE_VALID_MASK) { | 461 | & EQE_VALID_MASK) { |
| 327 | 462 | ||
| @@ -339,13 +474,14 @@ static irqreturn_t be_isr(int irq, void *dev_id) | |||
| 339 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | 474 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); |
| 340 | queue_tail_inc(eq); | 475 | queue_tail_inc(eq); |
| 341 | eqe = queue_tail_node(eq); | 476 | eqe = queue_tail_node(eq); |
| 342 | num_eq_processed++; | 477 | num_ioeq_processed++; |
| 343 | } | 478 | } |
| 344 | if (phba->todo_cq || phba->todo_mcc_cq) | 479 | if (phba->todo_cq || phba->todo_mcc_cq) |
| 345 | queue_work(phba->wq, &phba->work_cqs); | 480 | queue_work(phba->wq, &phba->work_cqs); |
| 346 | 481 | ||
| 347 | if (num_eq_processed) { | 482 | if (num_ioeq_processed) { |
| 348 | hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1); | 483 | hwi_ring_eq_db(phba, eq->id, 0, |
| 484 | num_ioeq_processed, 1, 1); | ||
| 349 | return IRQ_HANDLED; | 485 | return IRQ_HANDLED; |
| 350 | } else | 486 | } else |
| 351 | return IRQ_NONE; | 487 | return IRQ_NONE; |
| @@ -355,13 +491,32 @@ static irqreturn_t be_isr(int irq, void *dev_id) | |||
| 355 | static int beiscsi_init_irqs(struct beiscsi_hba *phba) | 491 | static int beiscsi_init_irqs(struct beiscsi_hba *phba) |
| 356 | { | 492 | { |
| 357 | struct pci_dev *pcidev = phba->pcidev; | 493 | struct pci_dev *pcidev = phba->pcidev; |
| 358 | int ret; | 494 | struct hwi_controller *phwi_ctrlr; |
| 495 | struct hwi_context_memory *phwi_context; | ||
| 496 | int ret, msix_vec, i = 0; | ||
| 497 | char desc[32]; | ||
| 359 | 498 | ||
| 360 | ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); | 499 | phwi_ctrlr = phba->phwi_ctrlr; |
| 361 | if (ret) { | 500 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 362 | shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" | 501 | |
| 363 | "Failed to register irq\\n"); | 502 | if (phba->msix_enabled) { |
| 364 | return ret; | 503 | for (i = 0; i < phba->num_cpus; i++) { |
| 504 | sprintf(desc, "beiscsi_msix_%04x", i); | ||
| 505 | msix_vec = phba->msix_entries[i].vector; | ||
| 506 | ret = request_irq(msix_vec, be_isr_msix, 0, desc, | ||
| 507 | &phwi_context->be_eq[i]); | ||
| 508 | } | ||
| 509 | msix_vec = phba->msix_entries[i].vector; | ||
| 510 | ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc", | ||
| 511 | &phwi_context->be_eq[i]); | ||
| 512 | } else { | ||
| 513 | ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, | ||
| 514 | "beiscsi", phba); | ||
| 515 | if (ret) { | ||
| 516 | shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" | ||
| 517 | "Failed to register irq\\n"); | ||
| 518 | return ret; | ||
| 519 | } | ||
| 365 | } | 520 | } |
| 366 | return 0; | 521 | return 0; |
| 367 | } | 522 | } |
| @@ -378,15 +533,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, | |||
| 378 | iowrite32(val, phba->db_va + DB_CQ_OFFSET); | 533 | iowrite32(val, phba->db_va + DB_CQ_OFFSET); |
| 379 | } | 534 | } |
| 380 | 535 | ||
| 381 | /* | ||
| 382 | * async pdus include | ||
| 383 | * a. unsolicited NOP-In (target initiated NOP-In) | ||
| 384 | * b. Async Messages | ||
| 385 | * c. Reject PDU | ||
| 386 | * d. Login response | ||
| 387 | * These headers arrive unprocessed by the EP firmware and iSCSI layer | ||
| 388 | * process them | ||
| 389 | */ | ||
| 390 | static unsigned int | 536 | static unsigned int |
| 391 | beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, | 537 | beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, |
| 392 | struct beiscsi_hba *phba, | 538 | struct beiscsi_hba *phba, |
| @@ -397,6 +543,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, | |||
| 397 | { | 543 | { |
| 398 | struct iscsi_conn *conn = beiscsi_conn->conn; | 544 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 399 | struct iscsi_session *session = conn->session; | 545 | struct iscsi_session *session = conn->session; |
| 546 | struct iscsi_task *task; | ||
| 547 | struct beiscsi_io_task *io_task; | ||
| 548 | struct iscsi_hdr *login_hdr; | ||
| 400 | 549 | ||
| 401 | switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & | 550 | switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & |
| 402 | PDUBASE_OPCODE_MASK) { | 551 | PDUBASE_OPCODE_MASK) { |
| @@ -412,6 +561,10 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, | |||
| 412 | SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); | 561 | SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); |
| 413 | break; | 562 | break; |
| 414 | case ISCSI_OP_LOGIN_RSP: | 563 | case ISCSI_OP_LOGIN_RSP: |
| 564 | task = conn->login_task; | ||
| 565 | io_task = task->dd_data; | ||
| 566 | login_hdr = (struct iscsi_hdr *)ppdu; | ||
| 567 | login_hdr->itt = io_task->libiscsi_itt; | ||
| 415 | break; | 568 | break; |
| 416 | default: | 569 | default: |
| 417 | shost_printk(KERN_WARNING, phba->shost, | 570 | shost_printk(KERN_WARNING, phba->shost, |
| @@ -440,7 +593,8 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) | |||
| 440 | io_sgl_alloc_index]; | 593 | io_sgl_alloc_index]; |
| 441 | phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; | 594 | phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; |
| 442 | phba->io_sgl_hndl_avbl--; | 595 | phba->io_sgl_hndl_avbl--; |
| 443 | if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1)) | 596 | if (phba->io_sgl_alloc_index == (phba->params. |
| 597 | ios_per_ctrl - 1)) | ||
| 444 | phba->io_sgl_alloc_index = 0; | 598 | phba->io_sgl_alloc_index = 0; |
| 445 | else | 599 | else |
| 446 | phba->io_sgl_alloc_index++; | 600 | phba->io_sgl_alloc_index++; |
| @@ -490,9 +644,18 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, | |||
| 490 | 644 | ||
| 491 | phwi_ctrlr = phba->phwi_ctrlr; | 645 | phwi_ctrlr = phba->phwi_ctrlr; |
| 492 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; | 646 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; |
| 493 | pwrb_handle = pwrb_context->pwrb_handle_base[index]; | 647 | if (pwrb_context->wrb_handles_available) { |
| 494 | pwrb_handle->wrb_index = index; | 648 | pwrb_handle = pwrb_context->pwrb_handle_base[ |
| 495 | pwrb_handle->nxt_wrb_index = index; | 649 | pwrb_context->alloc_index]; |
| 650 | pwrb_context->wrb_handles_available--; | ||
| 651 | pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index; | ||
| 652 | if (pwrb_context->alloc_index == | ||
| 653 | (phba->params.wrbs_per_cxn - 1)) | ||
| 654 | pwrb_context->alloc_index = 0; | ||
| 655 | else | ||
| 656 | pwrb_context->alloc_index++; | ||
| 657 | } else | ||
| 658 | pwrb_handle = NULL; | ||
| 496 | return pwrb_handle; | 659 | return pwrb_handle; |
| 497 | } | 660 | } |
| 498 | 661 | ||
| @@ -508,11 +671,20 @@ static void | |||
| 508 | free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, | 671 | free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, |
| 509 | struct wrb_handle *pwrb_handle) | 672 | struct wrb_handle *pwrb_handle) |
| 510 | { | 673 | { |
| 674 | if (!ring_mode) | ||
| 675 | pwrb_context->pwrb_handle_base[pwrb_context->free_index] = | ||
| 676 | pwrb_handle; | ||
| 677 | pwrb_context->wrb_handles_available++; | ||
| 678 | if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) | ||
| 679 | pwrb_context->free_index = 0; | ||
| 680 | else | ||
| 681 | pwrb_context->free_index++; | ||
| 682 | |||
| 511 | SE_DEBUG(DBG_LVL_8, | 683 | SE_DEBUG(DBG_LVL_8, |
| 512 | "FREE WRB: pwrb_handle=%p free_index=%d=0x%x" | 684 | "FREE WRB: pwrb_handle=%p free_index=0x%x" |
| 513 | "wrb_handles_available=%d \n", | 685 | "wrb_handles_available=%d \n", |
| 514 | pwrb_handle, pwrb_context->free_index, | 686 | pwrb_handle, pwrb_context->free_index, |
| 515 | pwrb_context->free_index, pwrb_context->wrb_handles_available); | 687 | pwrb_context->wrb_handles_available); |
| 516 | } | 688 | } |
| 517 | 689 | ||
| 518 | static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) | 690 | static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) |
| @@ -540,6 +712,8 @@ void | |||
| 540 | free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) | 712 | free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) |
| 541 | { | 713 | { |
| 542 | 714 | ||
| 715 | SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n", | ||
| 716 | phba->eh_sgl_free_index); | ||
| 543 | if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { | 717 | if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { |
| 544 | /* | 718 | /* |
| 545 | * this can happen if clean_task is called on a task that | 719 | * this can happen if clean_task is called on a task that |
| @@ -572,10 +746,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, | |||
| 572 | u32 resid = 0, exp_cmdsn, max_cmdsn; | 746 | u32 resid = 0, exp_cmdsn, max_cmdsn; |
| 573 | u8 rsp, status, flags; | 747 | u8 rsp, status, flags; |
| 574 | 748 | ||
| 575 | exp_cmdsn = be32_to_cpu(psol-> | 749 | exp_cmdsn = (psol-> |
| 576 | dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] | 750 | dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] |
| 577 | & SOL_EXP_CMD_SN_MASK); | 751 | & SOL_EXP_CMD_SN_MASK); |
| 578 | max_cmdsn = be32_to_cpu((psol-> | 752 | max_cmdsn = ((psol-> |
| 579 | dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] | 753 | dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] |
| 580 | & SOL_EXP_CMD_SN_MASK) + | 754 | & SOL_EXP_CMD_SN_MASK) + |
| 581 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) | 755 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) |
| @@ -610,9 +784,9 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, | |||
| 610 | } | 784 | } |
| 611 | 785 | ||
| 612 | if (status == SAM_STAT_CHECK_CONDITION) { | 786 | if (status == SAM_STAT_CHECK_CONDITION) { |
| 787 | unsigned short *slen = (unsigned short *)sts_bhs->sense_info; | ||
| 613 | sense = sts_bhs->sense_info + sizeof(unsigned short); | 788 | sense = sts_bhs->sense_info + sizeof(unsigned short); |
| 614 | sense_len = | 789 | sense_len = cpu_to_be16(*slen); |
| 615 | cpu_to_be16((unsigned short)(sts_bhs->sense_info[0])); | ||
| 616 | memcpy(task->sc->sense_buffer, sense, | 790 | memcpy(task->sc->sense_buffer, sense, |
| 617 | min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); | 791 | min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); |
| 618 | } | 792 | } |
| @@ -620,8 +794,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, | |||
| 620 | if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] | 794 | if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] |
| 621 | & SOL_RES_CNT_MASK) | 795 | & SOL_RES_CNT_MASK) |
| 622 | conn->rxdata_octets += (psol-> | 796 | conn->rxdata_octets += (psol-> |
| 623 | dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] | 797 | dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] |
| 624 | & SOL_RES_CNT_MASK); | 798 | & SOL_RES_CNT_MASK); |
| 625 | } | 799 | } |
| 626 | unmap: | 800 | unmap: |
| 627 | scsi_dma_unmap(io_task->scsi_cmnd); | 801 | scsi_dma_unmap(io_task->scsi_cmnd); |
| @@ -633,6 +807,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, | |||
| 633 | struct iscsi_task *task, struct sol_cqe *psol) | 807 | struct iscsi_task *task, struct sol_cqe *psol) |
| 634 | { | 808 | { |
| 635 | struct iscsi_logout_rsp *hdr; | 809 | struct iscsi_logout_rsp *hdr; |
| 810 | struct beiscsi_io_task *io_task = task->dd_data; | ||
| 636 | struct iscsi_conn *conn = beiscsi_conn->conn; | 811 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 637 | 812 | ||
| 638 | hdr = (struct iscsi_logout_rsp *)task->hdr; | 813 | hdr = (struct iscsi_logout_rsp *)task->hdr; |
| @@ -651,7 +826,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, | |||
| 651 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) | 826 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) |
| 652 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); | 827 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); |
| 653 | hdr->hlength = 0; | 828 | hdr->hlength = 0; |
| 654 | 829 | hdr->itt = io_task->libiscsi_itt; | |
| 655 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); | 830 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); |
| 656 | } | 831 | } |
| 657 | 832 | ||
| @@ -661,6 +836,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, | |||
| 661 | { | 836 | { |
| 662 | struct iscsi_tm_rsp *hdr; | 837 | struct iscsi_tm_rsp *hdr; |
| 663 | struct iscsi_conn *conn = beiscsi_conn->conn; | 838 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 839 | struct beiscsi_io_task *io_task = task->dd_data; | ||
| 664 | 840 | ||
| 665 | hdr = (struct iscsi_tm_rsp *)task->hdr; | 841 | hdr = (struct iscsi_tm_rsp *)task->hdr; |
| 666 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] | 842 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] |
| @@ -668,11 +844,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, | |||
| 668 | hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / | 844 | hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / |
| 669 | 32] & SOL_RESP_MASK); | 845 | 32] & SOL_RESP_MASK); |
| 670 | hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, | 846 | hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, |
| 671 | i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); | 847 | i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); |
| 672 | hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, | 848 | hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, |
| 673 | i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + | 849 | i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + |
| 674 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) | 850 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) |
| 675 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); | 851 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); |
| 852 | hdr->itt = io_task->libiscsi_itt; | ||
| 676 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); | 853 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); |
| 677 | } | 854 | } |
| 678 | 855 | ||
| @@ -681,18 +858,36 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, | |||
| 681 | struct beiscsi_hba *phba, struct sol_cqe *psol) | 858 | struct beiscsi_hba *phba, struct sol_cqe *psol) |
| 682 | { | 859 | { |
| 683 | struct hwi_wrb_context *pwrb_context; | 860 | struct hwi_wrb_context *pwrb_context; |
| 684 | struct wrb_handle *pwrb_handle; | 861 | struct wrb_handle *pwrb_handle = NULL; |
| 862 | struct sgl_handle *psgl_handle = NULL; | ||
| 685 | struct hwi_controller *phwi_ctrlr; | 863 | struct hwi_controller *phwi_ctrlr; |
| 864 | struct iscsi_task *task; | ||
| 865 | struct beiscsi_io_task *io_task; | ||
| 686 | struct iscsi_conn *conn = beiscsi_conn->conn; | 866 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 687 | struct iscsi_session *session = conn->session; | 867 | struct iscsi_session *session = conn->session; |
| 688 | 868 | ||
| 689 | phwi_ctrlr = phba->phwi_ctrlr; | 869 | phwi_ctrlr = phba->phwi_ctrlr; |
| 690 | pwrb_context = &phwi_ctrlr->wrb_context[((psol-> | 870 | if (ring_mode) { |
| 871 | psgl_handle = phba->sgl_hndl_array[((psol-> | ||
| 872 | dw[offsetof(struct amap_sol_cqe_ring, icd_index) / | ||
| 873 | 32] & SOL_ICD_INDEX_MASK) >> 6)]; | ||
| 874 | pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid]; | ||
| 875 | task = psgl_handle->task; | ||
| 876 | pwrb_handle = NULL; | ||
| 877 | } else { | ||
| 878 | pwrb_context = &phwi_ctrlr->wrb_context[((psol-> | ||
| 691 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & | 879 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & |
| 692 | SOL_CID_MASK) >> 6)]; | 880 | SOL_CID_MASK) >> 6)]; |
| 693 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | 881 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> |
| 694 | dw[offsetof(struct amap_sol_cqe, wrb_index) / | 882 | dw[offsetof(struct amap_sol_cqe, wrb_index) / |
| 695 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; | 883 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; |
| 884 | task = pwrb_handle->pio_handle; | ||
| 885 | } | ||
| 886 | |||
| 887 | io_task = task->dd_data; | ||
| 888 | spin_lock(&phba->mgmt_sgl_lock); | ||
| 889 | free_mgmt_sgl_handle(phba, io_task->psgl_handle); | ||
| 890 | spin_unlock(&phba->mgmt_sgl_lock); | ||
| 696 | spin_lock_bh(&session->lock); | 891 | spin_lock_bh(&session->lock); |
| 697 | free_wrb_handle(phba, pwrb_context, pwrb_handle); | 892 | free_wrb_handle(phba, pwrb_context, pwrb_handle); |
| 698 | spin_unlock_bh(&session->lock); | 893 | spin_unlock_bh(&session->lock); |
| @@ -704,6 +899,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, | |||
| 704 | { | 899 | { |
| 705 | struct iscsi_nopin *hdr; | 900 | struct iscsi_nopin *hdr; |
| 706 | struct iscsi_conn *conn = beiscsi_conn->conn; | 901 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 902 | struct beiscsi_io_task *io_task = task->dd_data; | ||
| 707 | 903 | ||
| 708 | hdr = (struct iscsi_nopin *)task->hdr; | 904 | hdr = (struct iscsi_nopin *)task->hdr; |
| 709 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] | 905 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] |
| @@ -715,6 +911,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, | |||
| 715 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) | 911 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) |
| 716 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); | 912 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); |
| 717 | hdr->opcode = ISCSI_OP_NOOP_IN; | 913 | hdr->opcode = ISCSI_OP_NOOP_IN; |
| 914 | hdr->itt = io_task->libiscsi_itt; | ||
| 718 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); | 915 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); |
| 719 | } | 916 | } |
| 720 | 917 | ||
| @@ -726,25 +923,33 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, | |||
| 726 | struct iscsi_wrb *pwrb = NULL; | 923 | struct iscsi_wrb *pwrb = NULL; |
| 727 | struct hwi_controller *phwi_ctrlr; | 924 | struct hwi_controller *phwi_ctrlr; |
| 728 | struct iscsi_task *task; | 925 | struct iscsi_task *task; |
| 729 | struct beiscsi_io_task *io_task; | 926 | struct sgl_handle *psgl_handle = NULL; |
| 927 | unsigned int type; | ||
| 730 | struct iscsi_conn *conn = beiscsi_conn->conn; | 928 | struct iscsi_conn *conn = beiscsi_conn->conn; |
| 731 | struct iscsi_session *session = conn->session; | 929 | struct iscsi_session *session = conn->session; |
| 732 | 930 | ||
| 733 | phwi_ctrlr = phba->phwi_ctrlr; | 931 | phwi_ctrlr = phba->phwi_ctrlr; |
| 734 | 932 | if (ring_mode) { | |
| 735 | pwrb_context = &phwi_ctrlr-> | 933 | psgl_handle = phba->sgl_hndl_array[((psol-> |
| 736 | wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] | 934 | dw[offsetof(struct amap_sol_cqe_ring, icd_index) / |
| 737 | & SOL_CID_MASK) >> 6)]; | 935 | 32] & SOL_ICD_INDEX_MASK) >> 6)]; |
| 738 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | 936 | task = psgl_handle->task; |
| 937 | type = psgl_handle->type; | ||
| 938 | } else { | ||
| 939 | pwrb_context = &phwi_ctrlr-> | ||
| 940 | wrb_context[((psol->dw[offsetof | ||
| 941 | (struct amap_sol_cqe, cid) / 32] | ||
| 942 | & SOL_CID_MASK) >> 6)]; | ||
| 943 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | ||
| 739 | dw[offsetof(struct amap_sol_cqe, wrb_index) / | 944 | dw[offsetof(struct amap_sol_cqe, wrb_index) / |
| 740 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; | 945 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; |
| 741 | 946 | task = pwrb_handle->pio_handle; | |
| 742 | task = pwrb_handle->pio_handle; | 947 | pwrb = pwrb_handle->pwrb; |
| 743 | io_task = task->dd_data; | 948 | type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & |
| 949 | WRB_TYPE_MASK) >> 28; | ||
| 950 | } | ||
| 744 | spin_lock_bh(&session->lock); | 951 | spin_lock_bh(&session->lock); |
| 745 | pwrb = pwrb_handle->pwrb; | 952 | switch (type) { |
| 746 | switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & | ||
| 747 | WRB_TYPE_MASK) >> 28) { | ||
| 748 | case HWH_TYPE_IO: | 953 | case HWH_TYPE_IO: |
| 749 | case HWH_TYPE_IO_RD: | 954 | case HWH_TYPE_IO_RD: |
| 750 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == | 955 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == |
| @@ -773,12 +978,21 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, | |||
| 773 | break; | 978 | break; |
| 774 | 979 | ||
| 775 | default: | 980 | default: |
| 776 | shost_printk(KERN_WARNING, phba->shost, | 981 | if (ring_mode) |
| 777 | "wrb_index 0x%x CID 0x%x\n", | 982 | shost_printk(KERN_WARNING, phba->shost, |
| 778 | ((psol->dw[offsetof(struct amap_iscsi_wrb, type) / | 983 | "In hwi_complete_cmd, unknown type = %d" |
| 779 | 32] & SOL_WRB_INDEX_MASK) >> 16), | 984 | "icd_index 0x%x CID 0x%x\n", type, |
| 780 | ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] | 985 | ((psol->dw[offsetof(struct amap_sol_cqe_ring, |
| 781 | & SOL_CID_MASK) >> 6)); | 986 | icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6), |
| 987 | psgl_handle->cid); | ||
| 988 | else | ||
| 989 | shost_printk(KERN_WARNING, phba->shost, | ||
| 990 | "In hwi_complete_cmd, unknown type = %d" | ||
| 991 | "wrb_index 0x%x CID 0x%x\n", type, | ||
| 992 | ((psol->dw[offsetof(struct amap_iscsi_wrb, | ||
| 993 | type) / 32] & SOL_WRB_INDEX_MASK) >> 16), | ||
| 994 | ((psol->dw[offsetof(struct amap_sol_cqe, | ||
| 995 | cid) / 32] & SOL_CID_MASK) >> 6)); | ||
| 782 | break; | 996 | break; |
| 783 | } | 997 | } |
| 784 | 998 | ||
| @@ -1208,40 +1422,55 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, | |||
| 1208 | hwi_post_async_buffers(phba, pasync_handle->is_header); | 1422 | hwi_post_async_buffers(phba, pasync_handle->is_header); |
| 1209 | } | 1423 | } |
| 1210 | 1424 | ||
| 1211 | static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | 1425 | |
| 1426 | static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | ||
| 1212 | { | 1427 | { |
| 1213 | struct hwi_controller *phwi_ctrlr; | ||
| 1214 | struct hwi_context_memory *phwi_context; | ||
| 1215 | struct be_queue_info *cq; | 1428 | struct be_queue_info *cq; |
| 1216 | struct sol_cqe *sol; | 1429 | struct sol_cqe *sol; |
| 1217 | struct dmsg_cqe *dmsg; | 1430 | struct dmsg_cqe *dmsg; |
| 1218 | unsigned int num_processed = 0; | 1431 | unsigned int num_processed = 0; |
| 1219 | unsigned int tot_nump = 0; | 1432 | unsigned int tot_nump = 0; |
| 1220 | struct beiscsi_conn *beiscsi_conn; | 1433 | struct beiscsi_conn *beiscsi_conn; |
| 1434 | struct sgl_handle *psgl_handle = NULL; | ||
| 1435 | struct beiscsi_hba *phba; | ||
| 1221 | 1436 | ||
| 1222 | phwi_ctrlr = phba->phwi_ctrlr; | 1437 | cq = pbe_eq->cq; |
| 1223 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 1224 | cq = &phwi_context->be_cq; | ||
| 1225 | sol = queue_tail_node(cq); | 1438 | sol = queue_tail_node(cq); |
| 1439 | phba = pbe_eq->phba; | ||
| 1226 | 1440 | ||
| 1227 | while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & | 1441 | while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & |
| 1228 | CQE_VALID_MASK) { | 1442 | CQE_VALID_MASK) { |
| 1229 | be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); | 1443 | be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); |
| 1230 | 1444 | ||
| 1231 | beiscsi_conn = phba->conn_table[(u32) (sol-> | 1445 | if (ring_mode) { |
| 1446 | psgl_handle = phba->sgl_hndl_array[((sol-> | ||
| 1447 | dw[offsetof(struct amap_sol_cqe_ring, | ||
| 1448 | icd_index) / 32] & SOL_ICD_INDEX_MASK) | ||
| 1449 | >> 6)]; | ||
| 1450 | beiscsi_conn = phba->conn_table[psgl_handle->cid]; | ||
| 1451 | if (!beiscsi_conn || !beiscsi_conn->ep) { | ||
| 1452 | shost_printk(KERN_WARNING, phba->shost, | ||
| 1453 | "Connection table empty for cid = %d\n", | ||
| 1454 | psgl_handle->cid); | ||
| 1455 | return 0; | ||
| 1456 | } | ||
| 1457 | |||
| 1458 | } else { | ||
| 1459 | beiscsi_conn = phba->conn_table[(u32) (sol-> | ||
| 1232 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & | 1460 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & |
| 1233 | SOL_CID_MASK) >> 6]; | 1461 | SOL_CID_MASK) >> 6]; |
| 1234 | 1462 | ||
| 1235 | if (!beiscsi_conn || !beiscsi_conn->ep) { | 1463 | if (!beiscsi_conn || !beiscsi_conn->ep) { |
| 1236 | shost_printk(KERN_WARNING, phba->shost, | 1464 | shost_printk(KERN_WARNING, phba->shost, |
| 1237 | "Connection table empty for cid = %d\n", | 1465 | "Connection table empty for cid = %d\n", |
| 1238 | (u32)(sol->dw[offsetof(struct amap_sol_cqe, | 1466 | (u32)(sol->dw[offsetof(struct amap_sol_cqe, |
| 1239 | cid) / 32] & SOL_CID_MASK) >> 6); | 1467 | cid) / 32] & SOL_CID_MASK) >> 6); |
| 1240 | return 0; | 1468 | return 0; |
| 1469 | } | ||
| 1241 | } | 1470 | } |
| 1242 | 1471 | ||
| 1243 | if (num_processed >= 32) { | 1472 | if (num_processed >= 32) { |
| 1244 | hwi_ring_cq_db(phba, phwi_context->be_cq.id, | 1473 | hwi_ring_cq_db(phba, cq->id, |
| 1245 | num_processed, 0, 0); | 1474 | num_processed, 0, 0); |
| 1246 | tot_nump += num_processed; | 1475 | tot_nump += num_processed; |
| 1247 | num_processed = 0; | 1476 | num_processed = 0; |
| @@ -1258,8 +1487,12 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | |||
| 1258 | hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); | 1487 | hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); |
| 1259 | break; | 1488 | break; |
| 1260 | case UNSOL_HDR_NOTIFY: | 1489 | case UNSOL_HDR_NOTIFY: |
| 1490 | SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n"); | ||
| 1491 | hwi_process_default_pdu_ring(beiscsi_conn, phba, | ||
| 1492 | (struct i_t_dpdu_cqe *)sol); | ||
| 1493 | break; | ||
| 1261 | case UNSOL_DATA_NOTIFY: | 1494 | case UNSOL_DATA_NOTIFY: |
| 1262 | SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n"); | 1495 | SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n"); |
| 1263 | hwi_process_default_pdu_ring(beiscsi_conn, phba, | 1496 | hwi_process_default_pdu_ring(beiscsi_conn, phba, |
| 1264 | (struct i_t_dpdu_cqe *)sol); | 1497 | (struct i_t_dpdu_cqe *)sol); |
| 1265 | break; | 1498 | break; |
| @@ -1278,13 +1511,21 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | |||
| 1278 | case CMD_CXN_KILLED_ITT_INVALID: | 1511 | case CMD_CXN_KILLED_ITT_INVALID: |
| 1279 | case CMD_CXN_KILLED_SEQ_OUTOFORDER: | 1512 | case CMD_CXN_KILLED_SEQ_OUTOFORDER: |
| 1280 | case CMD_CXN_KILLED_INVALID_DATASN_RCVD: | 1513 | case CMD_CXN_KILLED_INVALID_DATASN_RCVD: |
| 1281 | SE_DEBUG(DBG_LVL_1, | 1514 | if (ring_mode) { |
| 1515 | SE_DEBUG(DBG_LVL_1, | ||
| 1516 | "CQ Error notification for cmd.. " | ||
| 1517 | "code %d cid 0x%x\n", | ||
| 1518 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
| 1519 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
| 1520 | } else { | ||
| 1521 | SE_DEBUG(DBG_LVL_1, | ||
| 1282 | "CQ Error notification for cmd.. " | 1522 | "CQ Error notification for cmd.. " |
| 1283 | "code %d cid 0x%x\n", | 1523 | "code %d cid 0x%x\n", |
| 1284 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1524 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
| 1285 | 32] & CQE_CODE_MASK, | 1525 | 32] & CQE_CODE_MASK, |
| 1286 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1526 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / |
| 1287 | 32] & SOL_CID_MASK)); | 1527 | 32] & SOL_CID_MASK)); |
| 1528 | } | ||
| 1288 | break; | 1529 | break; |
| 1289 | case UNSOL_DATA_DIGEST_ERROR_NOTIFY: | 1530 | case UNSOL_DATA_DIGEST_ERROR_NOTIFY: |
| 1290 | SE_DEBUG(DBG_LVL_1, | 1531 | SE_DEBUG(DBG_LVL_1, |
| @@ -1306,23 +1547,37 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | |||
| 1306 | case CXN_KILLED_OVER_RUN_RESIDUAL: | 1547 | case CXN_KILLED_OVER_RUN_RESIDUAL: |
| 1307 | case CXN_KILLED_UNDER_RUN_RESIDUAL: | 1548 | case CXN_KILLED_UNDER_RUN_RESIDUAL: |
| 1308 | case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: | 1549 | case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: |
| 1309 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID " | 1550 | if (ring_mode) { |
| 1551 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " | ||
| 1552 | "0x%x...\n", | ||
| 1553 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
| 1554 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
| 1555 | } else { | ||
| 1556 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " | ||
| 1310 | "0x%x...\n", | 1557 | "0x%x...\n", |
| 1311 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1558 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
| 1312 | 32] & CQE_CODE_MASK, | 1559 | 32] & CQE_CODE_MASK, |
| 1313 | sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1560 | sol->dw[offsetof(struct amap_sol_cqe, cid) / |
| 1314 | 32] & CQE_CID_MASK); | 1561 | 32] & CQE_CID_MASK); |
| 1562 | } | ||
| 1315 | iscsi_conn_failure(beiscsi_conn->conn, | 1563 | iscsi_conn_failure(beiscsi_conn->conn, |
| 1316 | ISCSI_ERR_CONN_FAILED); | 1564 | ISCSI_ERR_CONN_FAILED); |
| 1317 | break; | 1565 | break; |
| 1318 | case CXN_KILLED_RST_SENT: | 1566 | case CXN_KILLED_RST_SENT: |
| 1319 | case CXN_KILLED_RST_RCVD: | 1567 | case CXN_KILLED_RST_RCVD: |
| 1320 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent " | 1568 | if (ring_mode) { |
| 1321 | "on CID 0x%x...\n", | 1569 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" |
| 1570 | "received/sent on CID 0x%x...\n", | ||
| 1571 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
| 1572 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
| 1573 | } else { | ||
| 1574 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" | ||
| 1575 | "received/sent on CID 0x%x...\n", | ||
| 1322 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1576 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
| 1323 | 32] & CQE_CODE_MASK, | 1577 | 32] & CQE_CODE_MASK, |
| 1324 | sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1578 | sol->dw[offsetof(struct amap_sol_cqe, cid) / |
| 1325 | 32] & CQE_CID_MASK); | 1579 | 32] & CQE_CID_MASK); |
| 1580 | } | ||
| 1326 | iscsi_conn_failure(beiscsi_conn->conn, | 1581 | iscsi_conn_failure(beiscsi_conn->conn, |
| 1327 | ISCSI_ERR_CONN_FAILED); | 1582 | ISCSI_ERR_CONN_FAILED); |
| 1328 | break; | 1583 | break; |
| @@ -1344,8 +1599,7 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | |||
| 1344 | 1599 | ||
| 1345 | if (num_processed > 0) { | 1600 | if (num_processed > 0) { |
| 1346 | tot_nump += num_processed; | 1601 | tot_nump += num_processed; |
| 1347 | hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed, | 1602 | hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); |
| 1348 | 1, 0); | ||
| 1349 | } | 1603 | } |
| 1350 | return tot_nump; | 1604 | return tot_nump; |
| 1351 | } | 1605 | } |
| @@ -1353,21 +1607,30 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) | |||
| 1353 | static void beiscsi_process_all_cqs(struct work_struct *work) | 1607 | static void beiscsi_process_all_cqs(struct work_struct *work) |
| 1354 | { | 1608 | { |
| 1355 | unsigned long flags; | 1609 | unsigned long flags; |
| 1610 | struct hwi_controller *phwi_ctrlr; | ||
| 1611 | struct hwi_context_memory *phwi_context; | ||
| 1612 | struct be_eq_obj *pbe_eq; | ||
| 1356 | struct beiscsi_hba *phba = | 1613 | struct beiscsi_hba *phba = |
| 1357 | container_of(work, struct beiscsi_hba, work_cqs); | 1614 | container_of(work, struct beiscsi_hba, work_cqs); |
| 1358 | 1615 | ||
| 1616 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 1617 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 1618 | if (phba->msix_enabled) | ||
| 1619 | pbe_eq = &phwi_context->be_eq[phba->num_cpus]; | ||
| 1620 | else | ||
| 1621 | pbe_eq = &phwi_context->be_eq[0]; | ||
| 1622 | |||
| 1359 | if (phba->todo_mcc_cq) { | 1623 | if (phba->todo_mcc_cq) { |
| 1360 | spin_lock_irqsave(&phba->isr_lock, flags); | 1624 | spin_lock_irqsave(&phba->isr_lock, flags); |
| 1361 | phba->todo_mcc_cq = 0; | 1625 | phba->todo_mcc_cq = 0; |
| 1362 | spin_unlock_irqrestore(&phba->isr_lock, flags); | 1626 | spin_unlock_irqrestore(&phba->isr_lock, flags); |
| 1363 | SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n"); | ||
| 1364 | } | 1627 | } |
| 1365 | 1628 | ||
| 1366 | if (phba->todo_cq) { | 1629 | if (phba->todo_cq) { |
| 1367 | spin_lock_irqsave(&phba->isr_lock, flags); | 1630 | spin_lock_irqsave(&phba->isr_lock, flags); |
| 1368 | phba->todo_cq = 0; | 1631 | phba->todo_cq = 0; |
| 1369 | spin_unlock_irqrestore(&phba->isr_lock, flags); | 1632 | spin_unlock_irqrestore(&phba->isr_lock, flags); |
| 1370 | beiscsi_process_cq(phba); | 1633 | beiscsi_process_cq(pbe_eq); |
| 1371 | } | 1634 | } |
| 1372 | } | 1635 | } |
| 1373 | 1636 | ||
| @@ -1375,19 +1638,15 @@ static int be_iopoll(struct blk_iopoll *iop, int budget) | |||
| 1375 | { | 1638 | { |
| 1376 | static unsigned int ret; | 1639 | static unsigned int ret; |
| 1377 | struct beiscsi_hba *phba; | 1640 | struct beiscsi_hba *phba; |
| 1641 | struct be_eq_obj *pbe_eq; | ||
| 1378 | 1642 | ||
| 1379 | phba = container_of(iop, struct beiscsi_hba, iopoll); | 1643 | pbe_eq = container_of(iop, struct be_eq_obj, iopoll); |
| 1380 | 1644 | ret = beiscsi_process_cq(pbe_eq); | |
| 1381 | ret = beiscsi_process_cq(phba); | ||
| 1382 | if (ret < budget) { | 1645 | if (ret < budget) { |
| 1383 | struct hwi_controller *phwi_ctrlr; | 1646 | phba = pbe_eq->phba; |
| 1384 | struct hwi_context_memory *phwi_context; | ||
| 1385 | |||
| 1386 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 1387 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 1388 | blk_iopoll_complete(iop); | 1647 | blk_iopoll_complete(iop); |
| 1389 | hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0, | 1648 | SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id); |
| 1390 | 0, 1, 1); | 1649 | hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); |
| 1391 | } | 1650 | } |
| 1392 | return ret; | 1651 | return ret; |
| 1393 | } | 1652 | } |
| @@ -1537,14 +1796,12 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) | |||
| 1537 | 1796 | ||
| 1538 | static void beiscsi_find_mem_req(struct beiscsi_hba *phba) | 1797 | static void beiscsi_find_mem_req(struct beiscsi_hba *phba) |
| 1539 | { | 1798 | { |
| 1540 | unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages; | 1799 | unsigned int num_cq_pages, num_async_pdu_buf_pages; |
| 1541 | unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; | 1800 | unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; |
| 1542 | unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; | 1801 | unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; |
| 1543 | 1802 | ||
| 1544 | num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ | 1803 | num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ |
| 1545 | sizeof(struct sol_cqe)); | 1804 | sizeof(struct sol_cqe)); |
| 1546 | num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ | ||
| 1547 | sizeof(struct be_eq_entry)); | ||
| 1548 | num_async_pdu_buf_pages = | 1805 | num_async_pdu_buf_pages = |
| 1549 | PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ | 1806 | PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ |
| 1550 | phba->params.defpdu_hdr_sz); | 1807 | phba->params.defpdu_hdr_sz); |
| @@ -1565,8 +1822,6 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) | |||
| 1565 | phba->mem_req[HWI_MEM_ADDN_CONTEXT] = | 1822 | phba->mem_req[HWI_MEM_ADDN_CONTEXT] = |
| 1566 | sizeof(struct hwi_context_memory); | 1823 | sizeof(struct hwi_context_memory); |
| 1567 | 1824 | ||
| 1568 | phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE; | ||
| 1569 | phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE; | ||
| 1570 | 1825 | ||
| 1571 | phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) | 1826 | phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) |
| 1572 | * (phba->params.wrbs_per_cxn) | 1827 | * (phba->params.wrbs_per_cxn) |
| @@ -1751,8 +2006,6 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) | |||
| 1751 | 2006 | ||
| 1752 | for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { | 2007 | for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { |
| 1753 | pwrb_context = &phwi_ctrlr->wrb_context[index]; | 2008 | pwrb_context = &phwi_ctrlr->wrb_context[index]; |
| 1754 | SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index, | ||
| 1755 | pwrb_context); | ||
| 1756 | pwrb_context->pwrb_handle_base = | 2009 | pwrb_context->pwrb_handle_base = |
| 1757 | kzalloc(sizeof(struct wrb_handle *) * | 2010 | kzalloc(sizeof(struct wrb_handle *) * |
| 1758 | phba->params.wrbs_per_cxn, GFP_KERNEL); | 2011 | phba->params.wrbs_per_cxn, GFP_KERNEL); |
| @@ -1767,6 +2020,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) | |||
| 1767 | pwrb_context->pwrb_handle_basestd[j] = | 2020 | pwrb_context->pwrb_handle_basestd[j] = |
| 1768 | pwrb_handle; | 2021 | pwrb_handle; |
| 1769 | pwrb_context->wrb_handles_available++; | 2022 | pwrb_context->wrb_handles_available++; |
| 2023 | pwrb_handle->wrb_index = j; | ||
| 1770 | pwrb_handle++; | 2024 | pwrb_handle++; |
| 1771 | } | 2025 | } |
| 1772 | pwrb_context->free_index = 0; | 2026 | pwrb_context->free_index = 0; |
| @@ -1785,6 +2039,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) | |||
| 1785 | pwrb_context->pwrb_handle_basestd[j] = | 2039 | pwrb_context->pwrb_handle_basestd[j] = |
| 1786 | pwrb_handle; | 2040 | pwrb_handle; |
| 1787 | pwrb_context->wrb_handles_available++; | 2041 | pwrb_context->wrb_handles_available++; |
| 2042 | pwrb_handle->wrb_index = j; | ||
| 1788 | pwrb_handle++; | 2043 | pwrb_handle++; |
| 1789 | } | 2044 | } |
| 1790 | pwrb_context->free_index = 0; | 2045 | pwrb_context->free_index = 0; |
| @@ -2042,79 +2297,126 @@ static int be_fill_queue(struct be_queue_info *q, | |||
| 2042 | return 0; | 2297 | return 0; |
| 2043 | } | 2298 | } |
| 2044 | 2299 | ||
| 2045 | static int beiscsi_create_eq(struct beiscsi_hba *phba, | 2300 | static int beiscsi_create_eqs(struct beiscsi_hba *phba, |
| 2046 | struct hwi_context_memory *phwi_context) | 2301 | struct hwi_context_memory *phwi_context) |
| 2047 | { | 2302 | { |
| 2048 | unsigned int idx; | 2303 | unsigned int i, num_eq_pages; |
| 2049 | int ret; | 2304 | int ret, eq_for_mcc; |
| 2050 | struct be_queue_info *eq; | 2305 | struct be_queue_info *eq; |
| 2051 | struct be_dma_mem *mem; | 2306 | struct be_dma_mem *mem; |
| 2052 | struct be_mem_descriptor *mem_descr; | ||
| 2053 | void *eq_vaddress; | 2307 | void *eq_vaddress; |
| 2308 | dma_addr_t paddr; | ||
| 2054 | 2309 | ||
| 2055 | idx = 0; | 2310 | num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ |
| 2056 | eq = &phwi_context->be_eq.q; | 2311 | sizeof(struct be_eq_entry)); |
| 2057 | mem = &eq->dma_mem; | ||
| 2058 | mem_descr = phba->init_mem; | ||
| 2059 | mem_descr += HWI_MEM_EQ; | ||
| 2060 | eq_vaddress = mem_descr->mem_array[idx].virtual_address; | ||
| 2061 | |||
| 2062 | ret = be_fill_queue(eq, phba->params.num_eq_entries, | ||
| 2063 | sizeof(struct be_eq_entry), eq_vaddress); | ||
| 2064 | if (ret) { | ||
| 2065 | shost_printk(KERN_ERR, phba->shost, | ||
| 2066 | "be_fill_queue Failed for EQ \n"); | ||
| 2067 | return ret; | ||
| 2068 | } | ||
| 2069 | 2312 | ||
| 2070 | mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; | 2313 | if (phba->msix_enabled) |
| 2314 | eq_for_mcc = 1; | ||
| 2315 | else | ||
| 2316 | eq_for_mcc = 0; | ||
| 2317 | for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { | ||
| 2318 | eq = &phwi_context->be_eq[i].q; | ||
| 2319 | mem = &eq->dma_mem; | ||
| 2320 | phwi_context->be_eq[i].phba = phba; | ||
| 2321 | eq_vaddress = pci_alloc_consistent(phba->pcidev, | ||
| 2322 | num_eq_pages * PAGE_SIZE, | ||
| 2323 | &paddr); | ||
| 2324 | if (!eq_vaddress) | ||
| 2325 | goto create_eq_error; | ||
| 2326 | |||
| 2327 | mem->va = eq_vaddress; | ||
| 2328 | ret = be_fill_queue(eq, phba->params.num_eq_entries, | ||
| 2329 | sizeof(struct be_eq_entry), eq_vaddress); | ||
| 2330 | if (ret) { | ||
| 2331 | shost_printk(KERN_ERR, phba->shost, | ||
| 2332 | "be_fill_queue Failed for EQ \n"); | ||
| 2333 | goto create_eq_error; | ||
| 2334 | } | ||
| 2071 | 2335 | ||
| 2072 | ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, | 2336 | mem->dma = paddr; |
| 2073 | phwi_context->be_eq.cur_eqd); | 2337 | ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, |
| 2074 | if (ret) { | 2338 | phwi_context->cur_eqd); |
| 2075 | shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create" | 2339 | if (ret) { |
| 2076 | "Failedfor EQ \n"); | 2340 | shost_printk(KERN_ERR, phba->shost, |
| 2077 | return ret; | 2341 | "beiscsi_cmd_eq_create" |
| 2342 | "Failedfor EQ \n"); | ||
| 2343 | goto create_eq_error; | ||
| 2344 | } | ||
| 2345 | SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id); | ||
| 2078 | } | 2346 | } |
| 2079 | SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id); | ||
| 2080 | return 0; | 2347 | return 0; |
| 2348 | create_eq_error: | ||
| 2349 | for (i = 0; i < (phba->num_cpus + 1); i++) { | ||
| 2350 | eq = &phwi_context->be_eq[i].q; | ||
| 2351 | mem = &eq->dma_mem; | ||
| 2352 | if (mem->va) | ||
| 2353 | pci_free_consistent(phba->pcidev, num_eq_pages | ||
| 2354 | * PAGE_SIZE, | ||
| 2355 | mem->va, mem->dma); | ||
| 2356 | } | ||
| 2357 | return ret; | ||
| 2081 | } | 2358 | } |
| 2082 | 2359 | ||
| 2083 | static int beiscsi_create_cq(struct beiscsi_hba *phba, | 2360 | static int beiscsi_create_cqs(struct beiscsi_hba *phba, |
| 2084 | struct hwi_context_memory *phwi_context) | 2361 | struct hwi_context_memory *phwi_context) |
| 2085 | { | 2362 | { |
| 2086 | unsigned int idx; | 2363 | unsigned int i, num_cq_pages; |
| 2087 | int ret; | 2364 | int ret; |
| 2088 | struct be_queue_info *cq, *eq; | 2365 | struct be_queue_info *cq, *eq; |
| 2089 | struct be_dma_mem *mem; | 2366 | struct be_dma_mem *mem; |
| 2090 | struct be_mem_descriptor *mem_descr; | 2367 | struct be_eq_obj *pbe_eq; |
| 2091 | void *cq_vaddress; | 2368 | void *cq_vaddress; |
| 2369 | dma_addr_t paddr; | ||
| 2092 | 2370 | ||
| 2093 | idx = 0; | 2371 | num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ |
| 2094 | cq = &phwi_context->be_cq; | 2372 | sizeof(struct sol_cqe)); |
| 2095 | eq = &phwi_context->be_eq.q; | ||
| 2096 | mem = &cq->dma_mem; | ||
| 2097 | mem_descr = phba->init_mem; | ||
| 2098 | mem_descr += HWI_MEM_CQ; | ||
| 2099 | cq_vaddress = mem_descr->mem_array[idx].virtual_address; | ||
| 2100 | ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2, | ||
| 2101 | sizeof(struct sol_cqe), cq_vaddress); | ||
| 2102 | if (ret) { | ||
| 2103 | shost_printk(KERN_ERR, phba->shost, | ||
| 2104 | "be_fill_queue Failed for ISCSI CQ \n"); | ||
| 2105 | return ret; | ||
| 2106 | } | ||
| 2107 | 2373 | ||
| 2108 | mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; | 2374 | for (i = 0; i < phba->num_cpus; i++) { |
| 2109 | ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0); | 2375 | cq = &phwi_context->be_cq[i]; |
| 2110 | if (ret) { | 2376 | eq = &phwi_context->be_eq[i].q; |
| 2111 | shost_printk(KERN_ERR, phba->shost, | 2377 | pbe_eq = &phwi_context->be_eq[i]; |
| 2112 | "beiscsi_cmd_eq_create Failed for ISCSI CQ \n"); | 2378 | pbe_eq->cq = cq; |
| 2113 | return ret; | 2379 | pbe_eq->phba = phba; |
| 2380 | mem = &cq->dma_mem; | ||
| 2381 | cq_vaddress = pci_alloc_consistent(phba->pcidev, | ||
| 2382 | num_cq_pages * PAGE_SIZE, | ||
| 2383 | &paddr); | ||
| 2384 | if (!cq_vaddress) | ||
| 2385 | goto create_cq_error; | ||
| 2386 | ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2, | ||
| 2387 | sizeof(struct sol_cqe), cq_vaddress); | ||
| 2388 | if (ret) { | ||
| 2389 | shost_printk(KERN_ERR, phba->shost, | ||
| 2390 | "be_fill_queue Failed for ISCSI CQ \n"); | ||
| 2391 | goto create_cq_error; | ||
| 2392 | } | ||
| 2393 | |||
| 2394 | mem->dma = paddr; | ||
| 2395 | ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, | ||
| 2396 | false, 0); | ||
| 2397 | if (ret) { | ||
| 2398 | shost_printk(KERN_ERR, phba->shost, | ||
| 2399 | "beiscsi_cmd_eq_create" | ||
| 2400 | "Failed for ISCSI CQ \n"); | ||
| 2401 | goto create_cq_error; | ||
| 2402 | } | ||
| 2403 | SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n", | ||
| 2404 | cq->id, eq->id); | ||
| 2405 | SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n"); | ||
| 2114 | } | 2406 | } |
| 2115 | SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id); | ||
| 2116 | SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n"); | ||
| 2117 | return 0; | 2407 | return 0; |
| 2408 | |||
| 2409 | create_cq_error: | ||
| 2410 | for (i = 0; i < phba->num_cpus; i++) { | ||
| 2411 | cq = &phwi_context->be_cq[i]; | ||
| 2412 | mem = &cq->dma_mem; | ||
| 2413 | if (mem->va) | ||
| 2414 | pci_free_consistent(phba->pcidev, num_cq_pages | ||
| 2415 | * PAGE_SIZE, | ||
| 2416 | mem->va, mem->dma); | ||
| 2417 | } | ||
| 2418 | return ret; | ||
| 2419 | |||
| 2118 | } | 2420 | } |
| 2119 | 2421 | ||
| 2120 | static int | 2422 | static int |
| @@ -2132,7 +2434,7 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba, | |||
| 2132 | 2434 | ||
| 2133 | idx = 0; | 2435 | idx = 0; |
| 2134 | dq = &phwi_context->be_def_hdrq; | 2436 | dq = &phwi_context->be_def_hdrq; |
| 2135 | cq = &phwi_context->be_cq; | 2437 | cq = &phwi_context->be_cq[0]; |
| 2136 | mem = &dq->dma_mem; | 2438 | mem = &dq->dma_mem; |
| 2137 | mem_descr = phba->init_mem; | 2439 | mem_descr = phba->init_mem; |
| 2138 | mem_descr += HWI_MEM_ASYNC_HEADER_RING; | 2440 | mem_descr += HWI_MEM_ASYNC_HEADER_RING; |
| @@ -2176,7 +2478,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba, | |||
| 2176 | 2478 | ||
| 2177 | idx = 0; | 2479 | idx = 0; |
| 2178 | dataq = &phwi_context->be_def_dataq; | 2480 | dataq = &phwi_context->be_def_dataq; |
| 2179 | cq = &phwi_context->be_cq; | 2481 | cq = &phwi_context->be_cq[0]; |
| 2180 | mem = &dataq->dma_mem; | 2482 | mem = &dataq->dma_mem; |
| 2181 | mem_descr = phba->init_mem; | 2483 | mem_descr = phba->init_mem; |
| 2182 | mem_descr += HWI_MEM_ASYNC_DATA_RING; | 2484 | mem_descr += HWI_MEM_ASYNC_DATA_RING; |
| @@ -2239,6 +2541,30 @@ beiscsi_post_pages(struct beiscsi_hba *phba) | |||
| 2239 | return 0; | 2541 | return 0; |
| 2240 | } | 2542 | } |
| 2241 | 2543 | ||
| 2544 | static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) | ||
| 2545 | { | ||
| 2546 | struct be_dma_mem *mem = &q->dma_mem; | ||
| 2547 | if (mem->va) | ||
| 2548 | pci_free_consistent(phba->pcidev, mem->size, | ||
| 2549 | mem->va, mem->dma); | ||
| 2550 | } | ||
| 2551 | |||
| 2552 | static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, | ||
| 2553 | u16 len, u16 entry_size) | ||
| 2554 | { | ||
| 2555 | struct be_dma_mem *mem = &q->dma_mem; | ||
| 2556 | |||
| 2557 | memset(q, 0, sizeof(*q)); | ||
| 2558 | q->len = len; | ||
| 2559 | q->entry_size = entry_size; | ||
| 2560 | mem->size = len * entry_size; | ||
| 2561 | mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); | ||
| 2562 | if (!mem->va) | ||
| 2563 | return -1; | ||
| 2564 | memset(mem->va, 0, mem->size); | ||
| 2565 | return 0; | ||
| 2566 | } | ||
| 2567 | |||
| 2242 | static int | 2568 | static int |
| 2243 | beiscsi_create_wrb_rings(struct beiscsi_hba *phba, | 2569 | beiscsi_create_wrb_rings(struct beiscsi_hba *phba, |
| 2244 | struct hwi_context_memory *phwi_context, | 2570 | struct hwi_context_memory *phwi_context, |
| @@ -2328,13 +2654,29 @@ static void free_wrb_handles(struct beiscsi_hba *phba) | |||
| 2328 | } | 2654 | } |
| 2329 | } | 2655 | } |
| 2330 | 2656 | ||
| 2657 | static void be_mcc_queues_destroy(struct beiscsi_hba *phba) | ||
| 2658 | { | ||
| 2659 | struct be_queue_info *q; | ||
| 2660 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 2661 | |||
| 2662 | q = &phba->ctrl.mcc_obj.q; | ||
| 2663 | if (q->created) | ||
| 2664 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); | ||
| 2665 | be_queue_free(phba, q); | ||
| 2666 | |||
| 2667 | q = &phba->ctrl.mcc_obj.cq; | ||
| 2668 | if (q->created) | ||
| 2669 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); | ||
| 2670 | be_queue_free(phba, q); | ||
| 2671 | } | ||
| 2672 | |||
| 2331 | static void hwi_cleanup(struct beiscsi_hba *phba) | 2673 | static void hwi_cleanup(struct beiscsi_hba *phba) |
| 2332 | { | 2674 | { |
| 2333 | struct be_queue_info *q; | 2675 | struct be_queue_info *q; |
| 2334 | struct be_ctrl_info *ctrl = &phba->ctrl; | 2676 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 2335 | struct hwi_controller *phwi_ctrlr; | 2677 | struct hwi_controller *phwi_ctrlr; |
| 2336 | struct hwi_context_memory *phwi_context; | 2678 | struct hwi_context_memory *phwi_context; |
| 2337 | int i; | 2679 | int i, eq_num; |
| 2338 | 2680 | ||
| 2339 | phwi_ctrlr = phba->phwi_ctrlr; | 2681 | phwi_ctrlr = phba->phwi_ctrlr; |
| 2340 | phwi_context = phwi_ctrlr->phwi_ctxt; | 2682 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| @@ -2343,7 +2685,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba) | |||
| 2343 | if (q->created) | 2685 | if (q->created) |
| 2344 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); | 2686 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); |
| 2345 | } | 2687 | } |
| 2346 | |||
| 2347 | free_wrb_handles(phba); | 2688 | free_wrb_handles(phba); |
| 2348 | 2689 | ||
| 2349 | q = &phwi_context->be_def_hdrq; | 2690 | q = &phwi_context->be_def_hdrq; |
| @@ -2356,13 +2697,76 @@ static void hwi_cleanup(struct beiscsi_hba *phba) | |||
| 2356 | 2697 | ||
| 2357 | beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); | 2698 | beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); |
| 2358 | 2699 | ||
| 2359 | q = &phwi_context->be_cq; | 2700 | for (i = 0; i < (phba->num_cpus); i++) { |
| 2360 | if (q->created) | 2701 | q = &phwi_context->be_cq[i]; |
| 2361 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); | 2702 | if (q->created) |
| 2703 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); | ||
| 2704 | } | ||
| 2705 | if (phba->msix_enabled) | ||
| 2706 | eq_num = 1; | ||
| 2707 | else | ||
| 2708 | eq_num = 0; | ||
| 2709 | for (i = 0; i < (phba->num_cpus + eq_num); i++) { | ||
| 2710 | q = &phwi_context->be_eq[i].q; | ||
| 2711 | if (q->created) | ||
| 2712 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); | ||
| 2713 | } | ||
| 2714 | be_mcc_queues_destroy(phba); | ||
| 2715 | } | ||
| 2362 | 2716 | ||
| 2363 | q = &phwi_context->be_eq.q; | 2717 | static int be_mcc_queues_create(struct beiscsi_hba *phba, |
| 2364 | if (q->created) | 2718 | struct hwi_context_memory *phwi_context) |
| 2365 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); | 2719 | { |
| 2720 | struct be_queue_info *q, *cq; | ||
| 2721 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 2722 | |||
| 2723 | /* Alloc MCC compl queue */ | ||
| 2724 | cq = &phba->ctrl.mcc_obj.cq; | ||
| 2725 | if (be_queue_alloc(phba, cq, MCC_CQ_LEN, | ||
| 2726 | sizeof(struct be_mcc_compl))) | ||
| 2727 | goto err; | ||
| 2728 | /* Ask BE to create MCC compl queue; */ | ||
| 2729 | if (phba->msix_enabled) { | ||
| 2730 | if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq | ||
| 2731 | [phba->num_cpus].q, false, true, 0)) | ||
| 2732 | goto mcc_cq_free; | ||
| 2733 | } else { | ||
| 2734 | if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, | ||
| 2735 | false, true, 0)) | ||
| 2736 | goto mcc_cq_free; | ||
| 2737 | } | ||
| 2738 | |||
| 2739 | /* Alloc MCC queue */ | ||
| 2740 | q = &phba->ctrl.mcc_obj.q; | ||
| 2741 | if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) | ||
| 2742 | goto mcc_cq_destroy; | ||
| 2743 | |||
| 2744 | /* Ask BE to create MCC queue */ | ||
| 2745 | if (beiscsi_cmd_mccq_create(phba, q, cq)) | ||
| 2746 | goto mcc_q_free; | ||
| 2747 | |||
| 2748 | return 0; | ||
| 2749 | |||
| 2750 | mcc_q_free: | ||
| 2751 | be_queue_free(phba, q); | ||
| 2752 | mcc_cq_destroy: | ||
| 2753 | beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); | ||
| 2754 | mcc_cq_free: | ||
| 2755 | be_queue_free(phba, cq); | ||
| 2756 | err: | ||
| 2757 | return -1; | ||
| 2758 | } | ||
| 2759 | |||
| 2760 | static int find_num_cpus(void) | ||
| 2761 | { | ||
| 2762 | int num_cpus = 0; | ||
| 2763 | |||
| 2764 | num_cpus = num_online_cpus(); | ||
| 2765 | if (num_cpus >= MAX_CPUS) | ||
| 2766 | num_cpus = MAX_CPUS - 1; | ||
| 2767 | |||
| 2768 | SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus); | ||
| 2769 | return num_cpus; | ||
| 2366 | } | 2770 | } |
| 2367 | 2771 | ||
| 2368 | static int hwi_init_port(struct beiscsi_hba *phba) | 2772 | static int hwi_init_port(struct beiscsi_hba *phba) |
| @@ -2376,26 +2780,33 @@ static int hwi_init_port(struct beiscsi_hba *phba) | |||
| 2376 | def_pdu_ring_sz = | 2780 | def_pdu_ring_sz = |
| 2377 | phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); | 2781 | phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); |
| 2378 | phwi_ctrlr = phba->phwi_ctrlr; | 2782 | phwi_ctrlr = phba->phwi_ctrlr; |
| 2379 | |||
| 2380 | phwi_context = phwi_ctrlr->phwi_ctxt; | 2783 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 2381 | phwi_context->be_eq.max_eqd = 0; | 2784 | phwi_context->max_eqd = 0; |
| 2382 | phwi_context->be_eq.min_eqd = 0; | 2785 | phwi_context->min_eqd = 0; |
| 2383 | phwi_context->be_eq.cur_eqd = 64; | 2786 | phwi_context->cur_eqd = 64; |
| 2384 | phwi_context->be_eq.enable_aic = false; | ||
| 2385 | be_cmd_fw_initialize(&phba->ctrl); | 2787 | be_cmd_fw_initialize(&phba->ctrl); |
| 2386 | status = beiscsi_create_eq(phba, phwi_context); | 2788 | |
| 2789 | status = beiscsi_create_eqs(phba, phwi_context); | ||
| 2387 | if (status != 0) { | 2790 | if (status != 0) { |
| 2388 | shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); | 2791 | shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); |
| 2389 | goto error; | 2792 | goto error; |
| 2390 | } | 2793 | } |
| 2391 | 2794 | ||
| 2392 | status = mgmt_check_supported_fw(ctrl); | 2795 | status = be_mcc_queues_create(phba, phwi_context); |
| 2796 | if (status != 0) | ||
| 2797 | goto error; | ||
| 2798 | |||
| 2799 | status = mgmt_check_supported_fw(ctrl, phba); | ||
| 2393 | if (status != 0) { | 2800 | if (status != 0) { |
| 2394 | shost_printk(KERN_ERR, phba->shost, | 2801 | shost_printk(KERN_ERR, phba->shost, |
| 2395 | "Unsupported fw version \n"); | 2802 | "Unsupported fw version \n"); |
| 2396 | goto error; | 2803 | goto error; |
| 2397 | } | 2804 | } |
| 2398 | 2805 | ||
| 2806 | if (phba->fw_config.iscsi_features == 0x1) | ||
| 2807 | ring_mode = 1; | ||
| 2808 | else | ||
| 2809 | ring_mode = 0; | ||
| 2399 | status = mgmt_get_fw_config(ctrl, phba); | 2810 | status = mgmt_get_fw_config(ctrl, phba); |
| 2400 | if (status != 0) { | 2811 | if (status != 0) { |
| 2401 | shost_printk(KERN_ERR, phba->shost, | 2812 | shost_printk(KERN_ERR, phba->shost, |
| @@ -2403,7 +2814,7 @@ static int hwi_init_port(struct beiscsi_hba *phba) | |||
| 2403 | goto error; | 2814 | goto error; |
| 2404 | } | 2815 | } |
| 2405 | 2816 | ||
| 2406 | status = beiscsi_create_cq(phba, phwi_context); | 2817 | status = beiscsi_create_cqs(phba, phwi_context); |
| 2407 | if (status != 0) { | 2818 | if (status != 0) { |
| 2408 | shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); | 2819 | shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); |
| 2409 | goto error; | 2820 | goto error; |
| @@ -2447,7 +2858,6 @@ error: | |||
| 2447 | return -ENOMEM; | 2858 | return -ENOMEM; |
| 2448 | } | 2859 | } |
| 2449 | 2860 | ||
| 2450 | |||
| 2451 | static int hwi_init_controller(struct beiscsi_hba *phba) | 2861 | static int hwi_init_controller(struct beiscsi_hba *phba) |
| 2452 | { | 2862 | { |
| 2453 | struct hwi_controller *phwi_ctrlr; | 2863 | struct hwi_controller *phwi_ctrlr; |
| @@ -2530,6 +2940,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) | |||
| 2530 | 2940 | ||
| 2531 | phba->io_sgl_hndl_avbl = 0; | 2941 | phba->io_sgl_hndl_avbl = 0; |
| 2532 | phba->eh_sgl_hndl_avbl = 0; | 2942 | phba->eh_sgl_hndl_avbl = 0; |
| 2943 | |||
| 2944 | if (ring_mode) { | ||
| 2945 | phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) * | ||
| 2946 | phba->params.icds_per_ctrl, | ||
| 2947 | GFP_KERNEL); | ||
| 2948 | if (!phba->sgl_hndl_array) { | ||
| 2949 | shost_printk(KERN_ERR, phba->shost, | ||
| 2950 | "Mem Alloc Failed. Failing to load\n"); | ||
| 2951 | return -ENOMEM; | ||
| 2952 | } | ||
| 2953 | } | ||
| 2954 | |||
| 2533 | mem_descr_sglh = phba->init_mem; | 2955 | mem_descr_sglh = phba->init_mem; |
| 2534 | mem_descr_sglh += HWI_MEM_SGLH; | 2956 | mem_descr_sglh += HWI_MEM_SGLH; |
| 2535 | if (1 == mem_descr_sglh->num_elements) { | 2957 | if (1 == mem_descr_sglh->num_elements) { |
| @@ -2537,6 +2959,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) | |||
| 2537 | phba->params.ios_per_ctrl, | 2959 | phba->params.ios_per_ctrl, |
| 2538 | GFP_KERNEL); | 2960 | GFP_KERNEL); |
| 2539 | if (!phba->io_sgl_hndl_base) { | 2961 | if (!phba->io_sgl_hndl_base) { |
| 2962 | if (ring_mode) | ||
| 2963 | kfree(phba->sgl_hndl_array); | ||
| 2540 | shost_printk(KERN_ERR, phba->shost, | 2964 | shost_printk(KERN_ERR, phba->shost, |
| 2541 | "Mem Alloc Failed. Failing to load\n"); | 2965 | "Mem Alloc Failed. Failing to load\n"); |
| 2542 | return -ENOMEM; | 2966 | return -ENOMEM; |
| @@ -2656,13 +3080,12 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) | |||
| 2656 | struct hwi_context_memory *phwi_context; | 3080 | struct hwi_context_memory *phwi_context; |
| 2657 | struct be_queue_info *eq; | 3081 | struct be_queue_info *eq; |
| 2658 | u8 __iomem *addr; | 3082 | u8 __iomem *addr; |
| 2659 | u32 reg; | 3083 | u32 reg, i; |
| 2660 | u32 enabled; | 3084 | u32 enabled; |
| 2661 | 3085 | ||
| 2662 | phwi_ctrlr = phba->phwi_ctrlr; | 3086 | phwi_ctrlr = phba->phwi_ctrlr; |
| 2663 | phwi_context = phwi_ctrlr->phwi_ctxt; | 3087 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 2664 | 3088 | ||
| 2665 | eq = &phwi_context->be_eq.q; | ||
| 2666 | addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + | 3089 | addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + |
| 2667 | PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); | 3090 | PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); |
| 2668 | reg = ioread32(addr); | 3091 | reg = ioread32(addr); |
| @@ -2673,9 +3096,11 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) | |||
| 2673 | reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | 3096 | reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; |
| 2674 | SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); | 3097 | SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); |
| 2675 | iowrite32(reg, addr); | 3098 | iowrite32(reg, addr); |
| 2676 | SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); | 3099 | for (i = 0; i <= phba->num_cpus; i++) { |
| 2677 | 3100 | eq = &phwi_context->be_eq[i].q; | |
| 2678 | hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); | 3101 | SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); |
| 3102 | hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); | ||
| 3103 | } | ||
| 2679 | } else | 3104 | } else |
| 2680 | shost_printk(KERN_WARNING, phba->shost, | 3105 | shost_printk(KERN_WARNING, phba->shost, |
| 2681 | "In hwi_enable_intr, Not Enabled \n"); | 3106 | "In hwi_enable_intr, Not Enabled \n"); |
| @@ -2720,6 +3145,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba) | |||
| 2720 | if (hba_setup_cid_tbls(phba)) { | 3145 | if (hba_setup_cid_tbls(phba)) { |
| 2721 | shost_printk(KERN_ERR, phba->shost, | 3146 | shost_printk(KERN_ERR, phba->shost, |
| 2722 | "Failed in hba_setup_cid_tbls\n"); | 3147 | "Failed in hba_setup_cid_tbls\n"); |
| 3148 | if (ring_mode) | ||
| 3149 | kfree(phba->sgl_hndl_array); | ||
| 2723 | kfree(phba->io_sgl_hndl_base); | 3150 | kfree(phba->io_sgl_hndl_base); |
| 2724 | kfree(phba->eh_sgl_hndl_base); | 3151 | kfree(phba->eh_sgl_hndl_base); |
| 2725 | goto do_cleanup_ctrlr; | 3152 | goto do_cleanup_ctrlr; |
| @@ -2738,17 +3165,25 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) | |||
| 2738 | struct hwi_context_memory *phwi_context; | 3165 | struct hwi_context_memory *phwi_context; |
| 2739 | struct be_queue_info *eq; | 3166 | struct be_queue_info *eq; |
| 2740 | struct be_eq_entry *eqe = NULL; | 3167 | struct be_eq_entry *eqe = NULL; |
| 3168 | int i, eq_msix; | ||
| 2741 | 3169 | ||
| 2742 | phwi_ctrlr = phba->phwi_ctrlr; | 3170 | phwi_ctrlr = phba->phwi_ctrlr; |
| 2743 | phwi_context = phwi_ctrlr->phwi_ctxt; | 3171 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 2744 | eq = &phwi_context->be_eq.q; | 3172 | if (phba->msix_enabled) |
| 2745 | eqe = queue_tail_node(eq); | 3173 | eq_msix = 1; |
| 3174 | else | ||
| 3175 | eq_msix = 0; | ||
| 2746 | 3176 | ||
| 2747 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | 3177 | for (i = 0; i < (phba->num_cpus + eq_msix); i++) { |
| 2748 | & EQE_VALID_MASK) { | 3178 | eq = &phwi_context->be_eq[i].q; |
| 2749 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | ||
| 2750 | queue_tail_inc(eq); | ||
| 2751 | eqe = queue_tail_node(eq); | 3179 | eqe = queue_tail_node(eq); |
| 3180 | |||
| 3181 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | ||
| 3182 | & EQE_VALID_MASK) { | ||
| 3183 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | ||
| 3184 | queue_tail_inc(eq); | ||
| 3185 | eqe = queue_tail_node(eq); | ||
| 3186 | } | ||
| 2752 | } | 3187 | } |
| 2753 | } | 3188 | } |
| 2754 | 3189 | ||
| @@ -2762,6 +3197,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) | |||
| 2762 | "mgmt_epfw_cleanup FAILED \n"); | 3197 | "mgmt_epfw_cleanup FAILED \n"); |
| 2763 | hwi_cleanup(phba); | 3198 | hwi_cleanup(phba); |
| 2764 | hwi_purge_eq(phba); | 3199 | hwi_purge_eq(phba); |
| 3200 | if (ring_mode) | ||
| 3201 | kfree(phba->sgl_hndl_array); | ||
| 2765 | kfree(phba->io_sgl_hndl_base); | 3202 | kfree(phba->io_sgl_hndl_base); |
| 2766 | kfree(phba->eh_sgl_hndl_base); | 3203 | kfree(phba->eh_sgl_hndl_base); |
| 2767 | kfree(phba->cid_array); | 3204 | kfree(phba->cid_array); |
| @@ -2846,8 +3283,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, | |||
| 2846 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); | 3283 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); |
| 2847 | 3284 | ||
| 2848 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; | 3285 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; |
| 2849 | doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << | 3286 | if (!ring_mode) |
| 2850 | DB_DEF_PDU_WRB_INDEX_SHIFT; | 3287 | doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) |
| 3288 | << DB_DEF_PDU_WRB_INDEX_SHIFT; | ||
| 2851 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3289 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
| 2852 | 3290 | ||
| 2853 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); | 3291 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); |
| @@ -2856,7 +3294,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, | |||
| 2856 | static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, | 3294 | static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, |
| 2857 | int *index, int *age) | 3295 | int *index, int *age) |
| 2858 | { | 3296 | { |
| 2859 | *index = be32_to_cpu(itt) >> 16; | 3297 | *index = (int)itt; |
| 2860 | if (age) | 3298 | if (age) |
| 2861 | *age = conn->session->age; | 3299 | *age = conn->session->age; |
| 2862 | } | 3300 | } |
| @@ -2885,15 +3323,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
| 2885 | 3323 | ||
| 2886 | io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, | 3324 | io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, |
| 2887 | GFP_KERNEL, &paddr); | 3325 | GFP_KERNEL, &paddr); |
| 2888 | |||
| 2889 | if (!io_task->cmd_bhs) | 3326 | if (!io_task->cmd_bhs) |
| 2890 | return -ENOMEM; | 3327 | return -ENOMEM; |
| 2891 | |||
| 2892 | io_task->bhs_pa.u.a64.address = paddr; | 3328 | io_task->bhs_pa.u.a64.address = paddr; |
| 3329 | io_task->libiscsi_itt = (itt_t)task->itt; | ||
| 2893 | io_task->pwrb_handle = alloc_wrb_handle(phba, | 3330 | io_task->pwrb_handle = alloc_wrb_handle(phba, |
| 2894 | beiscsi_conn->beiscsi_conn_cid, | 3331 | beiscsi_conn->beiscsi_conn_cid, |
| 2895 | task->itt); | 3332 | task->itt); |
| 2896 | io_task->pwrb_handle->pio_handle = task; | ||
| 2897 | io_task->conn = beiscsi_conn; | 3333 | io_task->conn = beiscsi_conn; |
| 2898 | 3334 | ||
| 2899 | task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; | 3335 | task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; |
| @@ -2905,7 +3341,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
| 2905 | spin_unlock(&phba->io_sgl_lock); | 3341 | spin_unlock(&phba->io_sgl_lock); |
| 2906 | if (!io_task->psgl_handle) | 3342 | if (!io_task->psgl_handle) |
| 2907 | goto free_hndls; | 3343 | goto free_hndls; |
| 2908 | |||
| 2909 | } else { | 3344 | } else { |
| 2910 | io_task->scsi_cmnd = NULL; | 3345 | io_task->scsi_cmnd = NULL; |
| 2911 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { | 3346 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { |
| @@ -2932,8 +3367,18 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
| 2932 | goto free_hndls; | 3367 | goto free_hndls; |
| 2933 | } | 3368 | } |
| 2934 | } | 3369 | } |
| 2935 | itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) | | 3370 | itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> |
| 2936 | (unsigned int)(io_task->psgl_handle->sgl_index)); | 3371 | wrb_index << 16) | (unsigned int) |
| 3372 | (io_task->psgl_handle->sgl_index)); | ||
| 3373 | if (ring_mode) { | ||
| 3374 | phba->sgl_hndl_array[io_task->psgl_handle->sgl_index - | ||
| 3375 | phba->fw_config.iscsi_cid_start] = | ||
| 3376 | io_task->psgl_handle; | ||
| 3377 | io_task->psgl_handle->task = task; | ||
| 3378 | io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid; | ||
| 3379 | } else | ||
| 3380 | io_task->pwrb_handle->pio_handle = task; | ||
| 3381 | |||
| 2937 | io_task->cmd_bhs->iscsi_hdr.itt = itt; | 3382 | io_task->cmd_bhs->iscsi_hdr.itt = itt; |
| 2938 | return 0; | 3383 | return 0; |
| 2939 | 3384 | ||
| @@ -3006,7 +3451,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, | |||
| 3006 | io_task->bhs_len = sizeof(struct be_cmd_bhs); | 3451 | io_task->bhs_len = sizeof(struct be_cmd_bhs); |
| 3007 | 3452 | ||
| 3008 | if (writedir) { | 3453 | if (writedir) { |
| 3009 | SE_DEBUG(DBG_LVL_4, " WRITE Command \t"); | ||
| 3010 | memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48); | 3454 | memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48); |
| 3011 | AMAP_SET_BITS(struct amap_pdu_data_out, itt, | 3455 | AMAP_SET_BITS(struct amap_pdu_data_out, itt, |
| 3012 | &io_task->cmd_bhs->iscsi_data_pdu, | 3456 | &io_task->cmd_bhs->iscsi_data_pdu, |
| @@ -3016,11 +3460,18 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, | |||
| 3016 | ISCSI_OPCODE_SCSI_DATA_OUT); | 3460 | ISCSI_OPCODE_SCSI_DATA_OUT); |
| 3017 | AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, | 3461 | AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, |
| 3018 | &io_task->cmd_bhs->iscsi_data_pdu, 1); | 3462 | &io_task->cmd_bhs->iscsi_data_pdu, 1); |
| 3019 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); | 3463 | if (ring_mode) |
| 3464 | io_task->psgl_handle->type = INI_WR_CMD; | ||
| 3465 | else | ||
| 3466 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3467 | INI_WR_CMD); | ||
| 3020 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); | 3468 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); |
| 3021 | } else { | 3469 | } else { |
| 3022 | SE_DEBUG(DBG_LVL_4, "READ Command \t"); | 3470 | if (ring_mode) |
| 3023 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); | 3471 | io_task->psgl_handle->type = INI_RD_CMD; |
| 3472 | else | ||
| 3473 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3474 | INI_RD_CMD); | ||
| 3024 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); | 3475 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); |
| 3025 | } | 3476 | } |
| 3026 | memcpy(&io_task->cmd_bhs->iscsi_data_pdu. | 3477 | memcpy(&io_task->cmd_bhs->iscsi_data_pdu. |
| @@ -3045,7 +3496,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, | |||
| 3045 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); | 3496 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); |
| 3046 | 3497 | ||
| 3047 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; | 3498 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; |
| 3048 | doorbell |= (io_task->pwrb_handle->wrb_index & | 3499 | if (!ring_mode) |
| 3500 | doorbell |= (io_task->pwrb_handle->wrb_index & | ||
| 3049 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; | 3501 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; |
| 3050 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3502 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
| 3051 | 3503 | ||
| @@ -3059,10 +3511,16 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
| 3059 | struct iscsi_conn *conn = task->conn; | 3511 | struct iscsi_conn *conn = task->conn; |
| 3060 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; | 3512 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; |
| 3061 | struct beiscsi_hba *phba = beiscsi_conn->phba; | 3513 | struct beiscsi_hba *phba = beiscsi_conn->phba; |
| 3514 | struct iscsi_session *session; | ||
| 3062 | struct iscsi_wrb *pwrb = NULL; | 3515 | struct iscsi_wrb *pwrb = NULL; |
| 3516 | struct hwi_controller *phwi_ctrlr; | ||
| 3517 | struct hwi_wrb_context *pwrb_context; | ||
| 3518 | struct wrb_handle *pwrb_handle; | ||
| 3063 | unsigned int doorbell = 0; | 3519 | unsigned int doorbell = 0; |
| 3520 | unsigned int i, cid; | ||
| 3064 | struct iscsi_task *aborted_task; | 3521 | struct iscsi_task *aborted_task; |
| 3065 | 3522 | ||
| 3523 | cid = beiscsi_conn->beiscsi_conn_cid; | ||
| 3066 | pwrb = io_task->pwrb_handle->pwrb; | 3524 | pwrb = io_task->pwrb_handle->pwrb; |
| 3067 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, | 3525 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, |
| 3068 | be32_to_cpu(task->cmdsn)); | 3526 | be32_to_cpu(task->cmdsn)); |
| @@ -3073,38 +3531,63 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
| 3073 | 3531 | ||
| 3074 | switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { | 3532 | switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { |
| 3075 | case ISCSI_OP_LOGIN: | 3533 | case ISCSI_OP_LOGIN: |
| 3076 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD); | 3534 | if (ring_mode) |
| 3535 | io_task->psgl_handle->type = TGT_DM_CMD; | ||
| 3536 | else | ||
| 3537 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3538 | TGT_DM_CMD); | ||
| 3077 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3539 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
| 3078 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); | 3540 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); |
| 3079 | hwi_write_buffer(pwrb, task); | 3541 | hwi_write_buffer(pwrb, task); |
| 3080 | break; | 3542 | break; |
| 3081 | case ISCSI_OP_NOOP_OUT: | 3543 | case ISCSI_OP_NOOP_OUT: |
| 3082 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); | 3544 | if (ring_mode) |
| 3545 | io_task->psgl_handle->type = INI_RD_CMD; | ||
| 3546 | else | ||
| 3547 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3548 | INI_RD_CMD); | ||
| 3083 | hwi_write_buffer(pwrb, task); | 3549 | hwi_write_buffer(pwrb, task); |
| 3084 | break; | 3550 | break; |
| 3085 | case ISCSI_OP_TEXT: | 3551 | case ISCSI_OP_TEXT: |
| 3086 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); | 3552 | if (ring_mode) |
| 3553 | io_task->psgl_handle->type = INI_WR_CMD; | ||
| 3554 | else | ||
| 3555 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3556 | INI_WR_CMD); | ||
| 3087 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); | 3557 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); |
| 3088 | hwi_write_buffer(pwrb, task); | 3558 | hwi_write_buffer(pwrb, task); |
| 3089 | break; | 3559 | break; |
| 3090 | case ISCSI_OP_SCSI_TMFUNC: | 3560 | case ISCSI_OP_SCSI_TMFUNC: |
| 3091 | aborted_task = iscsi_itt_to_task(conn, | 3561 | session = conn->session; |
| 3092 | ((struct iscsi_tm *)task->hdr)->rtt); | 3562 | i = ((struct iscsi_tm *)task->hdr)->rtt; |
| 3563 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 3564 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; | ||
| 3565 | pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i) | ||
| 3566 | >> 16]; | ||
| 3567 | aborted_task = pwrb_handle->pio_handle; | ||
| 3093 | if (!aborted_task) | 3568 | if (!aborted_task) |
| 3094 | return 0; | 3569 | return 0; |
| 3570 | |||
| 3095 | aborted_io_task = aborted_task->dd_data; | 3571 | aborted_io_task = aborted_task->dd_data; |
| 3096 | if (!aborted_io_task->scsi_cmnd) | 3572 | if (!aborted_io_task->scsi_cmnd) |
| 3097 | return 0; | 3573 | return 0; |
| 3098 | 3574 | ||
| 3099 | mgmt_invalidate_icds(phba, | 3575 | mgmt_invalidate_icds(phba, |
| 3100 | aborted_io_task->psgl_handle->sgl_index, | 3576 | aborted_io_task->psgl_handle->sgl_index, |
| 3101 | beiscsi_conn->beiscsi_conn_cid); | 3577 | cid); |
| 3102 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD); | 3578 | if (ring_mode) |
| 3579 | io_task->psgl_handle->type = INI_TMF_CMD; | ||
| 3580 | else | ||
| 3581 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
| 3582 | INI_TMF_CMD); | ||
| 3103 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3583 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
| 3104 | hwi_write_buffer(pwrb, task); | 3584 | hwi_write_buffer(pwrb, task); |
| 3105 | break; | 3585 | break; |
| 3106 | case ISCSI_OP_LOGOUT: | 3586 | case ISCSI_OP_LOGOUT: |
| 3107 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3587 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
| 3588 | if (ring_mode) | ||
| 3589 | io_task->psgl_handle->type = HWH_TYPE_LOGOUT; | ||
| 3590 | else | ||
| 3108 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | 3591 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
| 3109 | HWH_TYPE_LOGOUT); | 3592 | HWH_TYPE_LOGOUT); |
| 3110 | hwi_write_buffer(pwrb, task); | 3593 | hwi_write_buffer(pwrb, task); |
| @@ -3122,8 +3605,9 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
| 3122 | io_task->pwrb_handle->nxt_wrb_index); | 3605 | io_task->pwrb_handle->nxt_wrb_index); |
| 3123 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); | 3606 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); |
| 3124 | 3607 | ||
| 3125 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; | 3608 | doorbell |= cid & DB_WRB_POST_CID_MASK; |
| 3126 | doorbell |= (io_task->pwrb_handle->wrb_index & | 3609 | if (!ring_mode) |
| 3610 | doorbell |= (io_task->pwrb_handle->wrb_index & | ||
| 3127 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; | 3611 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; |
| 3128 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3612 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
| 3129 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); | 3613 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); |
| @@ -3165,9 +3649,14 @@ static int beiscsi_task_xmit(struct iscsi_task *task) | |||
| 3165 | return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); | 3649 | return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); |
| 3166 | } | 3650 | } |
| 3167 | 3651 | ||
| 3652 | |||
| 3168 | static void beiscsi_remove(struct pci_dev *pcidev) | 3653 | static void beiscsi_remove(struct pci_dev *pcidev) |
| 3169 | { | 3654 | { |
| 3170 | struct beiscsi_hba *phba = NULL; | 3655 | struct beiscsi_hba *phba = NULL; |
| 3656 | struct hwi_controller *phwi_ctrlr; | ||
| 3657 | struct hwi_context_memory *phwi_context; | ||
| 3658 | struct be_eq_obj *pbe_eq; | ||
| 3659 | unsigned int i, msix_vec; | ||
| 3171 | 3660 | ||
| 3172 | phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); | 3661 | phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); |
| 3173 | if (!phba) { | 3662 | if (!phba) { |
| @@ -3175,12 +3664,24 @@ static void beiscsi_remove(struct pci_dev *pcidev) | |||
| 3175 | return; | 3664 | return; |
| 3176 | } | 3665 | } |
| 3177 | 3666 | ||
| 3667 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 3668 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 3178 | hwi_disable_intr(phba); | 3669 | hwi_disable_intr(phba); |
| 3179 | if (phba->pcidev->irq) | 3670 | if (phba->msix_enabled) { |
| 3180 | free_irq(phba->pcidev->irq, phba); | 3671 | for (i = 0; i <= phba->num_cpus; i++) { |
| 3672 | msix_vec = phba->msix_entries[i].vector; | ||
| 3673 | free_irq(msix_vec, &phwi_context->be_eq[i]); | ||
| 3674 | } | ||
| 3675 | } else | ||
| 3676 | if (phba->pcidev->irq) | ||
| 3677 | free_irq(phba->pcidev->irq, phba); | ||
| 3678 | pci_disable_msix(phba->pcidev); | ||
| 3181 | destroy_workqueue(phba->wq); | 3679 | destroy_workqueue(phba->wq); |
| 3182 | if (blk_iopoll_enabled) | 3680 | if (blk_iopoll_enabled) |
| 3183 | blk_iopoll_disable(&phba->iopoll); | 3681 | for (i = 0; i < phba->num_cpus; i++) { |
| 3682 | pbe_eq = &phwi_context->be_eq[i]; | ||
| 3683 | blk_iopoll_disable(&pbe_eq->iopoll); | ||
| 3684 | } | ||
| 3184 | 3685 | ||
| 3185 | beiscsi_clean_port(phba); | 3686 | beiscsi_clean_port(phba); |
| 3186 | beiscsi_free_mem(phba); | 3687 | beiscsi_free_mem(phba); |
| @@ -3194,11 +3695,29 @@ static void beiscsi_remove(struct pci_dev *pcidev) | |||
| 3194 | iscsi_host_free(phba->shost); | 3695 | iscsi_host_free(phba->shost); |
| 3195 | } | 3696 | } |
| 3196 | 3697 | ||
| 3698 | static void beiscsi_msix_enable(struct beiscsi_hba *phba) | ||
| 3699 | { | ||
| 3700 | int i, status; | ||
| 3701 | |||
| 3702 | for (i = 0; i <= phba->num_cpus; i++) | ||
| 3703 | phba->msix_entries[i].entry = i; | ||
| 3704 | |||
| 3705 | status = pci_enable_msix(phba->pcidev, phba->msix_entries, | ||
| 3706 | (phba->num_cpus + 1)); | ||
| 3707 | if (!status) | ||
| 3708 | phba->msix_enabled = true; | ||
| 3709 | |||
| 3710 | return; | ||
| 3711 | } | ||
| 3712 | |||
| 3197 | static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | 3713 | static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, |
| 3198 | const struct pci_device_id *id) | 3714 | const struct pci_device_id *id) |
| 3199 | { | 3715 | { |
| 3200 | struct beiscsi_hba *phba = NULL; | 3716 | struct beiscsi_hba *phba = NULL; |
| 3201 | int ret; | 3717 | struct hwi_controller *phwi_ctrlr; |
| 3718 | struct hwi_context_memory *phwi_context; | ||
| 3719 | struct be_eq_obj *pbe_eq; | ||
| 3720 | int ret, msix_vec, num_cpus, i; | ||
| 3202 | 3721 | ||
| 3203 | ret = beiscsi_enable_pci(pcidev); | 3722 | ret = beiscsi_enable_pci(pcidev); |
| 3204 | if (ret < 0) { | 3723 | if (ret < 0) { |
| @@ -3213,8 +3732,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 3213 | " Failed in beiscsi_hba_alloc \n"); | 3732 | " Failed in beiscsi_hba_alloc \n"); |
| 3214 | goto disable_pci; | 3733 | goto disable_pci; |
| 3215 | } | 3734 | } |
| 3735 | SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba); | ||
| 3216 | 3736 | ||
| 3217 | pci_set_drvdata(pcidev, phba); | 3737 | pci_set_drvdata(pcidev, phba); |
| 3738 | if (enable_msix) | ||
| 3739 | num_cpus = find_num_cpus(); | ||
| 3740 | else | ||
| 3741 | num_cpus = 1; | ||
| 3742 | phba->num_cpus = num_cpus; | ||
| 3743 | SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus); | ||
| 3744 | |||
| 3745 | if (enable_msix) | ||
| 3746 | beiscsi_msix_enable(phba); | ||
| 3218 | ret = be_ctrl_init(phba, pcidev); | 3747 | ret = be_ctrl_init(phba, pcidev); |
| 3219 | if (ret) { | 3748 | if (ret) { |
| 3220 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" | 3749 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" |
| @@ -3235,7 +3764,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 3235 | 3764 | ||
| 3236 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", | 3765 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", |
| 3237 | phba->shost->host_no); | 3766 | phba->shost->host_no); |
| 3238 | phba->wq = create_singlethread_workqueue(phba->wq_name); | 3767 | phba->wq = create_workqueue(phba->wq_name); |
| 3239 | if (!phba->wq) { | 3768 | if (!phba->wq) { |
| 3240 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" | 3769 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" |
| 3241 | "Failed to allocate work queue\n"); | 3770 | "Failed to allocate work queue\n"); |
| @@ -3244,11 +3773,16 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 3244 | 3773 | ||
| 3245 | INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); | 3774 | INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); |
| 3246 | 3775 | ||
| 3776 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 3777 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 3247 | if (blk_iopoll_enabled) { | 3778 | if (blk_iopoll_enabled) { |
| 3248 | blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll); | 3779 | for (i = 0; i < phba->num_cpus; i++) { |
| 3249 | blk_iopoll_enable(&phba->iopoll); | 3780 | pbe_eq = &phwi_context->be_eq[i]; |
| 3781 | blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, | ||
| 3782 | be_iopoll); | ||
| 3783 | blk_iopoll_enable(&pbe_eq->iopoll); | ||
| 3784 | } | ||
| 3250 | } | 3785 | } |
| 3251 | |||
| 3252 | ret = beiscsi_init_irqs(phba); | 3786 | ret = beiscsi_init_irqs(phba); |
| 3253 | if (ret < 0) { | 3787 | if (ret < 0) { |
| 3254 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" | 3788 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" |
| @@ -3261,17 +3795,26 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 3261 | "Failed to hwi_enable_intr\n"); | 3795 | "Failed to hwi_enable_intr\n"); |
| 3262 | goto free_ctrlr; | 3796 | goto free_ctrlr; |
| 3263 | } | 3797 | } |
| 3264 | |||
| 3265 | SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n"); | 3798 | SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n"); |
| 3266 | return 0; | 3799 | return 0; |
| 3267 | 3800 | ||
| 3268 | free_ctrlr: | 3801 | free_ctrlr: |
| 3269 | if (phba->pcidev->irq) | 3802 | if (phba->msix_enabled) { |
| 3270 | free_irq(phba->pcidev->irq, phba); | 3803 | for (i = 0; i <= phba->num_cpus; i++) { |
| 3804 | msix_vec = phba->msix_entries[i].vector; | ||
| 3805 | free_irq(msix_vec, &phwi_context->be_eq[i]); | ||
| 3806 | } | ||
| 3807 | } else | ||
| 3808 | if (phba->pcidev->irq) | ||
| 3809 | free_irq(phba->pcidev->irq, phba); | ||
| 3810 | pci_disable_msix(phba->pcidev); | ||
| 3271 | free_blkenbld: | 3811 | free_blkenbld: |
| 3272 | destroy_workqueue(phba->wq); | 3812 | destroy_workqueue(phba->wq); |
| 3273 | if (blk_iopoll_enabled) | 3813 | if (blk_iopoll_enabled) |
| 3274 | blk_iopoll_disable(&phba->iopoll); | 3814 | for (i = 0; i < phba->num_cpus; i++) { |
| 3815 | pbe_eq = &phwi_context->be_eq[i]; | ||
| 3816 | blk_iopoll_disable(&pbe_eq->iopoll); | ||
| 3817 | } | ||
| 3275 | free_twq: | 3818 | free_twq: |
| 3276 | beiscsi_clean_port(phba); | 3819 | beiscsi_clean_port(phba); |
| 3277 | beiscsi_free_mem(phba); | 3820 | beiscsi_free_mem(phba); |
| @@ -3316,7 +3859,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { | |||
| 3316 | ISCSI_USERNAME | ISCSI_PASSWORD | | 3859 | ISCSI_USERNAME | ISCSI_PASSWORD | |
| 3317 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | 3860 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
| 3318 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | 3861 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | |
| 3319 | ISCSI_LU_RESET_TMO | | 3862 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | |
| 3320 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | 3863 | ISCSI_PING_TMO | ISCSI_RECV_TMO | |
| 3321 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | 3864 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, |
| 3322 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | 3865 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | |
| @@ -3351,6 +3894,7 @@ static struct pci_driver beiscsi_pci_driver = { | |||
| 3351 | .id_table = beiscsi_pci_id_table | 3894 | .id_table = beiscsi_pci_id_table |
| 3352 | }; | 3895 | }; |
| 3353 | 3896 | ||
| 3897 | |||
| 3354 | static int __init beiscsi_module_init(void) | 3898 | static int __init beiscsi_module_init(void) |
| 3355 | { | 3899 | { |
| 3356 | int ret; | 3900 | int ret; |
| @@ -3373,6 +3917,7 @@ static int __init beiscsi_module_init(void) | |||
| 3373 | "beiscsi pci driver.\n"); | 3917 | "beiscsi pci driver.\n"); |
| 3374 | goto unregister_iscsi_transport; | 3918 | goto unregister_iscsi_transport; |
| 3375 | } | 3919 | } |
| 3920 | ring_mode = 0; | ||
| 3376 | return 0; | 3921 | return 0; |
| 3377 | 3922 | ||
| 3378 | unregister_iscsi_transport: | 3923 | unregister_iscsi_transport: |
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 53c9b70ac7ac..25e6b208b771 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h | |||
| @@ -21,11 +21,9 @@ | |||
| 21 | #ifndef _BEISCSI_MAIN_ | 21 | #ifndef _BEISCSI_MAIN_ |
| 22 | #define _BEISCSI_MAIN_ | 22 | #define _BEISCSI_MAIN_ |
| 23 | 23 | ||
| 24 | |||
| 25 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
| 26 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
| 27 | #include <linux/in.h> | 26 | #include <linux/in.h> |
| 28 | #include <linux/blk-iopoll.h> | ||
| 29 | #include <scsi/scsi.h> | 27 | #include <scsi/scsi.h> |
| 30 | #include <scsi/scsi_cmnd.h> | 28 | #include <scsi/scsi_cmnd.h> |
| 31 | #include <scsi/scsi_device.h> | 29 | #include <scsi/scsi_device.h> |
| @@ -35,12 +33,8 @@ | |||
| 35 | #include <scsi/scsi_transport_iscsi.h> | 33 | #include <scsi/scsi_transport_iscsi.h> |
| 36 | 34 | ||
| 37 | #include "be.h" | 35 | #include "be.h" |
| 38 | |||
| 39 | |||
| 40 | |||
| 41 | #define DRV_NAME "be2iscsi" | 36 | #define DRV_NAME "be2iscsi" |
| 42 | #define BUILD_STR "2.0.527.0" | 37 | #define BUILD_STR "2.0.527.0" |
| 43 | |||
| 44 | #define BE_NAME "ServerEngines BladeEngine2" \ | 38 | #define BE_NAME "ServerEngines BladeEngine2" \ |
| 45 | "Linux iSCSI Driver version" BUILD_STR | 39 | "Linux iSCSI Driver version" BUILD_STR |
| 46 | #define DRV_DESC BE_NAME " " "Driver" | 40 | #define DRV_DESC BE_NAME " " "Driver" |
| @@ -49,6 +43,8 @@ | |||
| 49 | #define BE_DEVICE_ID1 0x212 | 43 | #define BE_DEVICE_ID1 0x212 |
| 50 | #define OC_DEVICE_ID1 0x702 | 44 | #define OC_DEVICE_ID1 0x702 |
| 51 | #define OC_DEVICE_ID2 0x703 | 45 | #define OC_DEVICE_ID2 0x703 |
| 46 | #define OC_DEVICE_ID3 0x712 | ||
| 47 | #define OC_DEVICE_ID4 0x222 | ||
| 52 | 48 | ||
| 53 | #define BE2_MAX_SESSIONS 64 | 49 | #define BE2_MAX_SESSIONS 64 |
| 54 | #define BE2_CMDS_PER_CXN 128 | 50 | #define BE2_CMDS_PER_CXN 128 |
| @@ -63,6 +59,7 @@ | |||
| 63 | #define BE2_IO_DEPTH \ | 59 | #define BE2_IO_DEPTH \ |
| 64 | (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ)) | 60 | (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ)) |
| 65 | 61 | ||
| 62 | #define MAX_CPUS 31 | ||
| 66 | #define BEISCSI_SGLIST_ELEMENTS BE2_SGE | 63 | #define BEISCSI_SGLIST_ELEMENTS BE2_SGE |
| 67 | 64 | ||
| 68 | #define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */ | 65 | #define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */ |
| @@ -79,7 +76,7 @@ | |||
| 79 | #define BE_SENSE_INFO_SIZE 258 | 76 | #define BE_SENSE_INFO_SIZE 258 |
| 80 | #define BE_ISCSI_PDU_HEADER_SIZE 64 | 77 | #define BE_ISCSI_PDU_HEADER_SIZE 64 |
| 81 | #define BE_MIN_MEM_SIZE 16384 | 78 | #define BE_MIN_MEM_SIZE 16384 |
| 82 | 79 | #define MAX_CMD_SZ 65536 | |
| 83 | #define IIOC_SCSI_DATA 0x05 /* Write Operation */ | 80 | #define IIOC_SCSI_DATA 0x05 /* Write Operation */ |
| 84 | 81 | ||
| 85 | #define DBG_LVL 0x00000001 | 82 | #define DBG_LVL 0x00000001 |
| @@ -100,6 +97,8 @@ do { \ | |||
| 100 | } \ | 97 | } \ |
| 101 | } while (0); | 98 | } while (0); |
| 102 | 99 | ||
| 100 | #define BE_ADAPTER_UP 0x00000000 | ||
| 101 | #define BE_ADAPTER_LINK_DOWN 0x00000001 | ||
| 103 | /** | 102 | /** |
| 104 | * hardware needs the async PDU buffers to be posted in multiples of 8 | 103 | * hardware needs the async PDU buffers to be posted in multiples of 8 |
| 105 | * So have atleast 8 of them by default | 104 | * So have atleast 8 of them by default |
| @@ -160,21 +159,19 @@ do { \ | |||
| 160 | 159 | ||
| 161 | enum be_mem_enum { | 160 | enum be_mem_enum { |
| 162 | HWI_MEM_ADDN_CONTEXT, | 161 | HWI_MEM_ADDN_CONTEXT, |
| 163 | HWI_MEM_CQ, | ||
| 164 | HWI_MEM_EQ, | ||
| 165 | HWI_MEM_WRB, | 162 | HWI_MEM_WRB, |
| 166 | HWI_MEM_WRBH, | 163 | HWI_MEM_WRBH, |
| 167 | HWI_MEM_SGLH, /* 5 */ | 164 | HWI_MEM_SGLH, |
| 168 | HWI_MEM_SGE, | 165 | HWI_MEM_SGE, |
| 169 | HWI_MEM_ASYNC_HEADER_BUF, | 166 | HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ |
| 170 | HWI_MEM_ASYNC_DATA_BUF, | 167 | HWI_MEM_ASYNC_DATA_BUF, |
| 171 | HWI_MEM_ASYNC_HEADER_RING, | 168 | HWI_MEM_ASYNC_HEADER_RING, |
| 172 | HWI_MEM_ASYNC_DATA_RING, /* 10 */ | 169 | HWI_MEM_ASYNC_DATA_RING, |
| 173 | HWI_MEM_ASYNC_HEADER_HANDLE, | 170 | HWI_MEM_ASYNC_HEADER_HANDLE, |
| 174 | HWI_MEM_ASYNC_DATA_HANDLE, | 171 | HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ |
| 175 | HWI_MEM_ASYNC_PDU_CONTEXT, | 172 | HWI_MEM_ASYNC_PDU_CONTEXT, |
| 176 | ISCSI_MEM_GLOBAL_HEADER, | 173 | ISCSI_MEM_GLOBAL_HEADER, |
| 177 | SE_MEM_MAX /* 15 */ | 174 | SE_MEM_MAX |
| 178 | }; | 175 | }; |
| 179 | 176 | ||
| 180 | struct be_bus_address32 { | 177 | struct be_bus_address32 { |
| @@ -212,6 +209,9 @@ struct be_mem_descriptor { | |||
| 212 | 209 | ||
| 213 | struct sgl_handle { | 210 | struct sgl_handle { |
| 214 | unsigned int sgl_index; | 211 | unsigned int sgl_index; |
| 212 | unsigned int type; | ||
| 213 | unsigned int cid; | ||
| 214 | struct iscsi_task *task; | ||
| 215 | struct iscsi_sge *pfrag; | 215 | struct iscsi_sge *pfrag; |
| 216 | }; | 216 | }; |
| 217 | 217 | ||
| @@ -274,13 +274,17 @@ struct beiscsi_hba { | |||
| 274 | struct pci_dev *pcidev; | 274 | struct pci_dev *pcidev; |
| 275 | unsigned int state; | 275 | unsigned int state; |
| 276 | unsigned short asic_revision; | 276 | unsigned short asic_revision; |
| 277 | struct blk_iopoll iopoll; | 277 | unsigned int num_cpus; |
| 278 | unsigned int nxt_cqid; | ||
| 279 | struct msix_entry msix_entries[MAX_CPUS + 1]; | ||
| 280 | bool msix_enabled; | ||
| 278 | struct be_mem_descriptor *init_mem; | 281 | struct be_mem_descriptor *init_mem; |
| 279 | 282 | ||
| 280 | unsigned short io_sgl_alloc_index; | 283 | unsigned short io_sgl_alloc_index; |
| 281 | unsigned short io_sgl_free_index; | 284 | unsigned short io_sgl_free_index; |
| 282 | unsigned short io_sgl_hndl_avbl; | 285 | unsigned short io_sgl_hndl_avbl; |
| 283 | struct sgl_handle **io_sgl_hndl_base; | 286 | struct sgl_handle **io_sgl_hndl_base; |
| 287 | struct sgl_handle **sgl_hndl_array; | ||
| 284 | 288 | ||
| 285 | unsigned short eh_sgl_alloc_index; | 289 | unsigned short eh_sgl_alloc_index; |
| 286 | unsigned short eh_sgl_free_index; | 290 | unsigned short eh_sgl_free_index; |
| @@ -315,6 +319,7 @@ struct beiscsi_hba { | |||
| 315 | unsigned short cid_alloc; | 319 | unsigned short cid_alloc; |
| 316 | unsigned short cid_free; | 320 | unsigned short cid_free; |
| 317 | unsigned short avlbl_cids; | 321 | unsigned short avlbl_cids; |
| 322 | unsigned short iscsi_features; | ||
| 318 | spinlock_t cid_lock; | 323 | spinlock_t cid_lock; |
| 319 | } fw_config; | 324 | } fw_config; |
| 320 | 325 | ||
| @@ -343,6 +348,7 @@ struct beiscsi_conn { | |||
| 343 | unsigned short login_in_progress; | 348 | unsigned short login_in_progress; |
| 344 | struct sgl_handle *plogin_sgl_handle; | 349 | struct sgl_handle *plogin_sgl_handle; |
| 345 | struct beiscsi_session *beiscsi_sess; | 350 | struct beiscsi_session *beiscsi_sess; |
| 351 | struct iscsi_task *task; | ||
| 346 | }; | 352 | }; |
| 347 | 353 | ||
| 348 | /* This structure is used by the chip */ | 354 | /* This structure is used by the chip */ |
| @@ -390,7 +396,7 @@ struct beiscsi_io_task { | |||
| 390 | unsigned int flags; | 396 | unsigned int flags; |
| 391 | unsigned short cid; | 397 | unsigned short cid; |
| 392 | unsigned short header_len; | 398 | unsigned short header_len; |
| 393 | 399 | itt_t libiscsi_itt; | |
| 394 | struct be_cmd_bhs *cmd_bhs; | 400 | struct be_cmd_bhs *cmd_bhs; |
| 395 | struct be_bus_address bhs_pa; | 401 | struct be_bus_address bhs_pa; |
| 396 | unsigned short bhs_len; | 402 | unsigned short bhs_len; |
| @@ -599,7 +605,6 @@ struct amap_cq_db { | |||
| 599 | 605 | ||
| 600 | void beiscsi_process_eq(struct beiscsi_hba *phba); | 606 | void beiscsi_process_eq(struct beiscsi_hba *phba); |
| 601 | 607 | ||
| 602 | |||
| 603 | struct iscsi_wrb { | 608 | struct iscsi_wrb { |
| 604 | u32 dw[16]; | 609 | u32 dw[16]; |
| 605 | } __packed; | 610 | } __packed; |
| @@ -820,10 +825,12 @@ struct wrb_handle { | |||
| 820 | }; | 825 | }; |
| 821 | 826 | ||
| 822 | struct hwi_context_memory { | 827 | struct hwi_context_memory { |
| 823 | struct be_eq_obj be_eq; | 828 | /* Adaptive interrupt coalescing (AIC) info */ |
| 824 | struct be_queue_info be_cq; | 829 | u16 min_eqd; /* in usecs */ |
| 825 | struct be_queue_info be_mcc_cq; | 830 | u16 max_eqd; /* in usecs */ |
| 826 | struct be_queue_info be_mcc; | 831 | u16 cur_eqd; /* in usecs */ |
| 832 | struct be_eq_obj be_eq[MAX_CPUS]; | ||
| 833 | struct be_queue_info be_cq[MAX_CPUS]; | ||
| 827 | 834 | ||
| 828 | struct be_queue_info be_def_hdrq; | 835 | struct be_queue_info be_def_hdrq; |
| 829 | struct be_queue_info be_def_dataq; | 836 | struct be_queue_info be_def_dataq; |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 12e644fc746e..79c2bd525a84 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -35,7 +35,6 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, | |||
| 35 | 35 | ||
| 36 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 36 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
| 37 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); | 37 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); |
| 38 | |||
| 39 | status = be_mbox_notify(ctrl); | 38 | status = be_mbox_notify(ctrl); |
| 40 | if (!status) { | 39 | if (!status) { |
| 41 | struct be_fw_cfg *pfw_cfg; | 40 | struct be_fw_cfg *pfw_cfg; |
| @@ -58,7 +57,8 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, | |||
| 58 | return status; | 57 | return status; |
| 59 | } | 58 | } |
| 60 | 59 | ||
| 61 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) | 60 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, |
| 61 | struct beiscsi_hba *phba) | ||
| 62 | { | 62 | { |
| 63 | struct be_dma_mem nonemb_cmd; | 63 | struct be_dma_mem nonemb_cmd; |
| 64 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 64 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); |
| @@ -85,7 +85,6 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) | |||
| 85 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); | 85 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); |
| 86 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); | 86 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); |
| 87 | sge->len = cpu_to_le32(nonemb_cmd.size); | 87 | sge->len = cpu_to_le32(nonemb_cmd.size); |
| 88 | |||
| 89 | status = be_mbox_notify(ctrl); | 88 | status = be_mbox_notify(ctrl); |
| 90 | if (!status) { | 89 | if (!status) { |
| 91 | struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; | 90 | struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; |
| @@ -95,21 +94,25 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) | |||
| 95 | resp->params.hba_attribs.firmware_version_string); | 94 | resp->params.hba_attribs.firmware_version_string); |
| 96 | SE_DEBUG(DBG_LVL_8, | 95 | SE_DEBUG(DBG_LVL_8, |
| 97 | "Developer Build, not performing version check...\n"); | 96 | "Developer Build, not performing version check...\n"); |
| 98 | 97 | phba->fw_config.iscsi_features = | |
| 98 | resp->params.hba_attribs.iscsi_features; | ||
| 99 | SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n", | ||
| 100 | phba->fw_config.iscsi_features); | ||
| 99 | } else | 101 | } else |
| 100 | SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n"); | 102 | SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n"); |
| 103 | spin_unlock(&ctrl->mbox_lock); | ||
| 101 | if (nonemb_cmd.va) | 104 | if (nonemb_cmd.va) |
| 102 | pci_free_consistent(ctrl->pdev, nonemb_cmd.size, | 105 | pci_free_consistent(ctrl->pdev, nonemb_cmd.size, |
| 103 | nonemb_cmd.va, nonemb_cmd.dma); | 106 | nonemb_cmd.va, nonemb_cmd.dma); |
| 104 | 107 | ||
| 105 | spin_unlock(&ctrl->mbox_lock); | ||
| 106 | return status; | 108 | return status; |
| 107 | } | 109 | } |
| 108 | 110 | ||
| 111 | |||
| 109 | unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) | 112 | unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) |
| 110 | { | 113 | { |
| 111 | struct be_ctrl_info *ctrl = &phba->ctrl; | 114 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 112 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 115 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); |
| 113 | struct iscsi_cleanup_req *req = embedded_payload(wrb); | 116 | struct iscsi_cleanup_req *req = embedded_payload(wrb); |
| 114 | int status = 0; | 117 | int status = 0; |
| 115 | 118 | ||
| @@ -124,7 +127,7 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) | |||
| 124 | req->hdr_ring_id = 0; | 127 | req->hdr_ring_id = 0; |
| 125 | req->data_ring_id = 0; | 128 | req->data_ring_id = 0; |
| 126 | 129 | ||
| 127 | status = be_mbox_notify(ctrl); | 130 | status = be_mcc_notify_wait(phba); |
| 128 | if (status) | 131 | if (status) |
| 129 | shost_printk(KERN_WARNING, phba->shost, | 132 | shost_printk(KERN_WARNING, phba->shost, |
| 130 | " mgmt_epfw_cleanup , FAILED\n"); | 133 | " mgmt_epfw_cleanup , FAILED\n"); |
| @@ -137,7 +140,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
| 137 | { | 140 | { |
| 138 | struct be_dma_mem nonemb_cmd; | 141 | struct be_dma_mem nonemb_cmd; |
| 139 | struct be_ctrl_info *ctrl = &phba->ctrl; | 142 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 140 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 143 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); |
| 141 | struct be_sge *sge = nonembedded_sgl(wrb); | 144 | struct be_sge *sge = nonembedded_sgl(wrb); |
| 142 | struct invalidate_commands_params_in *req; | 145 | struct invalidate_commands_params_in *req; |
| 143 | int status = 0; | 146 | int status = 0; |
| @@ -169,7 +172,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
| 169 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); | 172 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); |
| 170 | sge->len = cpu_to_le32(nonemb_cmd.size); | 173 | sge->len = cpu_to_le32(nonemb_cmd.size); |
| 171 | 174 | ||
| 172 | status = be_mbox_notify(ctrl); | 175 | status = be_mcc_notify_wait(phba); |
| 173 | if (status) | 176 | if (status) |
| 174 | SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n"); | 177 | SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n"); |
| 175 | spin_unlock(&ctrl->mbox_lock); | 178 | spin_unlock(&ctrl->mbox_lock); |
| @@ -186,7 +189,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
| 186 | unsigned short savecfg_flag) | 189 | unsigned short savecfg_flag) |
| 187 | { | 190 | { |
| 188 | struct be_ctrl_info *ctrl = &phba->ctrl; | 191 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 189 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 192 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); |
| 190 | struct iscsi_invalidate_connection_params_in *req = | 193 | struct iscsi_invalidate_connection_params_in *req = |
| 191 | embedded_payload(wrb); | 194 | embedded_payload(wrb); |
| 192 | int status = 0; | 195 | int status = 0; |
| @@ -205,7 +208,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
| 205 | else | 208 | else |
| 206 | req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; | 209 | req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; |
| 207 | req->save_cfg = savecfg_flag; | 210 | req->save_cfg = savecfg_flag; |
| 208 | status = be_mbox_notify(ctrl); | 211 | status = be_mcc_notify_wait(phba); |
| 209 | if (status) | 212 | if (status) |
| 210 | SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n"); | 213 | SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n"); |
| 211 | 214 | ||
| @@ -217,7 +220,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, | |||
| 217 | unsigned short cid, unsigned int upload_flag) | 220 | unsigned short cid, unsigned int upload_flag) |
| 218 | { | 221 | { |
| 219 | struct be_ctrl_info *ctrl = &phba->ctrl; | 222 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 220 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 223 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); |
| 221 | struct tcp_upload_params_in *req = embedded_payload(wrb); | 224 | struct tcp_upload_params_in *req = embedded_payload(wrb); |
| 222 | int status = 0; | 225 | int status = 0; |
| 223 | 226 | ||
| @@ -229,7 +232,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, | |||
| 229 | OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); | 232 | OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); |
| 230 | req->id = (unsigned short)cid; | 233 | req->id = (unsigned short)cid; |
| 231 | req->upload_type = (unsigned char)upload_flag; | 234 | req->upload_type = (unsigned char)upload_flag; |
| 232 | status = be_mbox_notify(ctrl); | 235 | status = be_mcc_notify_wait(phba); |
| 233 | if (status) | 236 | if (status) |
| 234 | SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n"); | 237 | SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n"); |
| 235 | spin_unlock(&ctrl->mbox_lock); | 238 | spin_unlock(&ctrl->mbox_lock); |
| @@ -245,13 +248,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 245 | struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; | 248 | struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; |
| 246 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; | 249 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; |
| 247 | struct be_ctrl_info *ctrl = &phba->ctrl; | 250 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 248 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 251 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); |
| 249 | struct tcp_connect_and_offload_in *req = embedded_payload(wrb); | 252 | struct tcp_connect_and_offload_in *req = embedded_payload(wrb); |
| 250 | unsigned short def_hdr_id; | 253 | unsigned short def_hdr_id; |
| 251 | unsigned short def_data_id; | 254 | unsigned short def_data_id; |
| 252 | struct phys_addr template_address = { 0, 0 }; | 255 | struct phys_addr template_address = { 0, 0 }; |
| 253 | struct phys_addr *ptemplate_address; | 256 | struct phys_addr *ptemplate_address; |
| 254 | int status = 0; | 257 | int status = 0; |
| 258 | unsigned int i; | ||
| 255 | unsigned short cid = beiscsi_ep->ep_cid; | 259 | unsigned short cid = beiscsi_ep->ep_cid; |
| 256 | 260 | ||
| 257 | phwi_ctrlr = phba->phwi_ctrlr; | 261 | phwi_ctrlr = phba->phwi_ctrlr; |
| @@ -296,14 +300,18 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 296 | 300 | ||
| 297 | } | 301 | } |
| 298 | req->cid = cid; | 302 | req->cid = cid; |
| 299 | req->cq_id = phwi_context->be_cq.id; | 303 | i = phba->nxt_cqid++; |
| 304 | if (phba->nxt_cqid == phba->num_cpus) | ||
| 305 | phba->nxt_cqid = 0; | ||
| 306 | req->cq_id = phwi_context->be_cq[i].id; | ||
| 307 | SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id); | ||
| 300 | req->defq_id = def_hdr_id; | 308 | req->defq_id = def_hdr_id; |
| 301 | req->hdr_ring_id = def_hdr_id; | 309 | req->hdr_ring_id = def_hdr_id; |
| 302 | req->data_ring_id = def_data_id; | 310 | req->data_ring_id = def_data_id; |
| 303 | req->do_offload = 1; | 311 | req->do_offload = 1; |
| 304 | req->dataout_template_pa.lo = ptemplate_address->lo; | 312 | req->dataout_template_pa.lo = ptemplate_address->lo; |
| 305 | req->dataout_template_pa.hi = ptemplate_address->hi; | 313 | req->dataout_template_pa.hi = ptemplate_address->hi; |
| 306 | status = be_mbox_notify(ctrl); | 314 | status = be_mcc_notify_wait(phba); |
| 307 | if (!status) { | 315 | if (!status) { |
| 308 | struct iscsi_endpoint *ep; | 316 | struct iscsi_endpoint *ep; |
| 309 | struct tcp_connect_and_offload_out *ptcpcnct_out = | 317 | struct tcp_connect_and_offload_out *ptcpcnct_out = |
| @@ -311,7 +319,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 311 | 319 | ||
| 312 | ep = phba->ep_array[ptcpcnct_out->cid]; | 320 | ep = phba->ep_array[ptcpcnct_out->cid]; |
| 313 | beiscsi_ep = ep->dd_data; | 321 | beiscsi_ep = ep->dd_data; |
| 314 | beiscsi_ep->fw_handle = 0; | 322 | beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; |
| 315 | beiscsi_ep->cid_vld = 1; | 323 | beiscsi_ep->cid_vld = 1; |
| 316 | SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); | 324 | SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); |
| 317 | } else | 325 | } else |
| @@ -319,3 +327,30 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 319 | spin_unlock(&ctrl->mbox_lock); | 327 | spin_unlock(&ctrl->mbox_lock); |
| 320 | return status; | 328 | return status; |
| 321 | } | 329 | } |
| 330 | |||
| 331 | int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr) | ||
| 332 | { | ||
| 333 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 334 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | ||
| 335 | struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb); | ||
| 336 | int status; | ||
| 337 | |||
| 338 | SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n"); | ||
| 339 | spin_lock(&ctrl->mbox_lock); | ||
| 340 | memset(wrb, 0, sizeof(*wrb)); | ||
| 341 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | ||
| 342 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | ||
| 343 | OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, | ||
| 344 | sizeof(*req)); | ||
| 345 | |||
| 346 | status = be_mcc_notify_wait(phba); | ||
| 347 | if (!status) { | ||
| 348 | struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb); | ||
| 349 | |||
| 350 | memcpy(mac_addr, resp->mac_address, ETH_ALEN); | ||
| 351 | } | ||
| 352 | |||
| 353 | spin_unlock(&ctrl->mbox_lock); | ||
| 354 | return status; | ||
| 355 | } | ||
| 356 | |||
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 00e816ee8070..24eaff923f85 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h | |||
| @@ -175,7 +175,9 @@ struct mgmt_hba_attributes { | |||
| 175 | u8 phy_port; | 175 | u8 phy_port; |
| 176 | u32 firmware_post_status; | 176 | u32 firmware_post_status; |
| 177 | u32 hba_mtu[8]; | 177 | u32 hba_mtu[8]; |
| 178 | u32 future_u32[4]; | 178 | u8 iscsi_features; |
| 179 | u8 future_u8[3]; | ||
| 180 | u32 future_u32[3]; | ||
| 179 | } __packed; | 181 | } __packed; |
| 180 | 182 | ||
| 181 | struct mgmt_controller_attributes { | 183 | struct mgmt_controller_attributes { |
| @@ -246,4 +248,8 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
| 246 | unsigned short cid, | 248 | unsigned short cid, |
| 247 | unsigned short issue_reset, | 249 | unsigned short issue_reset, |
| 248 | unsigned short savecfg_flag); | 250 | unsigned short savecfg_flag); |
| 251 | |||
| 252 | unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl, | ||
| 253 | struct beiscsi_hba *phba, | ||
| 254 | char *buf, unsigned int len); | ||
| 249 | #endif | 255 | #endif |
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h index 0050c838c358..961fe439daad 100644 --- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h +++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h | |||
| @@ -51,7 +51,7 @@ bfad_int_to_lun(u32 luno) | |||
| 51 | lun.bfa_lun = 0; | 51 | lun.bfa_lun = 0; |
| 52 | lun.scsi_lun[0] = bfa_os_htons(luno); | 52 | lun.scsi_lun[0] = bfa_os_htons(luno); |
| 53 | 53 | ||
| 54 | return (lun.bfa_lun); | 54 | return lun.bfa_lun; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | /** | 57 | /** |
| @@ -68,7 +68,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) | |||
| 68 | { | 68 | { |
| 69 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | 69 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; |
| 70 | 70 | ||
| 71 | return ((u8 *) cmnd->cmnd); | 71 | return (u8 *) cmnd->cmnd; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | /** | 74 | /** |
| @@ -97,7 +97,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) | |||
| 97 | { | 97 | { |
| 98 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | 98 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; |
| 99 | 99 | ||
| 100 | return (scsi_bufflen(cmnd)); | 100 | return scsi_bufflen(cmnd); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | /** | 103 | /** |
| @@ -129,7 +129,7 @@ bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid) | |||
| 129 | sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid; | 129 | sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid; |
| 130 | addr = (u64) sg_dma_address(sge); | 130 | addr = (u64) sg_dma_address(sge); |
| 131 | 131 | ||
| 132 | return (*(union bfi_addr_u *) &addr); | 132 | return *((union bfi_addr_u *) &addr); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | static inline u32 | 135 | static inline u32 |
| @@ -197,7 +197,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) | |||
| 197 | { | 197 | { |
| 198 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | 198 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; |
| 199 | 199 | ||
| 200 | return (cmnd->cmd_len); | 200 | return cmnd->cmd_len; |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | 203 | ||
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c index 7a959c34e789..2b917792c6bc 100644 --- a/drivers/scsi/bfa/bfa_cee.c +++ b/drivers/scsi/bfa/bfa_cee.c | |||
| @@ -228,7 +228,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) | |||
| 228 | u32 | 228 | u32 |
| 229 | bfa_cee_meminfo(void) | 229 | bfa_cee_meminfo(void) |
| 230 | { | 230 | { |
| 231 | return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo()); | 231 | return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | /** | 234 | /** |
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c index 1b71d349451a..caeb1143a4e6 100644 --- a/drivers/scsi/bfa/bfa_csdebug.c +++ b/drivers/scsi/bfa/bfa_csdebug.c | |||
| @@ -47,12 +47,12 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) | |||
| 47 | tqe = bfa_q_next(q); | 47 | tqe = bfa_q_next(q); |
| 48 | while (tqe != q) { | 48 | while (tqe != q) { |
| 49 | if (tqe == qe) | 49 | if (tqe == qe) |
| 50 | return (1); | 50 | return 1; |
| 51 | tqe = bfa_q_next(tqe); | 51 | tqe = bfa_q_next(tqe); |
| 52 | if (tqe == NULL) | 52 | if (tqe == NULL) |
| 53 | break; | 53 | break; |
| 54 | } | 54 | } |
| 55 | return (0); | 55 | return 0; |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | 58 | ||
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 401babe3494e..790c945aeae6 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c | |||
| @@ -131,7 +131,7 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa) | |||
| 131 | { | 131 | { |
| 132 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | 132 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); |
| 133 | 133 | ||
| 134 | return (fcpim->path_tov / 1000); | 134 | return fcpim->path_tov / 1000; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | bfa_status_t | 137 | bfa_status_t |
| @@ -169,7 +169,7 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa) | |||
| 169 | { | 169 | { |
| 170 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | 170 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); |
| 171 | 171 | ||
| 172 | return (fcpim->q_depth); | 172 | return fcpim->q_depth; |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | 175 | ||
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h index 153206cfb37a..5cf418460f75 100644 --- a/drivers/scsi/bfa/bfa_fcpim_priv.h +++ b/drivers/scsi/bfa/bfa_fcpim_priv.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ | 35 | #define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ |
| 36 | 36 | ||
| 37 | #define bfa_fcpim_stats(__fcpim, __stats) \ | 37 | #define bfa_fcpim_stats(__fcpim, __stats) \ |
| 38 | (__fcpim)->stats.__stats ++ | 38 | ((__fcpim)->stats.__stats++) |
| 39 | 39 | ||
| 40 | struct bfa_fcpim_mod_s { | 40 | struct bfa_fcpim_mod_s { |
| 41 | struct bfa_s *bfa; | 41 | struct bfa_s *bfa; |
| @@ -143,7 +143,7 @@ struct bfa_itnim_s { | |||
| 143 | struct bfa_itnim_hal_stats_s stats; | 143 | struct bfa_itnim_hal_stats_s stats; |
| 144 | }; | 144 | }; |
| 145 | 145 | ||
| 146 | #define bfa_itnim_is_online(_itnim) (_itnim)->is_online | 146 | #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) |
| 147 | #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) | 147 | #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) |
| 148 | #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ | 148 | #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ |
| 149 | (&fcpim->ioim_arr[_iotag]) | 149 | (&fcpim->ioim_arr[_iotag]) |
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index 992435987deb..aef648b55dfc 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c | |||
| @@ -388,32 +388,29 @@ bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) | |||
| 388 | bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); | 388 | bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); |
| 389 | bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, | 389 | bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, |
| 390 | BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); | 390 | BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); |
| 391 | if (BFA_PORT_IS_DISABLED(pport->bfa)) { | 391 | if (BFA_PORT_IS_DISABLED(pport->bfa)) |
| 392 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); | 392 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); |
| 393 | } else { | 393 | else |
| 394 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); | 394 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); |
| 395 | } | ||
| 396 | break; | 395 | break; |
| 397 | 396 | ||
| 398 | case BFA_PPORT_SM_STOP: | 397 | case BFA_PPORT_SM_STOP: |
| 399 | bfa_sm_set_state(pport, bfa_pport_sm_stopped); | 398 | bfa_sm_set_state(pport, bfa_pport_sm_stopped); |
| 400 | bfa_pport_reset_linkinfo(pport); | 399 | bfa_pport_reset_linkinfo(pport); |
| 401 | if (BFA_PORT_IS_DISABLED(pport->bfa)) { | 400 | if (BFA_PORT_IS_DISABLED(pport->bfa)) |
| 402 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); | 401 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); |
| 403 | } else { | 402 | else |
| 404 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); | 403 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); |
| 405 | } | ||
| 406 | break; | 404 | break; |
| 407 | 405 | ||
| 408 | case BFA_PPORT_SM_HWFAIL: | 406 | case BFA_PPORT_SM_HWFAIL: |
| 409 | bfa_sm_set_state(pport, bfa_pport_sm_iocdown); | 407 | bfa_sm_set_state(pport, bfa_pport_sm_iocdown); |
| 410 | bfa_pport_reset_linkinfo(pport); | 408 | bfa_pport_reset_linkinfo(pport); |
| 411 | bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); | 409 | bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); |
| 412 | if (BFA_PORT_IS_DISABLED(pport->bfa)) { | 410 | if (BFA_PORT_IS_DISABLED(pport->bfa)) |
| 413 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); | 411 | bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); |
| 414 | } else { | 412 | else |
| 415 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); | 413 | bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); |
| 416 | } | ||
| 417 | break; | 414 | break; |
| 418 | 415 | ||
| 419 | default: | 416 | default: |
| @@ -999,10 +996,10 @@ bfa_pport_enable(struct bfa_s *bfa) | |||
| 999 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); | 996 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); |
| 1000 | 997 | ||
| 1001 | if (pport->diag_busy) | 998 | if (pport->diag_busy) |
| 1002 | return (BFA_STATUS_DIAG_BUSY); | 999 | return BFA_STATUS_DIAG_BUSY; |
| 1003 | else if (bfa_sm_cmp_state | 1000 | else if (bfa_sm_cmp_state |
| 1004 | (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) | 1001 | (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) |
| 1005 | return (BFA_STATUS_DEVBUSY); | 1002 | return BFA_STATUS_DEVBUSY; |
| 1006 | 1003 | ||
| 1007 | bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); | 1004 | bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); |
| 1008 | return BFA_STATUS_OK; | 1005 | return BFA_STATUS_OK; |
| @@ -1032,7 +1029,7 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) | |||
| 1032 | 1029 | ||
| 1033 | pport->cfg.speed = speed; | 1030 | pport->cfg.speed = speed; |
| 1034 | 1031 | ||
| 1035 | return (BFA_STATUS_OK); | 1032 | return BFA_STATUS_OK; |
| 1036 | } | 1033 | } |
| 1037 | 1034 | ||
| 1038 | /** | 1035 | /** |
| @@ -1068,7 +1065,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) | |||
| 1068 | } | 1065 | } |
| 1069 | 1066 | ||
| 1070 | pport->cfg.topology = topology; | 1067 | pport->cfg.topology = topology; |
| 1071 | return (BFA_STATUS_OK); | 1068 | return BFA_STATUS_OK; |
| 1072 | } | 1069 | } |
| 1073 | 1070 | ||
| 1074 | /** | 1071 | /** |
| @@ -1094,7 +1091,7 @@ bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) | |||
| 1094 | pport->cfg.cfg_hardalpa = BFA_TRUE; | 1091 | pport->cfg.cfg_hardalpa = BFA_TRUE; |
| 1095 | pport->cfg.hardalpa = alpa; | 1092 | pport->cfg.hardalpa = alpa; |
| 1096 | 1093 | ||
| 1097 | return (BFA_STATUS_OK); | 1094 | return BFA_STATUS_OK; |
| 1098 | } | 1095 | } |
| 1099 | 1096 | ||
| 1100 | bfa_status_t | 1097 | bfa_status_t |
| @@ -1106,7 +1103,7 @@ bfa_pport_clr_hardalpa(struct bfa_s *bfa) | |||
| 1106 | bfa_trc(bfa, pport->cfg.hardalpa); | 1103 | bfa_trc(bfa, pport->cfg.hardalpa); |
| 1107 | 1104 | ||
| 1108 | pport->cfg.cfg_hardalpa = BFA_FALSE; | 1105 | pport->cfg.cfg_hardalpa = BFA_FALSE; |
| 1109 | return (BFA_STATUS_OK); | 1106 | return BFA_STATUS_OK; |
| 1110 | } | 1107 | } |
| 1111 | 1108 | ||
| 1112 | bfa_boolean_t | 1109 | bfa_boolean_t |
| @@ -1138,16 +1135,16 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) | |||
| 1138 | * with in range | 1135 | * with in range |
| 1139 | */ | 1136 | */ |
| 1140 | if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) | 1137 | if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) |
| 1141 | return (BFA_STATUS_INVLD_DFSZ); | 1138 | return BFA_STATUS_INVLD_DFSZ; |
| 1142 | 1139 | ||
| 1143 | /* | 1140 | /* |
| 1144 | * power of 2, if not the max frame size of 2112 | 1141 | * power of 2, if not the max frame size of 2112 |
| 1145 | */ | 1142 | */ |
| 1146 | if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) | 1143 | if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) |
| 1147 | return (BFA_STATUS_INVLD_DFSZ); | 1144 | return BFA_STATUS_INVLD_DFSZ; |
| 1148 | 1145 | ||
| 1149 | pport->cfg.maxfrsize = maxfrsize; | 1146 | pport->cfg.maxfrsize = maxfrsize; |
| 1150 | return (BFA_STATUS_OK); | 1147 | return BFA_STATUS_OK; |
| 1151 | } | 1148 | } |
| 1152 | 1149 | ||
| 1153 | u16 | 1150 | u16 |
| @@ -1415,7 +1412,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, | |||
| 1415 | 1412 | ||
| 1416 | if (port->stats_busy) { | 1413 | if (port->stats_busy) { |
| 1417 | bfa_trc(bfa, port->stats_busy); | 1414 | bfa_trc(bfa, port->stats_busy); |
| 1418 | return (BFA_STATUS_DEVBUSY); | 1415 | return BFA_STATUS_DEVBUSY; |
| 1419 | } | 1416 | } |
| 1420 | 1417 | ||
| 1421 | port->stats_busy = BFA_TRUE; | 1418 | port->stats_busy = BFA_TRUE; |
| @@ -1427,7 +1424,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, | |||
| 1427 | 1424 | ||
| 1428 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, | 1425 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, |
| 1429 | BFA_PORT_STATS_TOV); | 1426 | BFA_PORT_STATS_TOV); |
| 1430 | return (BFA_STATUS_OK); | 1427 | return BFA_STATUS_OK; |
| 1431 | } | 1428 | } |
| 1432 | 1429 | ||
| 1433 | bfa_status_t | 1430 | bfa_status_t |
| @@ -1437,7 +1434,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) | |||
| 1437 | 1434 | ||
| 1438 | if (port->stats_busy) { | 1435 | if (port->stats_busy) { |
| 1439 | bfa_trc(bfa, port->stats_busy); | 1436 | bfa_trc(bfa, port->stats_busy); |
| 1440 | return (BFA_STATUS_DEVBUSY); | 1437 | return BFA_STATUS_DEVBUSY; |
| 1441 | } | 1438 | } |
| 1442 | 1439 | ||
| 1443 | port->stats_busy = BFA_TRUE; | 1440 | port->stats_busy = BFA_TRUE; |
| @@ -1448,7 +1445,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) | |||
| 1448 | 1445 | ||
| 1449 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, | 1446 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, |
| 1450 | BFA_PORT_STATS_TOV); | 1447 | BFA_PORT_STATS_TOV); |
| 1451 | return (BFA_STATUS_OK); | 1448 | return BFA_STATUS_OK; |
| 1452 | } | 1449 | } |
| 1453 | 1450 | ||
| 1454 | bfa_status_t | 1451 | bfa_status_t |
| @@ -1515,7 +1512,7 @@ bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, | |||
| 1515 | /* | 1512 | /* |
| 1516 | * QoS stats is embedded in port stats | 1513 | * QoS stats is embedded in port stats |
| 1517 | */ | 1514 | */ |
| 1518 | return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg)); | 1515 | return bfa_pport_get_stats(bfa, stats, cbfn, cbarg); |
| 1519 | } | 1516 | } |
| 1520 | 1517 | ||
| 1521 | bfa_status_t | 1518 | bfa_status_t |
| @@ -1525,7 +1522,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) | |||
| 1525 | 1522 | ||
| 1526 | if (port->stats_busy) { | 1523 | if (port->stats_busy) { |
| 1527 | bfa_trc(bfa, port->stats_busy); | 1524 | bfa_trc(bfa, port->stats_busy); |
| 1528 | return (BFA_STATUS_DEVBUSY); | 1525 | return BFA_STATUS_DEVBUSY; |
| 1529 | } | 1526 | } |
| 1530 | 1527 | ||
| 1531 | port->stats_busy = BFA_TRUE; | 1528 | port->stats_busy = BFA_TRUE; |
| @@ -1536,7 +1533,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) | |||
| 1536 | 1533 | ||
| 1537 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, | 1534 | bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, |
| 1538 | BFA_PORT_STATS_TOV); | 1535 | BFA_PORT_STATS_TOV); |
| 1539 | return (BFA_STATUS_OK); | 1536 | return BFA_STATUS_OK; |
| 1540 | } | 1537 | } |
| 1541 | 1538 | ||
| 1542 | /** | 1539 | /** |
| @@ -1545,7 +1542,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) | |||
| 1545 | bfa_status_t | 1542 | bfa_status_t |
| 1546 | bfa_pport_trunk_disable(struct bfa_s *bfa) | 1543 | bfa_pport_trunk_disable(struct bfa_s *bfa) |
| 1547 | { | 1544 | { |
| 1548 | return (BFA_STATUS_OK); | 1545 | return BFA_STATUS_OK; |
| 1549 | } | 1546 | } |
| 1550 | 1547 | ||
| 1551 | bfa_boolean_t | 1548 | bfa_boolean_t |
| @@ -1562,8 +1559,8 @@ bfa_pport_is_disabled(struct bfa_s *bfa) | |||
| 1562 | { | 1559 | { |
| 1563 | struct bfa_pport_s *port = BFA_PORT_MOD(bfa); | 1560 | struct bfa_pport_s *port = BFA_PORT_MOD(bfa); |
| 1564 | 1561 | ||
| 1565 | return (bfa_sm_to_state(hal_pport_sm_table, port->sm) == | 1562 | return bfa_sm_to_state(hal_pport_sm_table, port->sm) == |
| 1566 | BFA_PPORT_ST_DISABLED); | 1563 | BFA_PPORT_ST_DISABLED; |
| 1567 | 1564 | ||
| 1568 | } | 1565 | } |
| 1569 | 1566 | ||
| @@ -1572,7 +1569,7 @@ bfa_pport_is_ratelim(struct bfa_s *bfa) | |||
| 1572 | { | 1569 | { |
| 1573 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); | 1570 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); |
| 1574 | 1571 | ||
| 1575 | return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE); | 1572 | return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; |
| 1576 | 1573 | ||
| 1577 | } | 1574 | } |
| 1578 | 1575 | ||
| @@ -1620,7 +1617,7 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) | |||
| 1620 | 1617 | ||
| 1621 | pport->cfg.trl_def_speed = speed; | 1618 | pport->cfg.trl_def_speed = speed; |
| 1622 | 1619 | ||
| 1623 | return (BFA_STATUS_OK); | 1620 | return BFA_STATUS_OK; |
| 1624 | } | 1621 | } |
| 1625 | 1622 | ||
| 1626 | /** | 1623 | /** |
| @@ -1632,7 +1629,7 @@ bfa_pport_get_ratelim_speed(struct bfa_s *bfa) | |||
| 1632 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); | 1629 | struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); |
| 1633 | 1630 | ||
| 1634 | bfa_trc(bfa, pport->cfg.trl_def_speed); | 1631 | bfa_trc(bfa, pport->cfg.trl_def_speed); |
| 1635 | return (pport->cfg.trl_def_speed); | 1632 | return pport->cfg.trl_def_speed; |
| 1636 | 1633 | ||
| 1637 | } | 1634 | } |
| 1638 | 1635 | ||
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 8975ed041dc0..c7ab257f10a7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c | |||
| @@ -568,11 +568,10 @@ bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port) | |||
| 568 | 568 | ||
| 569 | __port_action[port->fabric->fab_type].offline(port); | 569 | __port_action[port->fabric->fab_type].offline(port); |
| 570 | 570 | ||
| 571 | if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) { | 571 | if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) |
| 572 | bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); | 572 | bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); |
| 573 | } else { | 573 | else |
| 574 | bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); | 574 | bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); |
| 575 | } | ||
| 576 | bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, | 575 | bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, |
| 577 | port->fabric->vf_drv, | 576 | port->fabric->vf_drv, |
| 578 | (port->vport == NULL) ? NULL : port->vport->vport_drv); | 577 | (port->vport == NULL) ? NULL : port->vport->vport_drv); |
| @@ -777,7 +776,7 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) | |||
| 777 | } | 776 | } |
| 778 | 777 | ||
| 779 | bfa_trc(port->fcs, pwwn); | 778 | bfa_trc(port->fcs, pwwn); |
| 780 | return (NULL); | 779 | return NULL; |
| 781 | } | 780 | } |
| 782 | 781 | ||
| 783 | /** | 782 | /** |
| @@ -796,7 +795,7 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) | |||
| 796 | } | 795 | } |
| 797 | 796 | ||
| 798 | bfa_trc(port->fcs, nwwn); | 797 | bfa_trc(port->fcs, nwwn); |
| 799 | return (NULL); | 798 | return NULL; |
| 800 | } | 799 | } |
| 801 | 800 | ||
| 802 | /** | 801 | /** |
| @@ -870,7 +869,7 @@ bfa_fcs_port_lip(struct bfa_fcs_port_s *port) | |||
| 870 | bfa_boolean_t | 869 | bfa_boolean_t |
| 871 | bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) | 870 | bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) |
| 872 | { | 871 | { |
| 873 | return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)); | 872 | return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online); |
| 874 | } | 873 | } |
| 875 | 874 | ||
| 876 | /** | 875 | /** |
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c index 4754a0e9006a..cf0ad6782686 100644 --- a/drivers/scsi/bfa/bfa_fcxp.c +++ b/drivers/scsi/bfa/bfa_fcxp.c | |||
| @@ -199,7 +199,7 @@ bfa_fcxp_get(struct bfa_fcxp_mod_s *fm) | |||
| 199 | if (fcxp) | 199 | if (fcxp) |
| 200 | list_add_tail(&fcxp->qe, &fm->fcxp_active_q); | 200 | list_add_tail(&fcxp->qe, &fm->fcxp_active_q); |
| 201 | 201 | ||
| 202 | return (fcxp); | 202 | return fcxp; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static void | 205 | static void |
| @@ -503,7 +503,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, | |||
| 503 | 503 | ||
| 504 | fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); | 504 | fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); |
| 505 | if (fcxp == NULL) | 505 | if (fcxp == NULL) |
| 506 | return (NULL); | 506 | return NULL; |
| 507 | 507 | ||
| 508 | bfa_trc(bfa, fcxp->fcxp_tag); | 508 | bfa_trc(bfa, fcxp->fcxp_tag); |
| 509 | 509 | ||
| @@ -568,7 +568,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, | |||
| 568 | } | 568 | } |
| 569 | } | 569 | } |
| 570 | 570 | ||
| 571 | return (fcxp); | 571 | return fcxp; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| 574 | /** | 574 | /** |
| @@ -709,7 +709,7 @@ bfa_status_t | |||
| 709 | bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) | 709 | bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) |
| 710 | { | 710 | { |
| 711 | bfa_assert(0); | 711 | bfa_assert(0); |
| 712 | return (BFA_STATUS_OK); | 712 | return BFA_STATUS_OK; |
| 713 | } | 713 | } |
| 714 | 714 | ||
| 715 | void | 715 | void |
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c index 0ca125712a04..b36540e4ed76 100644 --- a/drivers/scsi/bfa/bfa_intr.c +++ b/drivers/scsi/bfa/bfa_intr.c | |||
| @@ -59,7 +59,7 @@ bfa_intx(struct bfa_s *bfa) | |||
| 59 | qintr = intr & __HFN_INT_RME_MASK; | 59 | qintr = intr & __HFN_INT_RME_MASK; |
| 60 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); | 60 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); |
| 61 | 61 | ||
| 62 | for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) { | 62 | for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { |
| 63 | if (intr & (__HFN_INT_RME_Q0 << queue)) | 63 | if (intr & (__HFN_INT_RME_Q0 << queue)) |
| 64 | bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); | 64 | bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); |
| 65 | } | 65 | } |
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h index 8ce6e6b105c8..5fc301cf4d1b 100644 --- a/drivers/scsi/bfa/bfa_intr_priv.h +++ b/drivers/scsi/bfa/bfa_intr_priv.h | |||
| @@ -26,9 +26,9 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); | |||
| 26 | void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); | 26 | void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); |
| 27 | 27 | ||
| 28 | 28 | ||
| 29 | #define bfa_reqq_pi(__bfa, __reqq) (__bfa)->iocfc.req_cq_pi[__reqq] | 29 | #define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq]) |
| 30 | #define bfa_reqq_ci(__bfa, __reqq) \ | 30 | #define bfa_reqq_ci(__bfa, __reqq) \ |
| 31 | *(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva) | 31 | (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)) |
| 32 | 32 | ||
| 33 | #define bfa_reqq_full(__bfa, __reqq) \ | 33 | #define bfa_reqq_full(__bfa, __reqq) \ |
| 34 | (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ | 34 | (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ |
| @@ -50,14 +50,16 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); | |||
| 50 | } while (0) | 50 | } while (0) |
| 51 | 51 | ||
| 52 | #define bfa_rspq_pi(__bfa, __rspq) \ | 52 | #define bfa_rspq_pi(__bfa, __rspq) \ |
| 53 | *(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva) | 53 | (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) |
| 54 | 54 | ||
| 55 | #define bfa_rspq_ci(__bfa, __rspq) (__bfa)->iocfc.rsp_cq_ci[__rspq] | 55 | #define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq]) |
| 56 | #define bfa_rspq_elem(__bfa, __rspq, __ci) \ | 56 | #define bfa_rspq_elem(__bfa, __rspq, __ci) \ |
| 57 | &((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci] | 57 | (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]) |
| 58 | 58 | ||
| 59 | #define CQ_INCR(__index, __size) \ | 59 | #define CQ_INCR(__index, __size) do { \ |
| 60 | (__index)++; (__index) &= ((__size) - 1) | 60 | (__index)++; \ |
| 61 | (__index) &= ((__size) - 1); \ | ||
| 62 | } while (0) | ||
| 61 | 63 | ||
| 62 | /** | 64 | /** |
| 63 | * Queue element to wait for room in request queue. FIFO order is | 65 | * Queue element to wait for room in request queue. FIFO order is |
| @@ -94,7 +96,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), | |||
| 94 | wqe->cbarg = cbarg; | 96 | wqe->cbarg = cbarg; |
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | #define bfa_reqq(__bfa, __reqq) &(__bfa)->reqq_waitq[__reqq] | 99 | #define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) |
| 98 | 100 | ||
| 99 | /** | 101 | /** |
| 100 | * static inline void | 102 | * static inline void |
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 149348934ce3..397d7e9eade5 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c | |||
| @@ -51,7 +51,7 @@ BFA_TRC_FILE(HAL, IOC); | |||
| 51 | (sizeof(struct bfa_trc_mod_s) - \ | 51 | (sizeof(struct bfa_trc_mod_s) - \ |
| 52 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) | 52 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) |
| 53 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) | 53 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) |
| 54 | #define bfa_ioc_stats(_ioc, _stats) (_ioc)->stats._stats ++ | 54 | #define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) |
| 55 | 55 | ||
| 56 | #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) | 56 | #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) |
| 57 | #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) | 57 | #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) |
| @@ -1953,8 +1953,8 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc) | |||
| 1953 | bfa_boolean_t | 1953 | bfa_boolean_t |
| 1954 | bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) | 1954 | bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) |
| 1955 | { | 1955 | { |
| 1956 | return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) | 1956 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) |
| 1957 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)); | 1957 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); |
| 1958 | } | 1958 | } |
| 1959 | 1959 | ||
| 1960 | /** | 1960 | /** |
| @@ -1963,9 +1963,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) | |||
| 1963 | bfa_boolean_t | 1963 | bfa_boolean_t |
| 1964 | bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) | 1964 | bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) |
| 1965 | { | 1965 | { |
| 1966 | return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) | 1966 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) |
| 1967 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) | 1967 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) |
| 1968 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch)); | 1968 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); |
| 1969 | } | 1969 | } |
| 1970 | 1970 | ||
| 1971 | #define bfa_ioc_state_disabled(__sm) \ | 1971 | #define bfa_ioc_state_disabled(__sm) \ |
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 58efd4b13143..7c30f05ab137 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h | |||
| @@ -179,16 +179,16 @@ struct bfa_ioc_s { | |||
| 179 | struct bfa_ioc_mbox_mod_s mbox_mod; | 179 | struct bfa_ioc_mbox_mod_s mbox_mod; |
| 180 | }; | 180 | }; |
| 181 | 181 | ||
| 182 | #define bfa_ioc_pcifn(__ioc) (__ioc)->pcidev.pci_func | 182 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) |
| 183 | #define bfa_ioc_devid(__ioc) (__ioc)->pcidev.device_id | 183 | #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) |
| 184 | #define bfa_ioc_bar0(__ioc) (__ioc)->pcidev.pci_bar_kva | 184 | #define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) |
| 185 | #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) | 185 | #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) |
| 186 | #define bfa_ioc_fetch_stats(__ioc, __stats) \ | 186 | #define bfa_ioc_fetch_stats(__ioc, __stats) \ |
| 187 | ((__stats)->drv_stats) = (__ioc)->stats | 187 | (((__stats)->drv_stats) = (__ioc)->stats) |
| 188 | #define bfa_ioc_clr_stats(__ioc) \ | 188 | #define bfa_ioc_clr_stats(__ioc) \ |
| 189 | bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) | 189 | bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) |
| 190 | #define bfa_ioc_maxfrsize(__ioc) (__ioc)->attr->maxfrsize | 190 | #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) |
| 191 | #define bfa_ioc_rx_bbcredit(__ioc) (__ioc)->attr->rx_bbcredit | 191 | #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) |
| 192 | #define bfa_ioc_speed_sup(__ioc) \ | 192 | #define bfa_ioc_speed_sup(__ioc) \ |
| 193 | BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) | 193 | BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) |
| 194 | 194 | ||
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index 12350b022d63..d7ab792a9e54 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c | |||
| @@ -794,7 +794,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, | |||
| 794 | 794 | ||
| 795 | if (iocfc->stats_busy) { | 795 | if (iocfc->stats_busy) { |
| 796 | bfa_trc(bfa, iocfc->stats_busy); | 796 | bfa_trc(bfa, iocfc->stats_busy); |
| 797 | return (BFA_STATUS_DEVBUSY); | 797 | return BFA_STATUS_DEVBUSY; |
| 798 | } | 798 | } |
| 799 | 799 | ||
| 800 | iocfc->stats_busy = BFA_TRUE; | 800 | iocfc->stats_busy = BFA_TRUE; |
| @@ -804,7 +804,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, | |||
| 804 | 804 | ||
| 805 | bfa_iocfc_stats_query(bfa); | 805 | bfa_iocfc_stats_query(bfa); |
| 806 | 806 | ||
| 807 | return (BFA_STATUS_OK); | 807 | return BFA_STATUS_OK; |
| 808 | } | 808 | } |
| 809 | 809 | ||
| 810 | bfa_status_t | 810 | bfa_status_t |
| @@ -814,7 +814,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) | |||
| 814 | 814 | ||
| 815 | if (iocfc->stats_busy) { | 815 | if (iocfc->stats_busy) { |
| 816 | bfa_trc(bfa, iocfc->stats_busy); | 816 | bfa_trc(bfa, iocfc->stats_busy); |
| 817 | return (BFA_STATUS_DEVBUSY); | 817 | return BFA_STATUS_DEVBUSY; |
| 818 | } | 818 | } |
| 819 | 819 | ||
| 820 | iocfc->stats_busy = BFA_TRUE; | 820 | iocfc->stats_busy = BFA_TRUE; |
| @@ -822,7 +822,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) | |||
| 822 | iocfc->stats_cbarg = cbarg; | 822 | iocfc->stats_cbarg = cbarg; |
| 823 | 823 | ||
| 824 | bfa_iocfc_stats_clear(bfa); | 824 | bfa_iocfc_stats_clear(bfa); |
| 825 | return (BFA_STATUS_OK); | 825 | return BFA_STATUS_OK; |
| 826 | } | 826 | } |
| 827 | 827 | ||
| 828 | /** | 828 | /** |
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h index 7ad177ed4cfc..ce9a830a4207 100644 --- a/drivers/scsi/bfa/bfa_iocfc.h +++ b/drivers/scsi/bfa/bfa_iocfc.h | |||
| @@ -107,13 +107,13 @@ struct bfa_iocfc_s { | |||
| 107 | 107 | ||
| 108 | #define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) | 108 | #define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) |
| 109 | #define bfa_msix_init(__bfa, __nvecs) \ | 109 | #define bfa_msix_init(__bfa, __nvecs) \ |
| 110 | (__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs) | 110 | ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) |
| 111 | #define bfa_msix_install(__bfa) \ | 111 | #define bfa_msix_install(__bfa) \ |
| 112 | (__bfa)->iocfc.hwif.hw_msix_install(__bfa) | 112 | ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) |
| 113 | #define bfa_msix_uninstall(__bfa) \ | 113 | #define bfa_msix_uninstall(__bfa) \ |
| 114 | (__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa) | 114 | ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) |
| 115 | #define bfa_isr_mode_set(__bfa, __msix) \ | 115 | #define bfa_isr_mode_set(__bfa, __msix) \ |
| 116 | (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix) | 116 | ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) |
| 117 | #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ | 117 | #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ |
| 118 | (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) | 118 | (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) |
| 119 | 119 | ||
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c index 7ae2552e1e14..f81d359b7089 100644 --- a/drivers/scsi/bfa/bfa_ioim.c +++ b/drivers/scsi/bfa/bfa_ioim.c | |||
| @@ -105,13 +105,13 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
| 105 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); | 105 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
| 106 | list_del(&ioim->qe); | 106 | list_del(&ioim->qe); |
| 107 | list_add_tail(&ioim->qe, | 107 | list_add_tail(&ioim->qe, |
| 108 | &ioim->fcpim->ioim_comp_q); | 108 | &ioim->fcpim->ioim_comp_q); |
| 109 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, | 109 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
| 110 | __bfa_cb_ioim_pathtov, ioim); | 110 | __bfa_cb_ioim_pathtov, ioim); |
| 111 | } else { | 111 | } else { |
| 112 | list_del(&ioim->qe); | 112 | list_del(&ioim->qe); |
| 113 | list_add_tail(&ioim->qe, | 113 | list_add_tail(&ioim->qe, |
| 114 | &ioim->itnim->pending_q); | 114 | &ioim->itnim->pending_q); |
| 115 | } | 115 | } |
| 116 | break; | 116 | break; |
| 117 | } | 117 | } |
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c index 4d5c61a4f85c..eabf7d38bd09 100644 --- a/drivers/scsi/bfa/bfa_itnim.c +++ b/drivers/scsi/bfa/bfa_itnim.c | |||
| @@ -1029,7 +1029,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) | |||
| 1029 | bfa_stats(itnim, creates); | 1029 | bfa_stats(itnim, creates); |
| 1030 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); | 1030 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); |
| 1031 | 1031 | ||
| 1032 | return (itnim); | 1032 | return itnim; |
| 1033 | } | 1033 | } |
| 1034 | 1034 | ||
| 1035 | void | 1035 | void |
| @@ -1061,7 +1061,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim) | |||
| 1061 | bfa_boolean_t | 1061 | bfa_boolean_t |
| 1062 | bfa_itnim_hold_io(struct bfa_itnim_s *itnim) | 1062 | bfa_itnim_hold_io(struct bfa_itnim_s *itnim) |
| 1063 | { | 1063 | { |
| 1064 | return ( | 1064 | return |
| 1065 | itnim->fcpim->path_tov && itnim->iotov_active && | 1065 | itnim->fcpim->path_tov && itnim->iotov_active && |
| 1066 | (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || | 1066 | (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || |
| 1067 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || | 1067 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || |
| @@ -1069,7 +1069,7 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim) | |||
| 1069 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || | 1069 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || |
| 1070 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || | 1070 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || |
| 1071 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) | 1071 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) |
| 1072 | ); | 1072 | ; |
| 1073 | } | 1073 | } |
| 1074 | 1074 | ||
| 1075 | void | 1075 | void |
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c index c2735e55cf03..e7514016c9c6 100644 --- a/drivers/scsi/bfa/bfa_log.c +++ b/drivers/scsi/bfa/bfa_log.c | |||
| @@ -231,9 +231,9 @@ bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id) | |||
| 231 | return BFA_LOG_INVALID; | 231 | return BFA_LOG_INVALID; |
| 232 | 232 | ||
| 233 | if (log_mod) | 233 | if (log_mod) |
| 234 | return (log_mod->log_level[mod_id]); | 234 | return log_mod->log_level[mod_id]; |
| 235 | else | 235 | else |
| 236 | return (bfa_log_info[mod_id].level); | 236 | return bfa_log_info[mod_id].level; |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | enum bfa_log_severity | 239 | enum bfa_log_severity |
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h index 4b97e2759908..51f698a06b6d 100644 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ b/drivers/scsi/bfa/bfa_port_priv.h | |||
| @@ -59,8 +59,8 @@ struct bfa_pport_s { | |||
| 59 | u8 *stats_kva; | 59 | u8 *stats_kva; |
| 60 | u64 stats_pa; | 60 | u64 stats_pa; |
| 61 | union bfa_pport_stats_u *stats; /* pport stats */ | 61 | union bfa_pport_stats_u *stats; /* pport stats */ |
| 62 | u32 mypid : 24; | 62 | u32 mypid:24; |
| 63 | u32 rsvd_b : 8; | 63 | u32 rsvd_b:8; |
| 64 | struct bfa_timer_s timer; /* timer */ | 64 | struct bfa_timer_s timer; /* timer */ |
| 65 | union bfa_pport_stats_u *stats_ret; | 65 | union bfa_pport_stats_u *stats_ret; |
| 66 | /* driver stats location */ | 66 | /* driver stats location */ |
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c index 16da77a8db28..3e1990a74258 100644 --- a/drivers/scsi/bfa/bfa_rport.c +++ b/drivers/scsi/bfa/bfa_rport.c | |||
| @@ -677,7 +677,7 @@ bfa_rport_alloc(struct bfa_rport_mod_s *mod) | |||
| 677 | if (rport) | 677 | if (rport) |
| 678 | list_add_tail(&rport->qe, &mod->rp_active_q); | 678 | list_add_tail(&rport->qe, &mod->rp_active_q); |
| 679 | 679 | ||
| 680 | return (rport); | 680 | return rport; |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | static void | 683 | static void |
| @@ -834,7 +834,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv) | |||
| 834 | rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); | 834 | rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); |
| 835 | 835 | ||
| 836 | if (rp == NULL) | 836 | if (rp == NULL) |
| 837 | return (NULL); | 837 | return NULL; |
| 838 | 838 | ||
| 839 | rp->bfa = bfa; | 839 | rp->bfa = bfa; |
| 840 | rp->rport_drv = rport_drv; | 840 | rp->rport_drv = rport_drv; |
| @@ -843,7 +843,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv) | |||
| 843 | bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); | 843 | bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); |
| 844 | bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); | 844 | bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); |
| 845 | 845 | ||
| 846 | return (rp); | 846 | return rp; |
| 847 | } | 847 | } |
| 848 | 848 | ||
| 849 | void | 849 | void |
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c index 010d40d1e5d3..ff7a4dc0bf3c 100644 --- a/drivers/scsi/bfa/bfa_tskim.c +++ b/drivers/scsi/bfa/bfa_tskim.c | |||
| @@ -23,13 +23,14 @@ BFA_TRC_FILE(HAL, TSKIM); | |||
| 23 | /** | 23 | /** |
| 24 | * task management completion handling | 24 | * task management completion handling |
| 25 | */ | 25 | */ |
| 26 | #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ | 26 | #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ |
| 27 | bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \ | 27 | bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \ |
| 28 | __cbfn, (__tskim)); \ | ||
| 28 | bfa_tskim_notify_comp(__tskim); \ | 29 | bfa_tskim_notify_comp(__tskim); \ |
| 29 | } while (0) | 30 | } while (0) |
| 30 | 31 | ||
| 31 | #define bfa_tskim_notify_comp(__tskim) do { \ | 32 | #define bfa_tskim_notify_comp(__tskim) do { \ |
| 32 | if ((__tskim)->notify) \ | 33 | if ((__tskim)->notify) \ |
| 33 | bfa_itnim_tskdone((__tskim)->itnim); \ | 34 | bfa_itnim_tskdone((__tskim)->itnim); \ |
| 34 | } while (0) | 35 | } while (0) |
| 35 | 36 | ||
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c index ff5f9deb1b22..4b3c2417d180 100644 --- a/drivers/scsi/bfa/bfa_uf.c +++ b/drivers/scsi/bfa/bfa_uf.c | |||
| @@ -185,7 +185,7 @@ bfa_uf_get(struct bfa_uf_mod_s *uf_mod) | |||
| 185 | struct bfa_uf_s *uf; | 185 | struct bfa_uf_s *uf; |
| 186 | 186 | ||
| 187 | bfa_q_deq(&uf_mod->uf_free_q, &uf); | 187 | bfa_q_deq(&uf_mod->uf_free_q, &uf); |
| 188 | return (uf); | 188 | return uf; |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | static void | 191 | static void |
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 6f2be5abf561..b52b773d49d9 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
| @@ -188,8 +188,8 @@ static struct bfad_port_s * | |||
| 188 | bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv, | 188 | bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv, |
| 189 | struct bfad_vport_s *vp_drv) | 189 | struct bfad_vport_s *vp_drv) |
| 190 | { | 190 | { |
| 191 | return ((vp_drv) ? (&(vp_drv)->drv_port) | 191 | return (vp_drv) ? (&(vp_drv)->drv_port) |
| 192 | : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport))); | 192 | : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | struct bfad_port_s * | 195 | struct bfad_port_s * |
| @@ -716,7 +716,7 @@ bfad_drv_init(struct bfad_s *bfad) | |||
| 716 | if ((bfad->bfad_flags & BFAD_MSIX_ON) | 716 | if ((bfad->bfad_flags & BFAD_MSIX_ON) |
| 717 | && bfad_install_msix_handler(bfad)) { | 717 | && bfad_install_msix_handler(bfad)) { |
| 718 | printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", | 718 | printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", |
| 719 | __FUNCTION__, bfad->inst_no); | 719 | __func__, bfad->inst_no); |
| 720 | } | 720 | } |
| 721 | 721 | ||
| 722 | bfad_init_timer(bfad); | 722 | bfad_init_timer(bfad); |
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c index bd34b0db2d6b..2ad65f275a92 100644 --- a/drivers/scsi/bfa/bfad_fwimg.c +++ b/drivers/scsi/bfa/bfad_fwimg.c | |||
| @@ -65,10 +65,10 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, | |||
| 65 | memcpy(*bfi_image, fw->data, fw->size); | 65 | memcpy(*bfi_image, fw->data, fw->size); |
| 66 | *bfi_image_size = fw->size/sizeof(u32); | 66 | *bfi_image_size = fw->size/sizeof(u32); |
| 67 | 67 | ||
| 68 | return(*bfi_image); | 68 | return *bfi_image; |
| 69 | 69 | ||
| 70 | error: | 70 | error: |
| 71 | return(NULL); | 71 | return NULL; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | u32 * | 74 | u32 * |
| @@ -78,12 +78,12 @@ bfad_get_firmware_buf(struct pci_dev *pdev) | |||
| 78 | if (bfi_image_ct_size == 0) | 78 | if (bfi_image_ct_size == 0) |
| 79 | bfad_read_firmware(pdev, &bfi_image_ct, | 79 | bfad_read_firmware(pdev, &bfi_image_ct, |
| 80 | &bfi_image_ct_size, BFAD_FW_FILE_CT); | 80 | &bfi_image_ct_size, BFAD_FW_FILE_CT); |
| 81 | return(bfi_image_ct); | 81 | return bfi_image_ct; |
| 82 | } else { | 82 | } else { |
| 83 | if (bfi_image_cb_size == 0) | 83 | if (bfi_image_cb_size == 0) |
| 84 | bfad_read_firmware(pdev, &bfi_image_cb, | 84 | bfad_read_firmware(pdev, &bfi_image_cb, |
| 85 | &bfi_image_cb_size, BFAD_FW_FILE_CB); | 85 | &bfi_image_cb_size, BFAD_FW_FILE_CB); |
| 86 | return(bfi_image_cb); | 86 | return bfi_image_cb; |
| 87 | } | 87 | } |
| 88 | } | 88 | } |
| 89 | 89 | ||
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 55d012a9a668..f788c2a0ab07 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
| @@ -1050,7 +1050,7 @@ bfad_im_itnim_work_handler(struct work_struct *work) | |||
| 1050 | } else { | 1050 | } else { |
| 1051 | printk(KERN_WARNING | 1051 | printk(KERN_WARNING |
| 1052 | "%s: itnim %llx is already in online state\n", | 1052 | "%s: itnim %llx is already in online state\n", |
| 1053 | __FUNCTION__, | 1053 | __func__, |
| 1054 | bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); | 1054 | bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); |
| 1055 | } | 1055 | } |
| 1056 | 1056 | ||
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h index 1d3e74ec338c..b36be15044a4 100644 --- a/drivers/scsi/bfa/bfad_im_compat.h +++ b/drivers/scsi/bfa/bfad_im_compat.h | |||
| @@ -31,7 +31,7 @@ u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, | |||
| 31 | static inline u32 * | 31 | static inline u32 * |
| 32 | bfad_load_fwimg(struct pci_dev *pdev) | 32 | bfad_load_fwimg(struct pci_dev *pdev) |
| 33 | { | 33 | { |
| 34 | return(bfad_get_firmware_buf(pdev)); | 34 | return bfad_get_firmware_buf(pdev); |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static inline void | 37 | static inline void |
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c index f104e029cac9..7de8832f6fee 100644 --- a/drivers/scsi/bfa/bfad_intr.c +++ b/drivers/scsi/bfa/bfad_intr.c | |||
| @@ -23,13 +23,12 @@ BFA_TRC_FILE(LDRV, INTR); | |||
| 23 | /** | 23 | /** |
| 24 | * bfa_isr BFA driver interrupt functions | 24 | * bfa_isr BFA driver interrupt functions |
| 25 | */ | 25 | */ |
| 26 | irqreturn_t bfad_intx(int irq, void *dev_id); | ||
| 27 | static int msix_disable; | 26 | static int msix_disable; |
| 28 | module_param(msix_disable, int, S_IRUGO | S_IWUSR); | 27 | module_param(msix_disable, int, S_IRUGO | S_IWUSR); |
| 29 | /** | 28 | /** |
| 30 | * Line based interrupt handler. | 29 | * Line based interrupt handler. |
| 31 | */ | 30 | */ |
| 32 | irqreturn_t | 31 | static irqreturn_t |
| 33 | bfad_intx(int irq, void *dev_id) | 32 | bfad_intx(int irq, void *dev_id) |
| 34 | { | 33 | { |
| 35 | struct bfad_s *bfad = dev_id; | 34 | struct bfad_s *bfad = dev_id; |
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index a8b14c47b009..a4b5dd449573 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c | |||
| @@ -36,12 +36,12 @@ BFA_TRC_FILE(FCS, FABRIC); | |||
| 36 | #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ | 36 | #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ |
| 37 | #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ | 37 | #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ |
| 38 | 38 | ||
| 39 | #define bfa_fcs_fabric_set_opertype(__fabric) do { \ | 39 | #define bfa_fcs_fabric_set_opertype(__fabric) do { \ |
| 40 | if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ | 40 | if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ |
| 41 | == BFA_PPORT_TOPOLOGY_P2P) \ | 41 | == BFA_PPORT_TOPOLOGY_P2P) \ |
| 42 | (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ | 42 | (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ |
| 43 | else \ | 43 | else \ |
| 44 | (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \ | 44 | (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \ |
| 45 | } while (0) | 45 | } while (0) |
| 46 | 46 | ||
| 47 | /* | 47 | /* |
| @@ -887,7 +887,7 @@ bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs) | |||
| 887 | bfa_boolean_t | 887 | bfa_boolean_t |
| 888 | bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) | 888 | bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) |
| 889 | { | 889 | { |
| 890 | return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback)); | 890 | return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); |
| 891 | } | 891 | } |
| 892 | 892 | ||
| 893 | enum bfa_pport_type | 893 | enum bfa_pport_type |
| @@ -974,7 +974,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) | |||
| 974 | int | 974 | int |
| 975 | bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) | 975 | bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) |
| 976 | { | 976 | { |
| 977 | return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online)); | 977 | return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); |
| 978 | } | 978 | } |
| 979 | 979 | ||
| 980 | 980 | ||
| @@ -1015,7 +1015,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) | |||
| 1015 | u16 | 1015 | u16 |
| 1016 | bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) | 1016 | bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) |
| 1017 | { | 1017 | { |
| 1018 | return (fabric->num_vports); | 1018 | return fabric->num_vports; |
| 1019 | } | 1019 | } |
| 1020 | 1020 | ||
| 1021 | /** | 1021 | /** |
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c index d174706b9caa..fee5456451cb 100644 --- a/drivers/scsi/bfa/fcbuild.c +++ b/drivers/scsi/bfa/fcbuild.c | |||
| @@ -188,14 +188,14 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len) | |||
| 188 | switch (els_cmd->els_code) { | 188 | switch (els_cmd->els_code) { |
| 189 | case FC_ELS_LS_RJT: | 189 | case FC_ELS_LS_RJT: |
| 190 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) | 190 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) |
| 191 | return (FC_PARSE_BUSY); | 191 | return FC_PARSE_BUSY; |
| 192 | else | 192 | else |
| 193 | return (FC_PARSE_FAILURE); | 193 | return FC_PARSE_FAILURE; |
| 194 | 194 | ||
| 195 | case FC_ELS_ACC: | 195 | case FC_ELS_ACC: |
| 196 | return (FC_PARSE_OK); | 196 | return FC_PARSE_OK; |
| 197 | } | 197 | } |
| 198 | return (FC_PARSE_OK); | 198 | return FC_PARSE_OK; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | static void | 201 | static void |
| @@ -228,7 +228,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
| 228 | bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); | 228 | bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); |
| 229 | bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); | 229 | bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); |
| 230 | 230 | ||
| 231 | return (sizeof(struct fc_logi_s)); | 231 | return sizeof(struct fc_logi_s); |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | u16 | 234 | u16 |
| @@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | |||
| 267 | flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ | 267 | flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ |
| 268 | vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); | 268 | vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); |
| 269 | 269 | ||
| 270 | return (sizeof(struct fc_logi_s)); | 270 | return sizeof(struct fc_logi_s); |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | u16 | 273 | u16 |
| @@ -287,7 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | |||
| 287 | 287 | ||
| 288 | flogi->csp.bbcred = bfa_os_htons(local_bb_credits); | 288 | flogi->csp.bbcred = bfa_os_htons(local_bb_credits); |
| 289 | 289 | ||
| 290 | return (sizeof(struct fc_logi_s)); | 290 | return sizeof(struct fc_logi_s); |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | u16 | 293 | u16 |
| @@ -306,7 +306,7 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | |||
| 306 | flogi->port_name = port_name; | 306 | flogi->port_name = port_name; |
| 307 | flogi->node_name = node_name; | 307 | flogi->node_name = node_name; |
| 308 | 308 | ||
| 309 | return (sizeof(struct fc_logi_s)); | 309 | return sizeof(struct fc_logi_s); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | u16 | 312 | u16 |
| @@ -338,26 +338,26 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) | |||
| 338 | case FC_ELS_LS_RJT: | 338 | case FC_ELS_LS_RJT: |
| 339 | ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); | 339 | ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); |
| 340 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) | 340 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) |
| 341 | return (FC_PARSE_BUSY); | 341 | return FC_PARSE_BUSY; |
| 342 | else | 342 | else |
| 343 | return (FC_PARSE_FAILURE); | 343 | return FC_PARSE_FAILURE; |
| 344 | case FC_ELS_ACC: | 344 | case FC_ELS_ACC: |
| 345 | plogi = (struct fc_logi_s *) (fchs + 1); | 345 | plogi = (struct fc_logi_s *) (fchs + 1); |
| 346 | if (len < sizeof(struct fc_logi_s)) | 346 | if (len < sizeof(struct fc_logi_s)) |
| 347 | return (FC_PARSE_FAILURE); | 347 | return FC_PARSE_FAILURE; |
| 348 | 348 | ||
| 349 | if (!wwn_is_equal(plogi->port_name, port_name)) | 349 | if (!wwn_is_equal(plogi->port_name, port_name)) |
| 350 | return (FC_PARSE_FAILURE); | 350 | return FC_PARSE_FAILURE; |
| 351 | 351 | ||
| 352 | if (!plogi->class3.class_valid) | 352 | if (!plogi->class3.class_valid) |
| 353 | return (FC_PARSE_FAILURE); | 353 | return FC_PARSE_FAILURE; |
| 354 | 354 | ||
| 355 | if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) | 355 | if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) |
| 356 | return (FC_PARSE_FAILURE); | 356 | return FC_PARSE_FAILURE; |
| 357 | 357 | ||
| 358 | return (FC_PARSE_OK); | 358 | return FC_PARSE_OK; |
| 359 | default: | 359 | default: |
| 360 | return (FC_PARSE_FAILURE); | 360 | return FC_PARSE_FAILURE; |
| 361 | } | 361 | } |
| 362 | } | 362 | } |
| 363 | 363 | ||
| @@ -372,7 +372,7 @@ fc_plogi_parse(struct fchs_s *fchs) | |||
| 372 | if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) | 372 | if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) |
| 373 | || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) | 373 | || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) |
| 374 | || (plogi->class3.rxsz == 0)) | 374 | || (plogi->class3.rxsz == 0)) |
| 375 | return (FC_PARSE_FAILURE); | 375 | return FC_PARSE_FAILURE; |
| 376 | 376 | ||
| 377 | return FC_PARSE_OK; | 377 | return FC_PARSE_OK; |
| 378 | } | 378 | } |
| @@ -393,7 +393,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
| 393 | prli->parampage.servparams.task_retry_id = 0; | 393 | prli->parampage.servparams.task_retry_id = 0; |
| 394 | prli->parampage.servparams.confirm = 1; | 394 | prli->parampage.servparams.confirm = 1; |
| 395 | 395 | ||
| 396 | return (sizeof(struct fc_prli_s)); | 396 | return sizeof(struct fc_prli_s); |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | u16 | 399 | u16 |
| @@ -414,41 +414,41 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
| 414 | 414 | ||
| 415 | prli->parampage.rspcode = FC_PRLI_ACC_XQTD; | 415 | prli->parampage.rspcode = FC_PRLI_ACC_XQTD; |
| 416 | 416 | ||
| 417 | return (sizeof(struct fc_prli_s)); | 417 | return sizeof(struct fc_prli_s); |
| 418 | } | 418 | } |
| 419 | 419 | ||
| 420 | enum fc_parse_status | 420 | enum fc_parse_status |
| 421 | fc_prli_rsp_parse(struct fc_prli_s *prli, int len) | 421 | fc_prli_rsp_parse(struct fc_prli_s *prli, int len) |
| 422 | { | 422 | { |
| 423 | if (len < sizeof(struct fc_prli_s)) | 423 | if (len < sizeof(struct fc_prli_s)) |
| 424 | return (FC_PARSE_FAILURE); | 424 | return FC_PARSE_FAILURE; |
| 425 | 425 | ||
| 426 | if (prli->command != FC_ELS_ACC) | 426 | if (prli->command != FC_ELS_ACC) |
| 427 | return (FC_PARSE_FAILURE); | 427 | return FC_PARSE_FAILURE; |
| 428 | 428 | ||
| 429 | if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) | 429 | if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) |
| 430 | && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) | 430 | && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) |
| 431 | return (FC_PARSE_FAILURE); | 431 | return FC_PARSE_FAILURE; |
| 432 | 432 | ||
| 433 | if (prli->parampage.servparams.target != 1) | 433 | if (prli->parampage.servparams.target != 1) |
| 434 | return (FC_PARSE_FAILURE); | 434 | return FC_PARSE_FAILURE; |
| 435 | 435 | ||
| 436 | return (FC_PARSE_OK); | 436 | return FC_PARSE_OK; |
| 437 | } | 437 | } |
| 438 | 438 | ||
| 439 | enum fc_parse_status | 439 | enum fc_parse_status |
| 440 | fc_prli_parse(struct fc_prli_s *prli) | 440 | fc_prli_parse(struct fc_prli_s *prli) |
| 441 | { | 441 | { |
| 442 | if (prli->parampage.type != FC_TYPE_FCP) | 442 | if (prli->parampage.type != FC_TYPE_FCP) |
| 443 | return (FC_PARSE_FAILURE); | 443 | return FC_PARSE_FAILURE; |
| 444 | 444 | ||
| 445 | if (!prli->parampage.imagepair) | 445 | if (!prli->parampage.imagepair) |
| 446 | return (FC_PARSE_FAILURE); | 446 | return FC_PARSE_FAILURE; |
| 447 | 447 | ||
| 448 | if (!prli->parampage.servparams.initiator) | 448 | if (!prli->parampage.servparams.initiator) |
| 449 | return (FC_PARSE_FAILURE); | 449 | return FC_PARSE_FAILURE; |
| 450 | 450 | ||
| 451 | return (FC_PARSE_OK); | 451 | return FC_PARSE_OK; |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | u16 | 454 | u16 |
| @@ -462,7 +462,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, | |||
| 462 | logo->nport_id = (s_id); | 462 | logo->nport_id = (s_id); |
| 463 | logo->orig_port_name = port_name; | 463 | logo->orig_port_name = port_name; |
| 464 | 464 | ||
| 465 | return (sizeof(struct fc_logo_s)); | 465 | return sizeof(struct fc_logo_s); |
| 466 | } | 466 | } |
| 467 | 467 | ||
| 468 | static u16 | 468 | static u16 |
| @@ -484,7 +484,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | |||
| 484 | adisc->orig_node_name = node_name; | 484 | adisc->orig_node_name = node_name; |
| 485 | adisc->nport_id = (s_id); | 485 | adisc->nport_id = (s_id); |
| 486 | 486 | ||
| 487 | return (sizeof(struct fc_adisc_s)); | 487 | return sizeof(struct fc_adisc_s); |
| 488 | } | 488 | } |
| 489 | 489 | ||
| 490 | u16 | 490 | u16 |
| @@ -511,15 +511,15 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, | |||
| 511 | { | 511 | { |
| 512 | 512 | ||
| 513 | if (len < sizeof(struct fc_adisc_s)) | 513 | if (len < sizeof(struct fc_adisc_s)) |
| 514 | return (FC_PARSE_FAILURE); | 514 | return FC_PARSE_FAILURE; |
| 515 | 515 | ||
| 516 | if (adisc->els_cmd.els_code != FC_ELS_ACC) | 516 | if (adisc->els_cmd.els_code != FC_ELS_ACC) |
| 517 | return (FC_PARSE_FAILURE); | 517 | return FC_PARSE_FAILURE; |
| 518 | 518 | ||
| 519 | if (!wwn_is_equal(adisc->orig_port_name, port_name)) | 519 | if (!wwn_is_equal(adisc->orig_port_name, port_name)) |
| 520 | return (FC_PARSE_FAILURE); | 520 | return FC_PARSE_FAILURE; |
| 521 | 521 | ||
| 522 | return (FC_PARSE_OK); | 522 | return FC_PARSE_OK; |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | enum fc_parse_status | 525 | enum fc_parse_status |
| @@ -529,14 +529,14 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, | |||
| 529 | struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; | 529 | struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; |
| 530 | 530 | ||
| 531 | if (adisc->els_cmd.els_code != FC_ELS_ACC) | 531 | if (adisc->els_cmd.els_code != FC_ELS_ACC) |
| 532 | return (FC_PARSE_FAILURE); | 532 | return FC_PARSE_FAILURE; |
| 533 | 533 | ||
| 534 | if ((adisc->nport_id == (host_dap)) | 534 | if ((adisc->nport_id == (host_dap)) |
| 535 | && wwn_is_equal(adisc->orig_port_name, port_name) | 535 | && wwn_is_equal(adisc->orig_port_name, port_name) |
| 536 | && wwn_is_equal(adisc->orig_node_name, node_name)) | 536 | && wwn_is_equal(adisc->orig_node_name, node_name)) |
| 537 | return (FC_PARSE_OK); | 537 | return FC_PARSE_OK; |
| 538 | 538 | ||
| 539 | return (FC_PARSE_FAILURE); | 539 | return FC_PARSE_FAILURE; |
| 540 | } | 540 | } |
| 541 | 541 | ||
| 542 | enum fc_parse_status | 542 | enum fc_parse_status |
| @@ -550,13 +550,13 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) | |||
| 550 | if ((bfa_os_ntohs(pdisc->class3.rxsz) < | 550 | if ((bfa_os_ntohs(pdisc->class3.rxsz) < |
| 551 | (FC_MIN_PDUSZ - sizeof(struct fchs_s))) | 551 | (FC_MIN_PDUSZ - sizeof(struct fchs_s))) |
| 552 | || (pdisc->class3.rxsz == 0)) | 552 | || (pdisc->class3.rxsz == 0)) |
| 553 | return (FC_PARSE_FAILURE); | 553 | return FC_PARSE_FAILURE; |
| 554 | 554 | ||
| 555 | if (!wwn_is_equal(pdisc->port_name, port_name)) | 555 | if (!wwn_is_equal(pdisc->port_name, port_name)) |
| 556 | return (FC_PARSE_FAILURE); | 556 | return FC_PARSE_FAILURE; |
| 557 | 557 | ||
| 558 | if (!wwn_is_equal(pdisc->node_name, node_name)) | 558 | if (!wwn_is_equal(pdisc->node_name, node_name)) |
| 559 | return (FC_PARSE_FAILURE); | 559 | return FC_PARSE_FAILURE; |
| 560 | 560 | ||
| 561 | return FC_PARSE_OK; | 561 | return FC_PARSE_OK; |
| 562 | } | 562 | } |
| @@ -570,7 +570,7 @@ fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | |||
| 570 | fchs->s_id = (s_id); | 570 | fchs->s_id = (s_id); |
| 571 | fchs->ox_id = bfa_os_htons(ox_id); | 571 | fchs->ox_id = bfa_os_htons(ox_id); |
| 572 | 572 | ||
| 573 | return (sizeof(struct fchs_s)); | 573 | return sizeof(struct fchs_s); |
| 574 | } | 574 | } |
| 575 | 575 | ||
| 576 | enum fc_parse_status | 576 | enum fc_parse_status |
| @@ -578,9 +578,9 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len) | |||
| 578 | { | 578 | { |
| 579 | if ((fchs->cat_info == FC_CAT_BA_ACC) | 579 | if ((fchs->cat_info == FC_CAT_BA_ACC) |
| 580 | || (fchs->cat_info == FC_CAT_BA_RJT)) | 580 | || (fchs->cat_info == FC_CAT_BA_RJT)) |
| 581 | return (FC_PARSE_OK); | 581 | return FC_PARSE_OK; |
| 582 | 582 | ||
| 583 | return (FC_PARSE_FAILURE); | 583 | return FC_PARSE_FAILURE; |
| 584 | } | 584 | } |
| 585 | 585 | ||
| 586 | u16 | 586 | u16 |
| @@ -597,7 +597,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, | |||
| 597 | rrq->ox_id = bfa_os_htons(rrq_oxid); | 597 | rrq->ox_id = bfa_os_htons(rrq_oxid); |
| 598 | rrq->rx_id = FC_RXID_ANY; | 598 | rrq->rx_id = FC_RXID_ANY; |
| 599 | 599 | ||
| 600 | return (sizeof(struct fc_rrq_s)); | 600 | return sizeof(struct fc_rrq_s); |
| 601 | } | 601 | } |
| 602 | 602 | ||
| 603 | u16 | 603 | u16 |
| @@ -611,7 +611,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
| 611 | memset(acc, 0, sizeof(struct fc_els_cmd_s)); | 611 | memset(acc, 0, sizeof(struct fc_els_cmd_s)); |
| 612 | acc->els_code = FC_ELS_ACC; | 612 | acc->els_code = FC_ELS_ACC; |
| 613 | 613 | ||
| 614 | return (sizeof(struct fc_els_cmd_s)); | 614 | return sizeof(struct fc_els_cmd_s); |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | u16 | 617 | u16 |
| @@ -627,7 +627,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, | |||
| 627 | ls_rjt->reason_code_expl = reason_code_expl; | 627 | ls_rjt->reason_code_expl = reason_code_expl; |
| 628 | ls_rjt->vendor_unique = 0x00; | 628 | ls_rjt->vendor_unique = 0x00; |
| 629 | 629 | ||
| 630 | return (sizeof(struct fc_ls_rjt_s)); | 630 | return sizeof(struct fc_ls_rjt_s); |
| 631 | } | 631 | } |
| 632 | 632 | ||
| 633 | u16 | 633 | u16 |
| @@ -643,7 +643,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, | |||
| 643 | ba_acc->ox_id = fchs->ox_id; | 643 | ba_acc->ox_id = fchs->ox_id; |
| 644 | ba_acc->rx_id = fchs->rx_id; | 644 | ba_acc->rx_id = fchs->rx_id; |
| 645 | 645 | ||
| 646 | return (sizeof(struct fc_ba_acc_s)); | 646 | return sizeof(struct fc_ba_acc_s); |
| 647 | } | 647 | } |
| 648 | 648 | ||
| 649 | u16 | 649 | u16 |
| @@ -654,7 +654,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, | |||
| 654 | memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); | 654 | memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); |
| 655 | els_cmd->els_code = FC_ELS_ACC; | 655 | els_cmd->els_code = FC_ELS_ACC; |
| 656 | 656 | ||
| 657 | return (sizeof(struct fc_els_cmd_s)); | 657 | return sizeof(struct fc_els_cmd_s); |
| 658 | } | 658 | } |
| 659 | 659 | ||
| 660 | int | 660 | int |
| @@ -696,7 +696,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, | |||
| 696 | tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; | 696 | tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; |
| 697 | tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; | 697 | tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; |
| 698 | } | 698 | } |
| 699 | return (bfa_os_ntohs(tprlo_acc->payload_len)); | 699 | return bfa_os_ntohs(tprlo_acc->payload_len); |
| 700 | } | 700 | } |
| 701 | 701 | ||
| 702 | u16 | 702 | u16 |
| @@ -721,7 +721,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, | |||
| 721 | prlo_acc->prlo_acc_params[page].resp_process_assc = 0; | 721 | prlo_acc->prlo_acc_params[page].resp_process_assc = 0; |
| 722 | } | 722 | } |
| 723 | 723 | ||
| 724 | return (bfa_os_ntohs(prlo_acc->payload_len)); | 724 | return bfa_os_ntohs(prlo_acc->payload_len); |
| 725 | } | 725 | } |
| 726 | 726 | ||
| 727 | u16 | 727 | u16 |
| @@ -735,7 +735,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, | |||
| 735 | rnid->els_cmd.els_code = FC_ELS_RNID; | 735 | rnid->els_cmd.els_code = FC_ELS_RNID; |
| 736 | rnid->node_id_data_format = data_format; | 736 | rnid->node_id_data_format = data_format; |
| 737 | 737 | ||
| 738 | return (sizeof(struct fc_rnid_cmd_s)); | 738 | return sizeof(struct fc_rnid_cmd_s); |
| 739 | } | 739 | } |
| 740 | 740 | ||
| 741 | u16 | 741 | u16 |
| @@ -759,10 +759,10 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, | |||
| 759 | rnid_acc->specific_id_data_length = | 759 | rnid_acc->specific_id_data_length = |
| 760 | sizeof(struct fc_rnid_general_topology_data_s); | 760 | sizeof(struct fc_rnid_general_topology_data_s); |
| 761 | bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); | 761 | bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); |
| 762 | return (sizeof(struct fc_rnid_acc_s)); | 762 | return sizeof(struct fc_rnid_acc_s); |
| 763 | } else { | 763 | } else { |
| 764 | return (sizeof(struct fc_rnid_acc_s) - | 764 | return sizeof(struct fc_rnid_acc_s) - |
| 765 | sizeof(struct fc_rnid_general_topology_data_s)); | 765 | sizeof(struct fc_rnid_general_topology_data_s); |
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | } | 768 | } |
| @@ -776,7 +776,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, | |||
| 776 | memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); | 776 | memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); |
| 777 | 777 | ||
| 778 | rpsc->els_cmd.els_code = FC_ELS_RPSC; | 778 | rpsc->els_cmd.els_code = FC_ELS_RPSC; |
| 779 | return (sizeof(struct fc_rpsc_cmd_s)); | 779 | return sizeof(struct fc_rpsc_cmd_s); |
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | u16 | 782 | u16 |
| @@ -797,8 +797,8 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, | |||
| 797 | for (i = 0; i < npids; i++) | 797 | for (i = 0; i < npids; i++) |
| 798 | rpsc2->pid_list[i].pid = pid_list[i]; | 798 | rpsc2->pid_list[i].pid = pid_list[i]; |
| 799 | 799 | ||
| 800 | return (sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * | 800 | return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * |
| 801 | (sizeof(u32)))); | 801 | (sizeof(u32))); |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | u16 | 804 | u16 |
| @@ -819,7 +819,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, | |||
| 819 | rpsc_acc->speed_info[0].port_op_speed = | 819 | rpsc_acc->speed_info[0].port_op_speed = |
| 820 | bfa_os_htons(oper_speed->port_op_speed); | 820 | bfa_os_htons(oper_speed->port_op_speed); |
| 821 | 821 | ||
| 822 | return (sizeof(struct fc_rpsc_acc_s)); | 822 | return sizeof(struct fc_rpsc_acc_s); |
| 823 | 823 | ||
| 824 | } | 824 | } |
| 825 | 825 | ||
| @@ -856,7 +856,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | |||
| 856 | pdisc->port_name = port_name; | 856 | pdisc->port_name = port_name; |
| 857 | pdisc->node_name = node_name; | 857 | pdisc->node_name = node_name; |
| 858 | 858 | ||
| 859 | return (sizeof(struct fc_logi_s)); | 859 | return sizeof(struct fc_logi_s); |
| 860 | } | 860 | } |
| 861 | 861 | ||
| 862 | u16 | 862 | u16 |
| @@ -865,21 +865,21 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) | |||
| 865 | struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); | 865 | struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); |
| 866 | 866 | ||
| 867 | if (len < sizeof(struct fc_logi_s)) | 867 | if (len < sizeof(struct fc_logi_s)) |
| 868 | return (FC_PARSE_LEN_INVAL); | 868 | return FC_PARSE_LEN_INVAL; |
| 869 | 869 | ||
| 870 | if (pdisc->els_cmd.els_code != FC_ELS_ACC) | 870 | if (pdisc->els_cmd.els_code != FC_ELS_ACC) |
| 871 | return (FC_PARSE_ACC_INVAL); | 871 | return FC_PARSE_ACC_INVAL; |
| 872 | 872 | ||
| 873 | if (!wwn_is_equal(pdisc->port_name, port_name)) | 873 | if (!wwn_is_equal(pdisc->port_name, port_name)) |
| 874 | return (FC_PARSE_PWWN_NOT_EQUAL); | 874 | return FC_PARSE_PWWN_NOT_EQUAL; |
| 875 | 875 | ||
| 876 | if (!pdisc->class3.class_valid) | 876 | if (!pdisc->class3.class_valid) |
| 877 | return (FC_PARSE_NWWN_NOT_EQUAL); | 877 | return FC_PARSE_NWWN_NOT_EQUAL; |
| 878 | 878 | ||
| 879 | if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) | 879 | if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) |
| 880 | return (FC_PARSE_RXSZ_INVAL); | 880 | return FC_PARSE_RXSZ_INVAL; |
| 881 | 881 | ||
| 882 | return (FC_PARSE_OK); | 882 | return FC_PARSE_OK; |
| 883 | } | 883 | } |
| 884 | 884 | ||
| 885 | u16 | 885 | u16 |
| @@ -903,7 +903,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, | |||
| 903 | prlo->prlo_params[page].resp_process_assc = 0; | 903 | prlo->prlo_params[page].resp_process_assc = 0; |
| 904 | } | 904 | } |
| 905 | 905 | ||
| 906 | return (bfa_os_ntohs(prlo->payload_len)); | 906 | return bfa_os_ntohs(prlo->payload_len); |
| 907 | } | 907 | } |
| 908 | 908 | ||
| 909 | u16 | 909 | u16 |
| @@ -916,7 +916,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len) | |||
| 916 | len = len; | 916 | len = len; |
| 917 | 917 | ||
| 918 | if (prlo->command != FC_ELS_ACC) | 918 | if (prlo->command != FC_ELS_ACC) |
| 919 | return (FC_PARSE_FAILURE); | 919 | return FC_PARSE_FAILURE; |
| 920 | 920 | ||
| 921 | num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; | 921 | num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; |
| 922 | 922 | ||
| @@ -936,7 +936,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len) | |||
| 936 | if (prlo->prlo_acc_params[page].resp_process_assc != 0) | 936 | if (prlo->prlo_acc_params[page].resp_process_assc != 0) |
| 937 | return FC_PARSE_FAILURE; | 937 | return FC_PARSE_FAILURE; |
| 938 | } | 938 | } |
| 939 | return (FC_PARSE_OK); | 939 | return FC_PARSE_OK; |
| 940 | 940 | ||
| 941 | } | 941 | } |
| 942 | 942 | ||
| @@ -968,7 +968,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | |||
| 968 | } | 968 | } |
| 969 | } | 969 | } |
| 970 | 970 | ||
| 971 | return (bfa_os_ntohs(tprlo->payload_len)); | 971 | return bfa_os_ntohs(tprlo->payload_len); |
| 972 | } | 972 | } |
| 973 | 973 | ||
| 974 | u16 | 974 | u16 |
| @@ -981,23 +981,23 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) | |||
| 981 | len = len; | 981 | len = len; |
| 982 | 982 | ||
| 983 | if (tprlo->command != FC_ELS_ACC) | 983 | if (tprlo->command != FC_ELS_ACC) |
| 984 | return (FC_PARSE_ACC_INVAL); | 984 | return FC_PARSE_ACC_INVAL; |
| 985 | 985 | ||
| 986 | num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; | 986 | num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; |
| 987 | 987 | ||
| 988 | for (page = 0; page < num_pages; page++) { | 988 | for (page = 0; page < num_pages; page++) { |
| 989 | if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) | 989 | if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) |
| 990 | return (FC_PARSE_NOT_FCP); | 990 | return FC_PARSE_NOT_FCP; |
| 991 | if (tprlo->tprlo_acc_params[page].opa_valid != 0) | 991 | if (tprlo->tprlo_acc_params[page].opa_valid != 0) |
| 992 | return (FC_PARSE_OPAFLAG_INVAL); | 992 | return FC_PARSE_OPAFLAG_INVAL; |
| 993 | if (tprlo->tprlo_acc_params[page].rpa_valid != 0) | 993 | if (tprlo->tprlo_acc_params[page].rpa_valid != 0) |
| 994 | return (FC_PARSE_RPAFLAG_INVAL); | 994 | return FC_PARSE_RPAFLAG_INVAL; |
| 995 | if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) | 995 | if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) |
| 996 | return (FC_PARSE_OPA_INVAL); | 996 | return FC_PARSE_OPA_INVAL; |
| 997 | if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) | 997 | if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) |
| 998 | return (FC_PARSE_RPA_INVAL); | 998 | return FC_PARSE_RPA_INVAL; |
| 999 | } | 999 | } |
| 1000 | return (FC_PARSE_OK); | 1000 | return FC_PARSE_OK; |
| 1001 | } | 1001 | } |
| 1002 | 1002 | ||
| 1003 | enum fc_parse_status | 1003 | enum fc_parse_status |
| @@ -1024,7 +1024,7 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | |||
| 1024 | fchs->cat_info = FC_CAT_BA_RJT; | 1024 | fchs->cat_info = FC_CAT_BA_RJT; |
| 1025 | ba_rjt->reason_code = reason_code; | 1025 | ba_rjt->reason_code = reason_code; |
| 1026 | ba_rjt->reason_expl = reason_expl; | 1026 | ba_rjt->reason_expl = reason_expl; |
| 1027 | return (sizeof(struct fc_ba_rjt_s)); | 1027 | return sizeof(struct fc_ba_rjt_s); |
| 1028 | } | 1028 | } |
| 1029 | 1029 | ||
| 1030 | static void | 1030 | static void |
| @@ -1073,7 +1073,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1073 | 1073 | ||
| 1074 | bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); | 1074 | bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); |
| 1075 | gidpn->port_name = port_name; | 1075 | gidpn->port_name = port_name; |
| 1076 | return (sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s)); | 1076 | return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); |
| 1077 | } | 1077 | } |
| 1078 | 1078 | ||
| 1079 | u16 | 1079 | u16 |
| @@ -1090,7 +1090,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1090 | 1090 | ||
| 1091 | bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); | 1091 | bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); |
| 1092 | gpnid->dap = port_id; | 1092 | gpnid->dap = port_id; |
| 1093 | return (sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s)); | 1093 | return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); |
| 1094 | } | 1094 | } |
| 1095 | 1095 | ||
| 1096 | u16 | 1096 | u16 |
| @@ -1107,7 +1107,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1107 | 1107 | ||
| 1108 | bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); | 1108 | bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); |
| 1109 | gnnid->dap = port_id; | 1109 | gnnid->dap = port_id; |
| 1110 | return (sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s)); | 1110 | return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); |
| 1111 | } | 1111 | } |
| 1112 | 1112 | ||
| 1113 | u16 | 1113 | u16 |
| @@ -1137,7 +1137,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, | |||
| 1137 | if (set_br_reg) | 1137 | if (set_br_reg) |
| 1138 | scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; | 1138 | scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; |
| 1139 | 1139 | ||
| 1140 | return (sizeof(struct fc_scr_s)); | 1140 | return sizeof(struct fc_scr_s); |
| 1141 | } | 1141 | } |
| 1142 | 1142 | ||
| 1143 | u16 | 1143 | u16 |
| @@ -1157,7 +1157,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, | |||
| 1157 | rscn->event[0].format = FC_RSCN_FORMAT_PORTID; | 1157 | rscn->event[0].format = FC_RSCN_FORMAT_PORTID; |
| 1158 | rscn->event[0].portid = s_id; | 1158 | rscn->event[0].portid = s_id; |
| 1159 | 1159 | ||
| 1160 | return (sizeof(struct fc_rscn_pl_s)); | 1160 | return sizeof(struct fc_rscn_pl_s); |
| 1161 | } | 1161 | } |
| 1162 | 1162 | ||
| 1163 | u16 | 1163 | u16 |
| @@ -1188,7 +1188,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1188 | rftid->fc4_type[index] |= bfa_os_htonl(type_value); | 1188 | rftid->fc4_type[index] |= bfa_os_htonl(type_value); |
| 1189 | } | 1189 | } |
| 1190 | 1190 | ||
| 1191 | return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s)); | 1191 | return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); |
| 1192 | } | 1192 | } |
| 1193 | 1193 | ||
| 1194 | u16 | 1194 | u16 |
| @@ -1210,7 +1210,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, | |||
| 1210 | bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, | 1210 | bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, |
| 1211 | (bitmap_size < 32 ? bitmap_size : 32)); | 1211 | (bitmap_size < 32 ? bitmap_size : 32)); |
| 1212 | 1212 | ||
| 1213 | return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s)); | 1213 | return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); |
| 1214 | } | 1214 | } |
| 1215 | 1215 | ||
| 1216 | u16 | 1216 | u16 |
| @@ -1231,7 +1231,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1231 | rffid->fc4ftr_bits = fc4_ftrs; | 1231 | rffid->fc4ftr_bits = fc4_ftrs; |
| 1232 | rffid->fc4_type = fc4_type; | 1232 | rffid->fc4_type = fc4_type; |
| 1233 | 1233 | ||
| 1234 | return (sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s)); | 1234 | return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); |
| 1235 | } | 1235 | } |
| 1236 | 1236 | ||
| 1237 | u16 | 1237 | u16 |
| @@ -1253,7 +1253,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
| 1253 | rspnid->spn_len = (u8) strlen((char *)name); | 1253 | rspnid->spn_len = (u8) strlen((char *)name); |
| 1254 | strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); | 1254 | strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); |
| 1255 | 1255 | ||
| 1256 | return (sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s)); | 1256 | return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); |
| 1257 | } | 1257 | } |
| 1258 | 1258 | ||
| 1259 | u16 | 1259 | u16 |
| @@ -1275,7 +1275,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, | |||
| 1275 | gidft->domain_id = 0; | 1275 | gidft->domain_id = 0; |
| 1276 | gidft->area_id = 0; | 1276 | gidft->area_id = 0; |
| 1277 | 1277 | ||
| 1278 | return (sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s)); | 1278 | return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); |
| 1279 | } | 1279 | } |
| 1280 | 1280 | ||
| 1281 | u16 | 1281 | u16 |
| @@ -1294,7 +1294,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
| 1294 | rpnid->port_id = port_id; | 1294 | rpnid->port_id = port_id; |
| 1295 | rpnid->port_name = port_name; | 1295 | rpnid->port_name = port_name; |
| 1296 | 1296 | ||
| 1297 | return (sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s)); | 1297 | return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| 1300 | u16 | 1300 | u16 |
| @@ -1313,7 +1313,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
| 1313 | rnnid->port_id = port_id; | 1313 | rnnid->port_id = port_id; |
| 1314 | rnnid->node_name = node_name; | 1314 | rnnid->node_name = node_name; |
| 1315 | 1315 | ||
| 1316 | return (sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s)); | 1316 | return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); |
| 1317 | } | 1317 | } |
| 1318 | 1318 | ||
| 1319 | u16 | 1319 | u16 |
| @@ -1332,7 +1332,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
| 1332 | rcsid->port_id = port_id; | 1332 | rcsid->port_id = port_id; |
| 1333 | rcsid->cos = cos; | 1333 | rcsid->cos = cos; |
| 1334 | 1334 | ||
| 1335 | return (sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s)); | 1335 | return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); |
| 1336 | } | 1336 | } |
| 1337 | 1337 | ||
| 1338 | u16 | 1338 | u16 |
| @@ -1351,7 +1351,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
| 1351 | rptid->port_id = port_id; | 1351 | rptid->port_id = port_id; |
| 1352 | rptid->port_type = port_type; | 1352 | rptid->port_type = port_type; |
| 1353 | 1353 | ||
| 1354 | return (sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s)); | 1354 | return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); |
| 1355 | } | 1355 | } |
| 1356 | 1356 | ||
| 1357 | u16 | 1357 | u16 |
| @@ -1368,7 +1368,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) | |||
| 1368 | bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); | 1368 | bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); |
| 1369 | ganxt->port_id = port_id; | 1369 | ganxt->port_id = port_id; |
| 1370 | 1370 | ||
| 1371 | return (sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s)); | 1371 | return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); |
| 1372 | } | 1372 | } |
| 1373 | 1373 | ||
| 1374 | /* | 1374 | /* |
| @@ -1385,7 +1385,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, | |||
| 1385 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1385 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
| 1386 | fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); | 1386 | fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); |
| 1387 | 1387 | ||
| 1388 | return (sizeof(struct ct_hdr_s)); | 1388 | return sizeof(struct ct_hdr_s); |
| 1389 | } | 1389 | } |
| 1390 | 1390 | ||
| 1391 | /* | 1391 | /* |
| @@ -1425,7 +1425,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) | |||
| 1425 | bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); | 1425 | bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); |
| 1426 | gmal->wwn = wwn; | 1426 | gmal->wwn = wwn; |
| 1427 | 1427 | ||
| 1428 | return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t)); | 1428 | return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); |
| 1429 | } | 1429 | } |
| 1430 | 1430 | ||
| 1431 | /* | 1431 | /* |
| @@ -1445,5 +1445,5 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) | |||
| 1445 | bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); | 1445 | bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); |
| 1446 | gfn->wwn = wwn; | 1446 | gfn->wwn = wwn; |
| 1447 | 1447 | ||
| 1448 | return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t)); | 1448 | return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); |
| 1449 | } | 1449 | } |
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h index 4d248424f7b3..8fa7f270ef7b 100644 --- a/drivers/scsi/bfa/fcbuild.h +++ b/drivers/scsi/bfa/fcbuild.h | |||
| @@ -32,8 +32,8 @@ | |||
| 32 | * Utility Macros/functions | 32 | * Utility Macros/functions |
| 33 | */ | 33 | */ |
| 34 | 34 | ||
| 35 | #define fcif_sof_set(_ifhdr, _sof) (_ifhdr)->sof = FC_ ## _sof | 35 | #define fcif_sof_set(_ifhdr, _sof) ((_ifhdr)->sof = FC_ ## _sof) |
| 36 | #define fcif_eof_set(_ifhdr, _eof) (_ifhdr)->eof = FC_ ## _eof | 36 | #define fcif_eof_set(_ifhdr, _eof) ((_ifhdr)->eof = FC_ ## _eof) |
| 37 | 37 | ||
| 38 | #define wwn_is_equal(_wwn1, _wwn2) \ | 38 | #define wwn_is_equal(_wwn1, _wwn2) \ |
| 39 | (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) | 39 | (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) |
| @@ -49,7 +49,7 @@ | |||
| 49 | static inline u32 | 49 | static inline u32 |
| 50 | fc_get_ctresp_pyld_len(u32 resp_len) | 50 | fc_get_ctresp_pyld_len(u32 resp_len) |
| 51 | { | 51 | { |
| 52 | return (resp_len - sizeof(struct ct_hdr_s)); | 52 | return resp_len - sizeof(struct ct_hdr_s); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index 8ce5d8934677..1f3c06efaa9e 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c | |||
| @@ -286,11 +286,10 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, | |||
| 286 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); | 286 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); |
| 287 | bfa_fcb_itnim_offline(itnim->itnim_drv); | 287 | bfa_fcb_itnim_offline(itnim->itnim_drv); |
| 288 | bfa_itnim_offline(itnim->bfa_itnim); | 288 | bfa_itnim_offline(itnim->bfa_itnim); |
| 289 | if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) { | 289 | if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) |
| 290 | bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); | 290 | bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); |
| 291 | } else { | 291 | else |
| 292 | bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); | 292 | bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); |
| 293 | } | ||
| 294 | break; | 293 | break; |
| 295 | 294 | ||
| 296 | case BFA_FCS_ITNIM_SM_DELETE: | 295 | case BFA_FCS_ITNIM_SM_DELETE: |
| @@ -732,7 +731,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) | |||
| 732 | return NULL; | 731 | return NULL; |
| 733 | 732 | ||
| 734 | bfa_assert(rport->itnim != NULL); | 733 | bfa_assert(rport->itnim != NULL); |
| 735 | return (rport->itnim); | 734 | return rport->itnim; |
| 736 | } | 735 | } |
| 737 | 736 | ||
| 738 | bfa_status_t | 737 | bfa_status_t |
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h index deee685e8478..8d08230e6295 100644 --- a/drivers/scsi/bfa/fcs.h +++ b/drivers/scsi/bfa/fcs.h | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #ifndef __FCS_H__ | 23 | #ifndef __FCS_H__ |
| 24 | #define __FCS_H__ | 24 | #define __FCS_H__ |
| 25 | 25 | ||
| 26 | #define __fcs_min_cfg(__fcs) (__fcs)->min_cfg | 26 | #define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) |
| 27 | 27 | ||
| 28 | void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); | 28 | void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); |
| 29 | 29 | ||
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index b845eb272c78..df2a1e54e16b 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c | |||
| @@ -72,9 +72,9 @@ static u16 bfa_fcs_port_fdmi_build_rpa_pyld( | |||
| 72 | struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); | 72 | struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); |
| 73 | static u16 bfa_fcs_port_fdmi_build_portattr_block( | 73 | static u16 bfa_fcs_port_fdmi_build_portattr_block( |
| 74 | struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); | 74 | struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); |
| 75 | void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, | 75 | static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, |
| 76 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr); | 76 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr); |
| 77 | void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, | 77 | static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, |
| 78 | struct bfa_fcs_fdmi_port_attr_s *port_attr); | 78 | struct bfa_fcs_fdmi_port_attr_s *port_attr); |
| 79 | /** | 79 | /** |
| 80 | * fcs_fdmi_sm FCS FDMI state machine | 80 | * fcs_fdmi_sm FCS FDMI state machine |
| @@ -1091,7 +1091,7 @@ bfa_fcs_port_fdmi_timeout(void *arg) | |||
| 1091 | bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); | 1091 | bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); |
| 1092 | } | 1092 | } |
| 1093 | 1093 | ||
| 1094 | void | 1094 | static void |
| 1095 | bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, | 1095 | bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, |
| 1096 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr) | 1096 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr) |
| 1097 | { | 1097 | { |
| @@ -1145,7 +1145,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, | |||
| 1145 | 1145 | ||
| 1146 | } | 1146 | } |
| 1147 | 1147 | ||
| 1148 | void | 1148 | static void |
| 1149 | bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, | 1149 | bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, |
| 1150 | struct bfa_fcs_fdmi_port_attr_s *port_attr) | 1150 | struct bfa_fcs_fdmi_port_attr_s *port_attr) |
| 1151 | { | 1151 | { |
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h index da8cac093d3d..d9cbc2a783d4 100644 --- a/drivers/scsi/bfa/include/aen/bfa_aen.h +++ b/drivers/scsi/bfa/include/aen/bfa_aen.h | |||
| @@ -54,7 +54,7 @@ bfa_aen_get_max_cfg_entry(void) | |||
| 54 | static inline s32 | 54 | static inline s32 |
| 55 | bfa_aen_get_meminfo(void) | 55 | bfa_aen_get_meminfo(void) |
| 56 | { | 56 | { |
| 57 | return (sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry()); | 57 | return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | static inline s32 | 60 | static inline s32 |
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h index 64c1412c5703..d4bc0d9fa42c 100644 --- a/drivers/scsi/bfa/include/bfa.h +++ b/drivers/scsi/bfa/include/bfa.h | |||
| @@ -76,11 +76,11 @@ struct bfa_meminfo_s { | |||
| 76 | struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; | 76 | struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; |
| 77 | }; | 77 | }; |
| 78 | #define bfa_meminfo_kva(_m) \ | 78 | #define bfa_meminfo_kva(_m) \ |
| 79 | (_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp | 79 | ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp) |
| 80 | #define bfa_meminfo_dma_virt(_m) \ | 80 | #define bfa_meminfo_dma_virt(_m) \ |
| 81 | (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp | 81 | ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp) |
| 82 | #define bfa_meminfo_dma_phys(_m) \ | 82 | #define bfa_meminfo_dma_phys(_m) \ |
| 83 | (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp | 83 | ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) |
| 84 | 84 | ||
| 85 | /** | 85 | /** |
| 86 | * Generic Scatter Gather Element used by driver | 86 | * Generic Scatter Gather Element used by driver |
| @@ -100,7 +100,7 @@ struct bfa_sge_s { | |||
| 100 | /* | 100 | /* |
| 101 | * bfa stats interfaces | 101 | * bfa stats interfaces |
| 102 | */ | 102 | */ |
| 103 | #define bfa_stats(_mod, _stats) (_mod)->stats._stats ++ | 103 | #define bfa_stats(_mod, _stats) ((_mod)->stats._stats++) |
| 104 | 104 | ||
| 105 | #define bfa_ioc_get_stats(__bfa, __ioc_stats) \ | 105 | #define bfa_ioc_get_stats(__bfa, __ioc_stats) \ |
| 106 | bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) | 106 | bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) |
| @@ -136,7 +136,7 @@ void bfa_isr_enable(struct bfa_s *bfa); | |||
| 136 | void bfa_isr_disable(struct bfa_s *bfa); | 136 | void bfa_isr_disable(struct bfa_s *bfa); |
| 137 | void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, | 137 | void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, |
| 138 | u32 *num_vecs, u32 *max_vec_bit); | 138 | u32 *num_vecs, u32 *max_vec_bit); |
| 139 | #define bfa_msix(__bfa, __vec) (__bfa)->msix.handler[__vec](__bfa, __vec) | 139 | #define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec)) |
| 140 | 140 | ||
| 141 | void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); | 141 | void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); |
| 142 | void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); | 142 | void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); |
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 0c80b74f72ef..268d956bad89 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h | |||
| @@ -34,10 +34,10 @@ struct bfa_fcxp_s; | |||
| 34 | */ | 34 | */ |
| 35 | struct bfa_rport_info_s { | 35 | struct bfa_rport_info_s { |
| 36 | u16 max_frmsz; /* max rcv pdu size */ | 36 | u16 max_frmsz; /* max rcv pdu size */ |
| 37 | u32 pid : 24, /* remote port ID */ | 37 | u32 pid:24, /* remote port ID */ |
| 38 | lp_tag : 8; | 38 | lp_tag:8; |
| 39 | u32 local_pid : 24, /* local port ID */ | 39 | u32 local_pid:24, /* local port ID */ |
| 40 | cisc : 8; /* CIRO supported */ | 40 | cisc:8; /* CIRO supported */ |
| 41 | u8 fc_class; /* supported FC classes. enum fc_cos */ | 41 | u8 fc_class; /* supported FC classes. enum fc_cos */ |
| 42 | u8 vf_en; /* virtual fabric enable */ | 42 | u8 vf_en; /* virtual fabric enable */ |
| 43 | u16 vf_id; /* virtual fabric ID */ | 43 | u16 vf_id; /* virtual fabric ID */ |
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h index 6cadfe0d4ba1..7042c18e542d 100644 --- a/drivers/scsi/bfa/include/bfi/bfi.h +++ b/drivers/scsi/bfa/include/bfi/bfi.h | |||
| @@ -93,13 +93,13 @@ union bfi_addr_u { | |||
| 93 | */ | 93 | */ |
| 94 | struct bfi_sge_s { | 94 | struct bfi_sge_s { |
| 95 | #ifdef __BIGENDIAN | 95 | #ifdef __BIGENDIAN |
| 96 | u32 flags : 2, | 96 | u32 flags:2, |
| 97 | rsvd : 2, | 97 | rsvd:2, |
| 98 | sg_len : 28; | 98 | sg_len:28; |
| 99 | #else | 99 | #else |
| 100 | u32 sg_len : 28, | 100 | u32 sg_len:28, |
| 101 | rsvd : 2, | 101 | rsvd:2, |
| 102 | flags : 2; | 102 | flags:2; |
| 103 | #endif | 103 | #endif |
| 104 | union bfi_addr_u sga; | 104 | union bfi_addr_u sga; |
| 105 | }; | 105 | }; |
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h index 026e9c06ae97..96ef05670659 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h | |||
| @@ -142,7 +142,7 @@ enum { | |||
| 142 | BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ | 142 | BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | #define BFI_ADAPTER_GETP(__prop,__adap_prop) \ | 145 | #define BFI_ADAPTER_GETP(__prop, __adap_prop) \ |
| 146 | (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ | 146 | (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ |
| 147 | BFI_ADAPTER_ ## __prop ## _SH) | 147 | BFI_ADAPTER_ ## __prop ## _SH) |
| 148 | #define BFI_ADAPTER_SETP(__prop, __val) \ | 148 | #define BFI_ADAPTER_SETP(__prop, __val) \ |
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h index 414b0e30f6ef..c59d47badb4b 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_lps.h +++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h | |||
| @@ -55,8 +55,8 @@ struct bfi_lps_login_rsp_s { | |||
| 55 | u16 bb_credit; | 55 | u16 bb_credit; |
| 56 | u8 f_port; | 56 | u8 f_port; |
| 57 | u8 npiv_en; | 57 | u8 npiv_en; |
| 58 | u32 lp_pid : 24; | 58 | u32 lp_pid:24; |
| 59 | u32 auth_req : 8; | 59 | u32 auth_req:8; |
| 60 | mac_t lp_mac; | 60 | mac_t lp_mac; |
| 61 | mac_t fcf_mac; | 61 | mac_t fcf_mac; |
| 62 | u8 ext_status; | 62 | u8 ext_status; |
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h index 3520f55f09d7..e1cd83b56ec6 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_rport.h +++ b/drivers/scsi/bfa/include/bfi/bfi_rport.h | |||
| @@ -38,10 +38,10 @@ struct bfi_rport_create_req_s { | |||
| 38 | struct bfi_mhdr_s mh; /* common msg header */ | 38 | struct bfi_mhdr_s mh; /* common msg header */ |
| 39 | u16 bfa_handle; /* host rport handle */ | 39 | u16 bfa_handle; /* host rport handle */ |
| 40 | u16 max_frmsz; /* max rcv pdu size */ | 40 | u16 max_frmsz; /* max rcv pdu size */ |
| 41 | u32 pid : 24, /* remote port ID */ | 41 | u32 pid:24, /* remote port ID */ |
| 42 | lp_tag : 8; /* local port tag */ | 42 | lp_tag:8; /* local port tag */ |
| 43 | u32 local_pid : 24, /* local port ID */ | 43 | u32 local_pid:24, /* local port ID */ |
| 44 | cisc : 8; | 44 | cisc:8; |
| 45 | u8 fc_class; /* supported FC classes */ | 45 | u8 fc_class; /* supported FC classes */ |
| 46 | u8 vf_en; /* virtual fabric enable */ | 46 | u8 vf_en; /* virtual fabric enable */ |
| 47 | u16 vf_id; /* virtual fabric ID */ | 47 | u16 vf_id; /* virtual fabric ID */ |
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h index af8c1d533ba8..650f8d0aaff9 100644 --- a/drivers/scsi/bfa/include/cs/bfa_checksum.h +++ b/drivers/scsi/bfa/include/cs/bfa_checksum.h | |||
| @@ -31,7 +31,7 @@ bfa_checksum_u32(u32 *buf, int sz) | |||
| 31 | for (i = 0; i < m; i++) | 31 | for (i = 0; i < m; i++) |
| 32 | sum ^= buf[i]; | 32 | sum ^= buf[i]; |
| 33 | 33 | ||
| 34 | return (sum); | 34 | return sum; |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static inline u16 | 37 | static inline u16 |
| @@ -43,7 +43,7 @@ bfa_checksum_u16(u16 *buf, int sz) | |||
| 43 | for (i = 0; i < m; i++) | 43 | for (i = 0; i < m; i++) |
| 44 | sum ^= buf[i]; | 44 | sum ^= buf[i]; |
| 45 | 45 | ||
| 46 | return (sum); | 46 | return sum; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static inline u8 | 49 | static inline u8 |
| @@ -55,6 +55,6 @@ bfa_checksum_u8(u8 *buf, int sz) | |||
| 55 | for (i = 0; i < sz; i++) | 55 | for (i = 0; i < sz; i++) |
| 56 | sum ^= buf[i]; | 56 | sum ^= buf[i]; |
| 57 | 57 | ||
| 58 | return (sum); | 58 | return sum; |
| 59 | } | 59 | } |
| 60 | #endif | 60 | #endif |
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h index 9877066680a6..b0a92baf6657 100644 --- a/drivers/scsi/bfa/include/cs/bfa_sm.h +++ b/drivers/scsi/bfa/include/cs/bfa_sm.h | |||
| @@ -24,8 +24,8 @@ | |||
| 24 | 24 | ||
| 25 | typedef void (*bfa_sm_t)(void *sm, int event); | 25 | typedef void (*bfa_sm_t)(void *sm, int event); |
| 26 | 26 | ||
| 27 | #define bfa_sm_set_state(_sm, _state) (_sm)->sm = (bfa_sm_t)(_state) | 27 | #define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) |
| 28 | #define bfa_sm_send_event(_sm, _event) (_sm)->sm((_sm), (_event)) | 28 | #define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) |
| 29 | #define bfa_sm_get_state(_sm) ((_sm)->sm) | 29 | #define bfa_sm_get_state(_sm) ((_sm)->sm) |
| 30 | #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) | 30 | #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) |
| 31 | 31 | ||
| @@ -62,7 +62,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event); | |||
| 62 | } while (0) | 62 | } while (0) |
| 63 | 63 | ||
| 64 | #define bfa_fsm_send_event(_fsm, _event) \ | 64 | #define bfa_fsm_send_event(_fsm, _event) \ |
| 65 | (_fsm)->fsm((_fsm), (_event)) | 65 | ((_fsm)->fsm((_fsm), (_event))) |
| 66 | #define bfa_fsm_cmp_state(_fsm, _state) \ | 66 | #define bfa_fsm_cmp_state(_fsm, _state) \ |
| 67 | ((_fsm)->fsm == (bfa_fsm_t)(_state)) | 67 | ((_fsm)->fsm == (bfa_fsm_t)(_state)) |
| 68 | 68 | ||
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h index 3e743928c74c..310771c888e7 100644 --- a/drivers/scsi/bfa/include/cs/bfa_trc.h +++ b/drivers/scsi/bfa/include/cs/bfa_trc.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #ifndef BFA_TRC_TS | 26 | #ifndef BFA_TRC_TS |
| 27 | #define BFA_TRC_TS(_trcm) ((_trcm)->ticks ++) | 27 | #define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) |
| 28 | #endif | 28 | #endif |
| 29 | 29 | ||
| 30 | struct bfa_trc_s { | 30 | struct bfa_trc_s { |
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h index a000bc4e2d4a..bf320412ee24 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h | |||
| @@ -61,7 +61,7 @@ enum bfa_pport_speed { | |||
| 61 | * Port operational type (in sync with SNIA port type). | 61 | * Port operational type (in sync with SNIA port type). |
| 62 | */ | 62 | */ |
| 63 | enum bfa_pport_type { | 63 | enum bfa_pport_type { |
| 64 | BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unkown */ | 64 | BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */ |
| 65 | BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */ | 65 | BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */ |
| 66 | BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */ | 66 | BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */ |
| 67 | BFA_PPORT_TYPE_NLPORT = 6, /* public loop */ | 67 | BFA_PPORT_TYPE_NLPORT = 6, /* public loop */ |
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h index 31881d218515..ade763dbc8ce 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | * Temperature sensor status values | 25 | * Temperature sensor status values |
| 26 | */ | 26 | */ |
| 27 | enum bfa_tsensor_status { | 27 | enum bfa_tsensor_status { |
| 28 | BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unkown status */ | 28 | BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */ |
| 29 | BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */ | 29 | BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */ |
| 30 | BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below mininum */ | 30 | BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below mininum */ |
| 31 | BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */ | 31 | BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */ |
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h index 4ffd2242d3de..08b79d5e46f3 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h | |||
| @@ -75,7 +75,7 @@ struct bfa_fcs_fabric_s { | |||
| 75 | */ | 75 | */ |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | #define bfa_fcs_fabric_npiv_capable(__f) (__f)->is_npiv | 78 | #define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) |
| 79 | #define bfa_fcs_fabric_is_switched(__f) \ | 79 | #define bfa_fcs_fabric_is_switched(__f) \ |
| 80 | ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) | 80 | ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) |
| 81 | 81 | ||
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h index b85cba884b96..967ceb0eb074 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | |||
| @@ -125,12 +125,12 @@ union bfa_fcs_port_topo_u { | |||
| 125 | struct bfa_fcs_port_s { | 125 | struct bfa_fcs_port_s { |
| 126 | struct list_head qe; /* used by port/vport */ | 126 | struct list_head qe; /* used by port/vport */ |
| 127 | bfa_sm_t sm; /* state machine */ | 127 | bfa_sm_t sm; /* state machine */ |
| 128 | struct bfa_fcs_fabric_s *fabric; /* parent fabric */ | 128 | struct bfa_fcs_fabric_s *fabric;/* parent fabric */ |
| 129 | struct bfa_port_cfg_s port_cfg; /* port configuration */ | 129 | struct bfa_port_cfg_s port_cfg;/* port configuration */ |
| 130 | struct bfa_timer_s link_timer; /* timer for link offline */ | 130 | struct bfa_timer_s link_timer; /* timer for link offline */ |
| 131 | u32 pid : 24; /* FC address */ | 131 | u32 pid:24; /* FC address */ |
| 132 | u8 lp_tag; /* lport tag */ | 132 | u8 lp_tag; /* lport tag */ |
| 133 | u16 num_rports; /* Num of r-ports */ | 133 | u16 num_rports; /* Num of r-ports */ |
| 134 | struct list_head rport_q; /* queue of discovered r-ports */ | 134 | struct list_head rport_q; /* queue of discovered r-ports */ |
| 135 | struct bfa_fcs_s *fcs; /* FCS instance */ | 135 | struct bfa_fcs_s *fcs; /* FCS instance */ |
| 136 | union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */ | 136 | union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */ |
| @@ -188,13 +188,14 @@ bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port) | |||
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | 190 | ||
| 191 | #define bfa_fcs_port_get_opertype(_lport) (_lport)->fabric->oper_type | 191 | #define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type) |
| 192 | 192 | ||
| 193 | 193 | ||
| 194 | #define bfa_fcs_port_get_fabric_name(_lport) (_lport)->fabric->fabric_name | 194 | #define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name) |
| 195 | 195 | ||
| 196 | 196 | ||
| 197 | #define bfa_fcs_port_get_fabric_ipaddr(_lport) (_lport)->fabric->fabric_ip_addr | 197 | #define bfa_fcs_port_get_fabric_ipaddr(_lport) \ |
| 198 | ((_lport)->fabric->fabric_ip_addr) | ||
| 198 | 199 | ||
| 199 | /** | 200 | /** |
| 200 | * bfa fcs port public functions | 201 | * bfa fcs port public functions |
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h index c59d6630b070..b82540a230c4 100644 --- a/drivers/scsi/bfa/include/protocol/ct.h +++ b/drivers/scsi/bfa/include/protocol/ct.h | |||
| @@ -82,7 +82,7 @@ enum { | |||
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * defintions for CT reason code | 85 | * definitions for CT reason code |
| 86 | */ | 86 | */ |
| 87 | enum { | 87 | enum { |
| 88 | CT_RSN_INV_CMD = 0x01, | 88 | CT_RSN_INV_CMD = 0x01, |
| @@ -129,7 +129,7 @@ enum { | |||
| 129 | }; | 129 | }; |
| 130 | 130 | ||
| 131 | /* | 131 | /* |
| 132 | * defintions for the explanation code for all servers | 132 | * definitions for the explanation code for all servers |
| 133 | */ | 133 | */ |
| 134 | enum { | 134 | enum { |
| 135 | CT_EXP_AUTH_EXCEPTION = 0xF1, | 135 | CT_EXP_AUTH_EXCEPTION = 0xF1, |
| @@ -193,11 +193,11 @@ struct fcgs_rftid_req_s { | |||
| 193 | #define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 | 193 | #define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 |
| 194 | 194 | ||
| 195 | struct fcgs_rffid_req_s{ | 195 | struct fcgs_rffid_req_s{ |
| 196 | u32 rsvd :8; | 196 | u32 rsvd:8; |
| 197 | u32 dap :24; /* port identifier */ | 197 | u32 dap:24; /* port identifier */ |
| 198 | u32 rsvd1 :16; | 198 | u32 rsvd1:16; |
| 199 | u32 fc4ftr_bits :8; /* fc4 feature bits */ | 199 | u32 fc4ftr_bits:8; /* fc4 feature bits */ |
| 200 | u32 fc4_type :8; /* corresponding FC4 Type */ | 200 | u32 fc4_type:8; /* corresponding FC4 Type */ |
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | /** | 203 | /** |
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h index 3e39ba58cfb5..14969eecf6a9 100644 --- a/drivers/scsi/bfa/include/protocol/fc.h +++ b/drivers/scsi/bfa/include/protocol/fc.h | |||
| @@ -486,14 +486,14 @@ struct fc_rsi_s { | |||
| 486 | * see FC-PH-X table 113 & 115 for explanation also FCP table 8 | 486 | * see FC-PH-X table 113 & 115 for explanation also FCP table 8 |
| 487 | */ | 487 | */ |
| 488 | struct fc_prli_params_s{ | 488 | struct fc_prli_params_s{ |
| 489 | u32 reserved: 16; | 489 | u32 reserved:16; |
| 490 | #ifdef __BIGENDIAN | 490 | #ifdef __BIGENDIAN |
| 491 | u32 reserved1: 5; | 491 | u32 reserved1:5; |
| 492 | u32 rec_support : 1; | 492 | u32 rec_support:1; |
| 493 | u32 task_retry_id : 1; | 493 | u32 task_retry_id:1; |
| 494 | u32 retry : 1; | 494 | u32 retry:1; |
| 495 | 495 | ||
| 496 | u32 confirm : 1; | 496 | u32 confirm:1; |
| 497 | u32 doverlay:1; | 497 | u32 doverlay:1; |
| 498 | u32 initiator:1; | 498 | u32 initiator:1; |
| 499 | u32 target:1; | 499 | u32 target:1; |
| @@ -502,10 +502,10 @@ struct fc_prli_params_s{ | |||
| 502 | u32 rxrdisab:1; | 502 | u32 rxrdisab:1; |
| 503 | u32 wxrdisab:1; | 503 | u32 wxrdisab:1; |
| 504 | #else | 504 | #else |
| 505 | u32 retry : 1; | 505 | u32 retry:1; |
| 506 | u32 task_retry_id : 1; | 506 | u32 task_retry_id:1; |
| 507 | u32 rec_support : 1; | 507 | u32 rec_support:1; |
| 508 | u32 reserved1: 5; | 508 | u32 reserved1:5; |
| 509 | 509 | ||
| 510 | u32 wxrdisab:1; | 510 | u32 wxrdisab:1; |
| 511 | u32 rxrdisab:1; | 511 | u32 rxrdisab:1; |
| @@ -514,7 +514,7 @@ struct fc_prli_params_s{ | |||
| 514 | u32 target:1; | 514 | u32 target:1; |
| 515 | u32 initiator:1; | 515 | u32 initiator:1; |
| 516 | u32 doverlay:1; | 516 | u32 doverlay:1; |
| 517 | u32 confirm : 1; | 517 | u32 confirm:1; |
| 518 | #endif | 518 | #endif |
| 519 | }; | 519 | }; |
| 520 | 520 | ||
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c index a418dedebe9e..f7c7f4f3c640 100644 --- a/drivers/scsi/bfa/loop.c +++ b/drivers/scsi/bfa/loop.c | |||
| @@ -58,49 +58,16 @@ static const u8 port_loop_alpa_map[] = { | |||
| 58 | /* | 58 | /* |
| 59 | * Local Functions | 59 | * Local Functions |
| 60 | */ | 60 | */ |
| 61 | bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, | 61 | static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, |
| 62 | u8 alpa); | 62 | u8 alpa); |
| 63 | 63 | ||
| 64 | void bfa_fcs_port_loop_plogi_response(void *fcsarg, | 64 | static void bfa_fcs_port_loop_plogi_response(void *fcsarg, |
| 65 | struct bfa_fcxp_s *fcxp, | 65 | struct bfa_fcxp_s *fcxp, |
| 66 | void *cbarg, | 66 | void *cbarg, |
| 67 | bfa_status_t req_status, | 67 | bfa_status_t req_status, |
| 68 | u32 rsp_len, | 68 | u32 rsp_len, |
| 69 | u32 resid_len, | 69 | u32 resid_len, |
| 70 | struct fchs_s *rsp_fchs); | 70 | struct fchs_s *rsp_fchs); |
| 71 | |||
| 72 | bfa_status_t bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, | ||
| 73 | u8 alpa); | ||
| 74 | |||
| 75 | void bfa_fcs_port_loop_adisc_response(void *fcsarg, | ||
| 76 | struct bfa_fcxp_s *fcxp, | ||
| 77 | void *cbarg, | ||
| 78 | bfa_status_t req_status, | ||
| 79 | u32 rsp_len, | ||
| 80 | u32 resid_len, | ||
| 81 | struct fchs_s *rsp_fchs); | ||
| 82 | |||
| 83 | bfa_status_t bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, | ||
| 84 | u8 alpa); | ||
| 85 | |||
| 86 | void bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, | ||
| 87 | struct bfa_fcxp_s *fcxp, | ||
| 88 | void *cbarg, | ||
| 89 | bfa_status_t req_status, | ||
| 90 | u32 rsp_len, | ||
| 91 | u32 resid_len, | ||
| 92 | struct fchs_s *rsp_fchs); | ||
| 93 | |||
| 94 | bfa_status_t bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, | ||
| 95 | u8 alpa); | ||
| 96 | |||
| 97 | void bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, | ||
| 98 | struct bfa_fcxp_s *fcxp, | ||
| 99 | void *cbarg, | ||
| 100 | bfa_status_t req_status, | ||
| 101 | u32 rsp_len, | ||
| 102 | u32 resid_len, | ||
| 103 | struct fchs_s *rsp_fchs); | ||
| 104 | /** | 71 | /** |
| 105 | * Called by port to initializar in provate LOOP topology. | 72 | * Called by port to initializar in provate LOOP topology. |
| 106 | */ | 73 | */ |
| @@ -179,7 +146,7 @@ bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port) | |||
| 179 | /** | 146 | /** |
| 180 | * Local Functions. | 147 | * Local Functions. |
| 181 | */ | 148 | */ |
| 182 | bfa_status_t | 149 | static bfa_status_t |
| 183 | bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) | 150 | bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) |
| 184 | { | 151 | { |
| 185 | struct fchs_s fchs; | 152 | struct fchs_s fchs; |
| @@ -208,7 +175,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) | |||
| 208 | /** | 175 | /** |
| 209 | * Called by fcxp to notify the Plogi response | 176 | * Called by fcxp to notify the Plogi response |
| 210 | */ | 177 | */ |
| 211 | void | 178 | static void |
| 212 | bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, | 179 | bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, |
| 213 | void *cbarg, bfa_status_t req_status, | 180 | void *cbarg, bfa_status_t req_status, |
| 214 | u32 rsp_len, u32 resid_len, | 181 | u32 rsp_len, u32 resid_len, |
| @@ -244,179 +211,3 @@ bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, | |||
| 244 | bfa_assert(0); | 211 | bfa_assert(0); |
| 245 | } | 212 | } |
| 246 | } | 213 | } |
| 247 | |||
| 248 | bfa_status_t | ||
| 249 | bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, u8 alpa) | ||
| 250 | { | ||
| 251 | struct fchs_s fchs; | ||
| 252 | struct bfa_fcxp_s *fcxp; | ||
| 253 | int len; | ||
| 254 | |||
| 255 | bfa_trc(port->fcs, alpa); | ||
| 256 | |||
| 257 | fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, | ||
| 258 | NULL); | ||
| 259 | bfa_assert(fcxp); | ||
| 260 | |||
| 261 | len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, | ||
| 262 | bfa_fcs_port_get_fcid(port), 0, | ||
| 263 | port->port_cfg.pwwn, port->port_cfg.nwwn, | ||
| 264 | bfa_pport_get_maxfrsize(port->fcs->bfa)); | ||
| 265 | |||
| 266 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, | ||
| 267 | FC_CLASS_3, len, &fchs, | ||
| 268 | bfa_fcs_port_loop_plogi_acc_response, | ||
| 269 | (void *)port, FC_MAX_PDUSZ, 0); /* No response | ||
| 270 | * expected | ||
| 271 | */ | ||
| 272 | |||
| 273 | return BFA_STATUS_OK; | ||
| 274 | } | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Plogi Acc Response | ||
| 278 | * We donot do any processing here. | ||
| 279 | */ | ||
| 280 | void | ||
| 281 | bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, | ||
| 282 | void *cbarg, bfa_status_t req_status, | ||
| 283 | u32 rsp_len, u32 resid_len, | ||
| 284 | struct fchs_s *rsp_fchs) | ||
| 285 | { | ||
| 286 | |||
| 287 | struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; | ||
| 288 | |||
| 289 | bfa_trc(port->fcs, port->pid); | ||
| 290 | |||
| 291 | /* | ||
| 292 | * Sanity Checks | ||
| 293 | */ | ||
| 294 | if (req_status != BFA_STATUS_OK) { | ||
| 295 | bfa_trc(port->fcs, req_status); | ||
| 296 | return; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | |||
| 300 | bfa_status_t | ||
| 301 | bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, u8 alpa) | ||
| 302 | { | ||
| 303 | struct fchs_s fchs; | ||
| 304 | struct bfa_fcxp_s *fcxp; | ||
| 305 | int len; | ||
| 306 | |||
| 307 | bfa_trc(port->fcs, alpa); | ||
| 308 | |||
| 309 | fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, | ||
| 310 | NULL); | ||
| 311 | bfa_assert(fcxp); | ||
| 312 | |||
| 313 | len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, | ||
| 314 | bfa_fcs_port_get_fcid(port), 0, | ||
| 315 | port->port_cfg.pwwn, port->port_cfg.nwwn); | ||
| 316 | |||
| 317 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, | ||
| 318 | FC_CLASS_3, len, &fchs, | ||
| 319 | bfa_fcs_port_loop_adisc_response, (void *)port, | ||
| 320 | FC_MAX_PDUSZ, FC_RA_TOV); | ||
| 321 | |||
| 322 | return BFA_STATUS_OK; | ||
| 323 | } | ||
| 324 | |||
| 325 | /** | ||
| 326 | * Called by fcxp to notify the ADISC response | ||
| 327 | */ | ||
| 328 | void | ||
| 329 | bfa_fcs_port_loop_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, | ||
| 330 | void *cbarg, bfa_status_t req_status, | ||
| 331 | u32 rsp_len, u32 resid_len, | ||
| 332 | struct fchs_s *rsp_fchs) | ||
| 333 | { | ||
| 334 | struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; | ||
| 335 | struct bfa_fcs_rport_s *rport; | ||
| 336 | struct fc_adisc_s *adisc_resp; | ||
| 337 | struct fc_els_cmd_s *els_cmd; | ||
| 338 | u32 pid = rsp_fchs->s_id; | ||
| 339 | |||
| 340 | bfa_trc(port->fcs, req_status); | ||
| 341 | |||
| 342 | /* | ||
| 343 | * Sanity Checks | ||
| 344 | */ | ||
| 345 | if (req_status != BFA_STATUS_OK) { | ||
| 346 | /* | ||
| 347 | * TBD : we may need to retry certain requests | ||
| 348 | */ | ||
| 349 | bfa_fcxp_free(fcxp); | ||
| 350 | return; | ||
| 351 | } | ||
| 352 | |||
| 353 | els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); | ||
| 354 | adisc_resp = (struct fc_adisc_s *) els_cmd; | ||
| 355 | |||
| 356 | if (els_cmd->els_code == FC_ELS_ACC) { | ||
| 357 | } else { | ||
| 358 | bfa_trc(port->fcs, adisc_resp->els_cmd.els_code); | ||
| 359 | |||
| 360 | /* | ||
| 361 | * TBD: we may need to check for reject codes and retry | ||
| 362 | */ | ||
| 363 | rport = bfa_fcs_port_get_rport_by_pid(port, pid); | ||
| 364 | if (rport) { | ||
| 365 | list_del(&rport->qe); | ||
| 366 | bfa_fcs_rport_delete(rport); | ||
| 367 | } | ||
| 368 | |||
| 369 | } | ||
| 370 | return; | ||
| 371 | } | ||
| 372 | |||
| 373 | bfa_status_t | ||
| 374 | bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, u8 alpa) | ||
| 375 | { | ||
| 376 | struct fchs_s fchs; | ||
| 377 | struct bfa_fcxp_s *fcxp; | ||
| 378 | int len; | ||
| 379 | |||
| 380 | bfa_trc(port->fcs, alpa); | ||
| 381 | |||
| 382 | fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, | ||
| 383 | NULL); | ||
| 384 | bfa_assert(fcxp); | ||
| 385 | |||
| 386 | len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, | ||
| 387 | bfa_fcs_port_get_fcid(port), 0, | ||
| 388 | port->port_cfg.pwwn, port->port_cfg.nwwn); | ||
| 389 | |||
| 390 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, | ||
| 391 | FC_CLASS_3, len, &fchs, | ||
| 392 | bfa_fcs_port_loop_adisc_acc_response, | ||
| 393 | (void *)port, FC_MAX_PDUSZ, 0); /* no reponse | ||
| 394 | * expected | ||
| 395 | */ | ||
| 396 | |||
| 397 | return BFA_STATUS_OK; | ||
| 398 | } | ||
| 399 | |||
| 400 | /* | ||
| 401 | * Adisc Acc Response | ||
| 402 | * We donot do any processing here. | ||
| 403 | */ | ||
| 404 | void | ||
| 405 | bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, | ||
| 406 | void *cbarg, bfa_status_t req_status, | ||
| 407 | u32 rsp_len, u32 resid_len, | ||
| 408 | struct fchs_s *rsp_fchs) | ||
| 409 | { | ||
| 410 | |||
| 411 | struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; | ||
| 412 | |||
| 413 | bfa_trc(port->fcs, port->pid); | ||
| 414 | |||
| 415 | /* | ||
| 416 | * Sanity Checks | ||
| 417 | */ | ||
| 418 | if (req_status != BFA_STATUS_OK) { | ||
| 419 | bfa_trc(port->fcs, req_status); | ||
| 420 | return; | ||
| 421 | } | ||
| 422 | } | ||
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c index 8f51a83f1834..1e06792cd4c2 100644 --- a/drivers/scsi/bfa/lport_api.c +++ b/drivers/scsi/bfa/lport_api.c | |||
| @@ -43,7 +43,7 @@ bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg) | |||
| 43 | struct bfa_fcs_port_s * | 43 | struct bfa_fcs_port_s * |
| 44 | bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) | 44 | bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) |
| 45 | { | 45 | { |
| 46 | return (&fcs->fabric.bport); | 46 | return &fcs->fabric.bport; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | wwn_t | 49 | wwn_t |
| @@ -88,11 +88,10 @@ bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index, | |||
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | bfa_trc(fcs, i); | 90 | bfa_trc(fcs, i); |
| 91 | if (rport) { | 91 | if (rport) |
| 92 | return rport->pwwn; | 92 | return rport->pwwn; |
| 93 | } else { | 93 | else |
| 94 | return (wwn_t) 0; | 94 | return (wwn_t) 0; |
| 95 | } | ||
| 96 | } | 95 | } |
| 97 | 96 | ||
| 98 | void | 97 | void |
| @@ -198,17 +197,17 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) | |||
| 198 | vf = bfa_fcs_vf_lookup(fcs, vf_id); | 197 | vf = bfa_fcs_vf_lookup(fcs, vf_id); |
| 199 | if (vf == NULL) { | 198 | if (vf == NULL) { |
| 200 | bfa_trc(fcs, vf_id); | 199 | bfa_trc(fcs, vf_id); |
| 201 | return (NULL); | 200 | return NULL; |
| 202 | } | 201 | } |
| 203 | 202 | ||
| 204 | if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) | 203 | if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) |
| 205 | return (&vf->bport); | 204 | return &vf->bport; |
| 206 | 205 | ||
| 207 | vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); | 206 | vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); |
| 208 | if (vport) | 207 | if (vport) |
| 209 | return (&vport->lport); | 208 | return &vport->lport; |
| 210 | 209 | ||
| 211 | return (NULL); | 210 | return NULL; |
| 212 | } | 211 | } |
| 213 | 212 | ||
| 214 | /* | 213 | /* |
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c index 59fea99d67a4..2f8b880060bb 100644 --- a/drivers/scsi/bfa/ns.c +++ b/drivers/scsi/bfa/ns.c | |||
| @@ -932,11 +932,10 @@ bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) | |||
| 932 | } | 932 | } |
| 933 | ns->fcxp = fcxp; | 933 | ns->fcxp = fcxp; |
| 934 | 934 | ||
| 935 | if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { | 935 | if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) |
| 936 | fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; | 936 | fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; |
| 937 | } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) { | 937 | else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) |
| 938 | fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET; | 938 | fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET; |
| 939 | } | ||
| 940 | 939 | ||
| 941 | len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 940 | len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
| 942 | bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP, | 941 | bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP, |
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c index 86af818d17bb..fcb8864d3276 100644 --- a/drivers/scsi/bfa/plog.c +++ b/drivers/scsi/bfa/plog.c | |||
| @@ -180,5 +180,5 @@ bfa_plog_disable(struct bfa_plog_s *plog) | |||
| 180 | bfa_boolean_t | 180 | bfa_boolean_t |
| 181 | bfa_plog_get_setting(struct bfa_plog_s *plog) | 181 | bfa_plog_get_setting(struct bfa_plog_s *plog) |
| 182 | { | 182 | { |
| 183 | return((bfa_boolean_t)plog->plog_enabled); | 183 | return (bfa_boolean_t)plog->plog_enabled; |
| 184 | } | 184 | } |
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c index 8a1f59d596c1..e1932c885ac2 100644 --- a/drivers/scsi/bfa/rport_ftrs.c +++ b/drivers/scsi/bfa/rport_ftrs.c | |||
| @@ -79,7 +79,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 79 | bfa_trc(rport->fcs, event); | 79 | bfa_trc(rport->fcs, event); |
| 80 | 80 | ||
| 81 | switch (event) { | 81 | switch (event) { |
| 82 | case RPFSM_EVENT_RPORT_ONLINE : | 82 | case RPFSM_EVENT_RPORT_ONLINE: |
| 83 | if (!BFA_FCS_PID_IS_WKA(rport->pid)) { | 83 | if (!BFA_FCS_PID_IS_WKA(rport->pid)) { |
| 84 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); | 84 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); |
| 85 | rpf->rpsc_retries = 0; | 85 | rpf->rpsc_retries = 0; |
| @@ -87,7 +87,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 87 | break; | 87 | break; |
| 88 | }; | 88 | }; |
| 89 | 89 | ||
| 90 | case RPFSM_EVENT_RPORT_OFFLINE : | 90 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 91 | break; | 91 | break; |
| 92 | 92 | ||
| 93 | default: | 93 | default: |
| @@ -107,7 +107,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 107 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); | 107 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); |
| 108 | break; | 108 | break; |
| 109 | 109 | ||
| 110 | case RPFSM_EVENT_RPORT_OFFLINE : | 110 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 111 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); | 111 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); |
| 112 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); | 112 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); |
| 113 | rpf->rpsc_retries = 0; | 113 | rpf->rpsc_retries = 0; |
| @@ -130,11 +130,10 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 130 | case RPFSM_EVENT_RPSC_COMP: | 130 | case RPFSM_EVENT_RPSC_COMP: |
| 131 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); | 131 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); |
| 132 | /* Update speed info in f/w via BFA */ | 132 | /* Update speed info in f/w via BFA */ |
| 133 | if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) { | 133 | if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) |
| 134 | bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); | 134 | bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); |
| 135 | } else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) { | 135 | else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) |
| 136 | bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); | 136 | bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); |
| 137 | } | ||
| 138 | break; | 137 | break; |
| 139 | 138 | ||
| 140 | case RPFSM_EVENT_RPSC_FAIL: | 139 | case RPFSM_EVENT_RPSC_FAIL: |
| @@ -154,7 +153,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 154 | } | 153 | } |
| 155 | break; | 154 | break; |
| 156 | 155 | ||
| 157 | case RPFSM_EVENT_RPORT_OFFLINE : | 156 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 158 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); | 157 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); |
| 159 | bfa_fcxp_discard(rpf->fcxp); | 158 | bfa_fcxp_discard(rpf->fcxp); |
| 160 | rpf->rpsc_retries = 0; | 159 | rpf->rpsc_retries = 0; |
| @@ -174,13 +173,13 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 174 | bfa_trc(rport->fcs, event); | 173 | bfa_trc(rport->fcs, event); |
| 175 | 174 | ||
| 176 | switch (event) { | 175 | switch (event) { |
| 177 | case RPFSM_EVENT_TIMEOUT : | 176 | case RPFSM_EVENT_TIMEOUT: |
| 178 | /* re-send the RPSC */ | 177 | /* re-send the RPSC */ |
| 179 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); | 178 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); |
| 180 | bfa_fcs_rpf_send_rpsc2(rpf, NULL); | 179 | bfa_fcs_rpf_send_rpsc2(rpf, NULL); |
| 181 | break; | 180 | break; |
| 182 | 181 | ||
| 183 | case RPFSM_EVENT_RPORT_OFFLINE : | 182 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 184 | bfa_timer_stop(&rpf->timer); | 183 | bfa_timer_stop(&rpf->timer); |
| 185 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); | 184 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); |
| 186 | rpf->rpsc_retries = 0; | 185 | rpf->rpsc_retries = 0; |
| @@ -201,7 +200,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 201 | bfa_trc(rport->fcs, event); | 200 | bfa_trc(rport->fcs, event); |
| 202 | 201 | ||
| 203 | switch (event) { | 202 | switch (event) { |
| 204 | case RPFSM_EVENT_RPORT_OFFLINE : | 203 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 205 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); | 204 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); |
| 206 | rpf->rpsc_retries = 0; | 205 | rpf->rpsc_retries = 0; |
| 207 | break; | 206 | break; |
| @@ -221,12 +220,12 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
| 221 | bfa_trc(rport->fcs, event); | 220 | bfa_trc(rport->fcs, event); |
| 222 | 221 | ||
| 223 | switch (event) { | 222 | switch (event) { |
| 224 | case RPFSM_EVENT_RPORT_ONLINE : | 223 | case RPFSM_EVENT_RPORT_ONLINE: |
| 225 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); | 224 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); |
| 226 | bfa_fcs_rpf_send_rpsc2(rpf, NULL); | 225 | bfa_fcs_rpf_send_rpsc2(rpf, NULL); |
| 227 | break; | 226 | break; |
| 228 | 227 | ||
| 229 | case RPFSM_EVENT_RPORT_OFFLINE : | 228 | case RPFSM_EVENT_RPORT_OFFLINE: |
| 230 | break; | 229 | break; |
| 231 | 230 | ||
| 232 | default: | 231 | default: |
| @@ -366,10 +365,9 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, | |||
| 366 | bfa_trc(rport->fcs, ls_rjt->reason_code); | 365 | bfa_trc(rport->fcs, ls_rjt->reason_code); |
| 367 | bfa_trc(rport->fcs, ls_rjt->reason_code_expl); | 366 | bfa_trc(rport->fcs, ls_rjt->reason_code_expl); |
| 368 | rport->stats.rpsc_rejects++; | 367 | rport->stats.rpsc_rejects++; |
| 369 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { | 368 | if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) |
| 370 | bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); | 369 | bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); |
| 371 | } else { | 370 | else |
| 372 | bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); | 371 | bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); |
| 373 | } | ||
| 374 | } | 372 | } |
| 375 | } | 373 | } |
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c index 31d81fe2fc48..391a4790bebd 100644 --- a/drivers/scsi/bfa/vfapi.c +++ b/drivers/scsi/bfa/vfapi.c | |||
| @@ -189,7 +189,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) | |||
| 189 | { | 189 | { |
| 190 | bfa_trc(fcs, vf_id); | 190 | bfa_trc(fcs, vf_id); |
| 191 | if (vf_id == FC_VF_ID_NULL) | 191 | if (vf_id == FC_VF_ID_NULL) |
| 192 | return (&fcs->fabric); | 192 | return &fcs->fabric; |
| 193 | 193 | ||
| 194 | /** | 194 | /** |
| 195 | * @todo vf support | 195 | * @todo vf support |
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index c10af06c5714..e90f1e38c32d 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c | |||
| @@ -31,13 +31,13 @@ | |||
| 31 | 31 | ||
| 32 | BFA_TRC_FILE(FCS, VPORT); | 32 | BFA_TRC_FILE(FCS, VPORT); |
| 33 | 33 | ||
| 34 | #define __vport_fcs(__vp) (__vp)->lport.fcs | 34 | #define __vport_fcs(__vp) ((__vp)->lport.fcs) |
| 35 | #define __vport_pwwn(__vp) (__vp)->lport.port_cfg.pwwn | 35 | #define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) |
| 36 | #define __vport_nwwn(__vp) (__vp)->lport.port_cfg.nwwn | 36 | #define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) |
| 37 | #define __vport_bfa(__vp) (__vp)->lport.fcs->bfa | 37 | #define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) |
| 38 | #define __vport_fcid(__vp) (__vp)->lport.pid | 38 | #define __vport_fcid(__vp) ((__vp)->lport.pid) |
| 39 | #define __vport_fabric(__vp) (__vp)->lport.fabric | 39 | #define __vport_fabric(__vp) ((__vp)->lport.fabric) |
| 40 | #define __vport_vfid(__vp) (__vp)->lport.fabric->vf_id | 40 | #define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) |
| 41 | 41 | ||
| 42 | #define BFA_FCS_VPORT_MAX_RETRIES 5 | 42 | #define BFA_FCS_VPORT_MAX_RETRIES 5 |
| 43 | /* | 43 | /* |
| @@ -641,9 +641,9 @@ bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs) | |||
| 641 | bfa_get_attr(fcs->bfa, &ioc_attr); | 641 | bfa_get_attr(fcs->bfa, &ioc_attr); |
| 642 | 642 | ||
| 643 | if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) | 643 | if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) |
| 644 | return (BFA_FCS_MAX_VPORTS_SUPP_CT); | 644 | return BFA_FCS_MAX_VPORTS_SUPP_CT; |
| 645 | else | 645 | else |
| 646 | return (BFA_FCS_MAX_VPORTS_SUPP_CB); | 646 | return BFA_FCS_MAX_VPORTS_SUPP_CB; |
| 647 | } | 647 | } |
| 648 | 648 | ||
| 649 | 649 | ||
| @@ -675,7 +675,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, | |||
| 675 | struct bfad_vport_s *vport_drv) | 675 | struct bfad_vport_s *vport_drv) |
| 676 | { | 676 | { |
| 677 | if (vport_cfg->pwwn == 0) | 677 | if (vport_cfg->pwwn == 0) |
| 678 | return (BFA_STATUS_INVALID_WWN); | 678 | return BFA_STATUS_INVALID_WWN; |
| 679 | 679 | ||
| 680 | if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) | 680 | if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) |
| 681 | return BFA_STATUS_VPORT_WWN_BP; | 681 | return BFA_STATUS_VPORT_WWN_BP; |
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 5edde1a8c04d..2b973f3c2eb2 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
| @@ -232,7 +232,6 @@ struct bnx2i_conn { | |||
| 232 | struct iscsi_cls_conn *cls_conn; | 232 | struct iscsi_cls_conn *cls_conn; |
| 233 | struct bnx2i_hba *hba; | 233 | struct bnx2i_hba *hba; |
| 234 | struct completion cmd_cleanup_cmpl; | 234 | struct completion cmd_cleanup_cmpl; |
| 235 | int is_bound; | ||
| 236 | 235 | ||
| 237 | u32 iscsi_conn_cid; | 236 | u32 iscsi_conn_cid; |
| 238 | #define BNX2I_CID_RESERVED 0x5AFF | 237 | #define BNX2I_CID_RESERVED 0x5AFF |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index cafb888c2376..132898c88d5e 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
| @@ -1161,9 +1161,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task) | |||
| 1161 | struct bnx2i_cmd *cmd = task->dd_data; | 1161 | struct bnx2i_cmd *cmd = task->dd_data; |
| 1162 | struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; | 1162 | struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; |
| 1163 | 1163 | ||
| 1164 | if (!bnx2i_conn->is_bound) | ||
| 1165 | return -ENOTCONN; | ||
| 1166 | |||
| 1167 | /* | 1164 | /* |
| 1168 | * If there is no scsi_cmnd this must be a mgmt task | 1165 | * If there is no scsi_cmnd this must be a mgmt task |
| 1169 | */ | 1166 | */ |
| @@ -1371,7 +1368,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 1371 | bnx2i_conn->ep = bnx2i_ep; | 1368 | bnx2i_conn->ep = bnx2i_ep; |
| 1372 | bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; | 1369 | bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; |
| 1373 | bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; | 1370 | bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; |
| 1374 | bnx2i_conn->is_bound = 1; | ||
| 1375 | 1371 | ||
| 1376 | ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, | 1372 | ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, |
| 1377 | bnx2i_ep->ep_iscsi_cid); | 1373 | bnx2i_ep->ep_iscsi_cid); |
| @@ -1883,7 +1879,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 1883 | 1879 | ||
| 1884 | bnx2i_ep = ep->dd_data; | 1880 | bnx2i_ep = ep->dd_data; |
| 1885 | 1881 | ||
| 1886 | /* driver should not attempt connection cleanup untill TCP_CONNECT | 1882 | /* driver should not attempt connection cleanup until TCP_CONNECT |
| 1887 | * completes either successfully or fails. Timeout is 9-secs, so | 1883 | * completes either successfully or fails. Timeout is 9-secs, so |
| 1888 | * wait for it to complete | 1884 | * wait for it to complete |
| 1889 | */ | 1885 | */ |
| @@ -1896,9 +1892,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 1896 | conn = bnx2i_conn->cls_conn->dd_data; | 1892 | conn = bnx2i_conn->cls_conn->dd_data; |
| 1897 | session = conn->session; | 1893 | session = conn->session; |
| 1898 | 1894 | ||
| 1899 | spin_lock_bh(&session->lock); | 1895 | iscsi_suspend_queue(conn); |
| 1900 | bnx2i_conn->is_bound = 0; | ||
| 1901 | spin_unlock_bh(&session->lock); | ||
| 1902 | } | 1896 | } |
| 1903 | 1897 | ||
| 1904 | hba = bnx2i_ep->hba; | 1898 | hba = bnx2i_ep->hba; |
| @@ -2034,7 +2028,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { | |||
| 2034 | ISCSI_USERNAME | ISCSI_PASSWORD | | 2028 | ISCSI_USERNAME | ISCSI_PASSWORD | |
| 2035 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | 2029 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
| 2036 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | 2030 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | |
| 2037 | ISCSI_LU_RESET_TMO | | 2031 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | |
| 2038 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | 2032 | ISCSI_PING_TMO | ISCSI_RECV_TMO | |
| 2039 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | 2033 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, |
| 2040 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, | 2034 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index 63abb06c4edb..9129bcf117cf 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
| @@ -141,6 +141,7 @@ static const struct value_name_pair serv_out12_arr[] = { | |||
| 141 | static const struct value_name_pair serv_in16_arr[] = { | 141 | static const struct value_name_pair serv_in16_arr[] = { |
| 142 | {0x10, "Read capacity(16)"}, | 142 | {0x10, "Read capacity(16)"}, |
| 143 | {0x11, "Read long(16)"}, | 143 | {0x11, "Read long(16)"}, |
| 144 | {0x12, "Get LBA status"}, | ||
| 144 | }; | 145 | }; |
| 145 | #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) | 146 | #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) |
| 146 | 147 | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 2631bddd255e..969c83162cc4 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c | |||
| @@ -937,7 +937,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { | |||
| 937 | ISCSI_USERNAME | ISCSI_PASSWORD | | 937 | ISCSI_USERNAME | ISCSI_PASSWORD | |
| 938 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | 938 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
| 939 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | 939 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | |
| 940 | ISCSI_LU_RESET_TMO | | 940 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | |
| 941 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | 941 | ISCSI_PING_TMO | ISCSI_RECV_TMO | |
| 942 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | 942 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, |
| 943 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | 943 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | |
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 075e2397273c..6c59c02c1ed9 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
| @@ -1509,7 +1509,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, | |||
| 1509 | * Try anyway? | 1509 | * Try anyway? |
| 1510 | * | 1510 | * |
| 1511 | * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection | 1511 | * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection |
| 1512 | * Timeout, a Disconnect or a Reselction IRQ, so we would be screwed! | 1512 | * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed! |
| 1513 | * (This is likely to be a bug in the hardware. Obviously, most people | 1513 | * (This is likely to be a bug in the hardware. Obviously, most people |
| 1514 | * only have one initiator per SCSI bus.) | 1514 | * only have one initiator per SCSI bus.) |
| 1515 | * Instead let this fail and have the timer make sure the command is | 1515 | * Instead let this fail and have the timer make sure the command is |
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 3ee1cbc89479..e19a1a55270c 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c | |||
| @@ -226,7 +226,7 @@ store_dh_state(struct device *dev, struct device_attribute *attr, | |||
| 226 | * Activate a device handler | 226 | * Activate a device handler |
| 227 | */ | 227 | */ |
| 228 | if (scsi_dh->activate) | 228 | if (scsi_dh->activate) |
| 229 | err = scsi_dh->activate(sdev); | 229 | err = scsi_dh->activate(sdev, NULL, NULL); |
| 230 | else | 230 | else |
| 231 | err = 0; | 231 | err = 0; |
| 232 | } | 232 | } |
| @@ -304,18 +304,15 @@ static int scsi_dh_notifier(struct notifier_block *nb, | |||
| 304 | sdev = to_scsi_device(dev); | 304 | sdev = to_scsi_device(dev); |
| 305 | 305 | ||
| 306 | if (action == BUS_NOTIFY_ADD_DEVICE) { | 306 | if (action == BUS_NOTIFY_ADD_DEVICE) { |
| 307 | err = device_create_file(dev, &scsi_dh_state_attr); | ||
| 308 | /* don't care about err */ | ||
| 307 | devinfo = device_handler_match(NULL, sdev); | 309 | devinfo = device_handler_match(NULL, sdev); |
| 308 | if (!devinfo) | 310 | if (devinfo) |
| 309 | goto out; | 311 | err = scsi_dh_handler_attach(sdev, devinfo); |
| 310 | |||
| 311 | err = scsi_dh_handler_attach(sdev, devinfo); | ||
| 312 | if (!err) | ||
| 313 | err = device_create_file(dev, &scsi_dh_state_attr); | ||
| 314 | } else if (action == BUS_NOTIFY_DEL_DEVICE) { | 312 | } else if (action == BUS_NOTIFY_DEL_DEVICE) { |
| 315 | device_remove_file(dev, &scsi_dh_state_attr); | 313 | device_remove_file(dev, &scsi_dh_state_attr); |
| 316 | scsi_dh_handler_detach(sdev, NULL); | 314 | scsi_dh_handler_detach(sdev, NULL); |
| 317 | } | 315 | } |
| 318 | out: | ||
| 319 | return err; | 316 | return err; |
| 320 | } | 317 | } |
| 321 | 318 | ||
| @@ -423,10 +420,17 @@ EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); | |||
| 423 | /* | 420 | /* |
| 424 | * scsi_dh_activate - activate the path associated with the scsi_device | 421 | * scsi_dh_activate - activate the path associated with the scsi_device |
| 425 | * corresponding to the given request queue. | 422 | * corresponding to the given request queue. |
| 426 | * @q - Request queue that is associated with the scsi_device to be | 423 | * Returns immediately without waiting for activation to be completed. |
| 427 | * activated. | 424 | * @q - Request queue that is associated with the scsi_device to be |
| 425 | * activated. | ||
| 426 | * @fn - Function to be called upon completion of the activation. | ||
| 427 | * Function fn is called with data (below) and the error code. | ||
| 428 | * Function fn may be called from the same calling context. So, | ||
| 429 | * do not hold the lock in the caller which may be needed in fn. | ||
| 430 | * @data - data passed to the function fn upon completion. | ||
| 431 | * | ||
| 428 | */ | 432 | */ |
| 429 | int scsi_dh_activate(struct request_queue *q) | 433 | int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) |
| 430 | { | 434 | { |
| 431 | int err = 0; | 435 | int err = 0; |
| 432 | unsigned long flags; | 436 | unsigned long flags; |
| @@ -445,7 +449,7 @@ int scsi_dh_activate(struct request_queue *q) | |||
| 445 | return err; | 449 | return err; |
| 446 | 450 | ||
| 447 | if (scsi_dh->activate) | 451 | if (scsi_dh->activate) |
| 448 | err = scsi_dh->activate(sdev); | 452 | err = scsi_dh->activate(sdev, fn, data); |
| 449 | put_device(&sdev->sdev_gendev); | 453 | put_device(&sdev->sdev_gendev); |
| 450 | return err; | 454 | return err; |
| 451 | } | 455 | } |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index b5cdefaf2608..4f0d0138f48b 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
| @@ -60,11 +60,17 @@ struct alua_dh_data { | |||
| 60 | int bufflen; | 60 | int bufflen; |
| 61 | unsigned char sense[SCSI_SENSE_BUFFERSIZE]; | 61 | unsigned char sense[SCSI_SENSE_BUFFERSIZE]; |
| 62 | int senselen; | 62 | int senselen; |
| 63 | struct scsi_device *sdev; | ||
| 64 | activate_complete callback_fn; | ||
| 65 | void *callback_data; | ||
| 63 | }; | 66 | }; |
| 64 | 67 | ||
| 65 | #define ALUA_POLICY_SWITCH_CURRENT 0 | 68 | #define ALUA_POLICY_SWITCH_CURRENT 0 |
| 66 | #define ALUA_POLICY_SWITCH_ALL 1 | 69 | #define ALUA_POLICY_SWITCH_ALL 1 |
| 67 | 70 | ||
| 71 | static char print_alua_state(int); | ||
| 72 | static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); | ||
| 73 | |||
| 68 | static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) | 74 | static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) |
| 69 | { | 75 | { |
| 70 | struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; | 76 | struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; |
| @@ -231,18 +237,71 @@ done: | |||
| 231 | } | 237 | } |
| 232 | 238 | ||
| 233 | /* | 239 | /* |
| 240 | * alua_stpg - Evaluate SET TARGET GROUP STATES | ||
| 241 | * @sdev: the device to be evaluated | ||
| 242 | * @state: the new target group state | ||
| 243 | * | ||
| 244 | * Send a SET TARGET GROUP STATES command to the device. | ||
| 245 | * We only have to test here if we should resubmit the command; | ||
| 246 | * any other error is assumed as a failure. | ||
| 247 | */ | ||
| 248 | static void stpg_endio(struct request *req, int error) | ||
| 249 | { | ||
| 250 | struct alua_dh_data *h = req->end_io_data; | ||
| 251 | struct scsi_sense_hdr sense_hdr; | ||
| 252 | unsigned err = SCSI_DH_IO; | ||
| 253 | |||
| 254 | if (error || host_byte(req->errors) != DID_OK || | ||
| 255 | msg_byte(req->errors) != COMMAND_COMPLETE) | ||
| 256 | goto done; | ||
| 257 | |||
| 258 | if (err == SCSI_DH_IO && h->senselen > 0) { | ||
| 259 | err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, | ||
| 260 | &sense_hdr); | ||
| 261 | if (!err) { | ||
| 262 | err = SCSI_DH_IO; | ||
| 263 | goto done; | ||
| 264 | } | ||
| 265 | err = alua_check_sense(h->sdev, &sense_hdr); | ||
| 266 | if (err == ADD_TO_MLQUEUE) { | ||
| 267 | err = SCSI_DH_RETRY; | ||
| 268 | goto done; | ||
| 269 | } | ||
| 270 | sdev_printk(KERN_INFO, h->sdev, | ||
| 271 | "%s: stpg sense code: %02x/%02x/%02x\n", | ||
| 272 | ALUA_DH_NAME, sense_hdr.sense_key, | ||
| 273 | sense_hdr.asc, sense_hdr.ascq); | ||
| 274 | err = SCSI_DH_IO; | ||
| 275 | } | ||
| 276 | if (err == SCSI_DH_OK) { | ||
| 277 | h->state = TPGS_STATE_OPTIMIZED; | ||
| 278 | sdev_printk(KERN_INFO, h->sdev, | ||
| 279 | "%s: port group %02x switched to state %c\n", | ||
| 280 | ALUA_DH_NAME, h->group_id, | ||
| 281 | print_alua_state(h->state)); | ||
| 282 | } | ||
| 283 | done: | ||
| 284 | blk_put_request(req); | ||
| 285 | if (h->callback_fn) { | ||
| 286 | h->callback_fn(h->callback_data, err); | ||
| 287 | h->callback_fn = h->callback_data = NULL; | ||
| 288 | } | ||
| 289 | return; | ||
| 290 | } | ||
| 291 | |||
| 292 | /* | ||
| 234 | * submit_stpg - Issue a SET TARGET GROUP STATES command | 293 | * submit_stpg - Issue a SET TARGET GROUP STATES command |
| 235 | * @sdev: sdev the command should be sent to | ||
| 236 | * | 294 | * |
| 237 | * Currently we're only setting the current target port group state | 295 | * Currently we're only setting the current target port group state |
| 238 | * to 'active/optimized' and let the array firmware figure out | 296 | * to 'active/optimized' and let the array firmware figure out |
| 239 | * the states of the remaining groups. | 297 | * the states of the remaining groups. |
| 240 | */ | 298 | */ |
| 241 | static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) | 299 | static unsigned submit_stpg(struct alua_dh_data *h) |
| 242 | { | 300 | { |
| 243 | struct request *rq; | 301 | struct request *rq; |
| 244 | int err = SCSI_DH_RES_TEMP_UNAVAIL; | 302 | int err = SCSI_DH_RES_TEMP_UNAVAIL; |
| 245 | int stpg_len = 8; | 303 | int stpg_len = 8; |
| 304 | struct scsi_device *sdev = h->sdev; | ||
| 246 | 305 | ||
| 247 | /* Prepare the data buffer */ | 306 | /* Prepare the data buffer */ |
| 248 | memset(h->buff, 0, stpg_len); | 307 | memset(h->buff, 0, stpg_len); |
| @@ -252,7 +311,7 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) | |||
| 252 | 311 | ||
| 253 | rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); | 312 | rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); |
| 254 | if (!rq) | 313 | if (!rq) |
| 255 | goto done; | 314 | return SCSI_DH_RES_TEMP_UNAVAIL; |
| 256 | 315 | ||
| 257 | /* Prepare the command. */ | 316 | /* Prepare the command. */ |
| 258 | rq->cmd[0] = MAINTENANCE_OUT; | 317 | rq->cmd[0] = MAINTENANCE_OUT; |
| @@ -266,17 +325,9 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) | |||
| 266 | rq->sense = h->sense; | 325 | rq->sense = h->sense; |
| 267 | memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); | 326 | memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); |
| 268 | rq->sense_len = h->senselen = 0; | 327 | rq->sense_len = h->senselen = 0; |
| 328 | rq->end_io_data = h; | ||
| 269 | 329 | ||
| 270 | err = blk_execute_rq(rq->q, NULL, rq, 1); | 330 | blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); |
| 271 | if (err == -EIO) { | ||
| 272 | sdev_printk(KERN_INFO, sdev, | ||
| 273 | "%s: stpg failed with %x\n", | ||
| 274 | ALUA_DH_NAME, rq->errors); | ||
| 275 | h->senselen = rq->sense_len; | ||
| 276 | err = SCSI_DH_IO; | ||
| 277 | } | ||
| 278 | blk_put_request(rq); | ||
| 279 | done: | ||
| 280 | return err; | 331 | return err; |
| 281 | } | 332 | } |
| 282 | 333 | ||
| @@ -477,50 +528,6 @@ static int alua_check_sense(struct scsi_device *sdev, | |||
| 477 | } | 528 | } |
| 478 | 529 | ||
| 479 | /* | 530 | /* |
| 480 | * alua_stpg - Evaluate SET TARGET GROUP STATES | ||
| 481 | * @sdev: the device to be evaluated | ||
| 482 | * @state: the new target group state | ||
| 483 | * | ||
| 484 | * Send a SET TARGET GROUP STATES command to the device. | ||
| 485 | * We only have to test here if we should resubmit the command; | ||
| 486 | * any other error is assumed as a failure. | ||
| 487 | */ | ||
| 488 | static int alua_stpg(struct scsi_device *sdev, int state, | ||
| 489 | struct alua_dh_data *h) | ||
| 490 | { | ||
| 491 | struct scsi_sense_hdr sense_hdr; | ||
| 492 | unsigned err; | ||
| 493 | int retry = ALUA_FAILOVER_RETRIES; | ||
| 494 | |||
| 495 | retry: | ||
| 496 | err = submit_stpg(sdev, h); | ||
| 497 | if (err == SCSI_DH_IO && h->senselen > 0) { | ||
| 498 | err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, | ||
| 499 | &sense_hdr); | ||
| 500 | if (!err) | ||
| 501 | return SCSI_DH_IO; | ||
| 502 | err = alua_check_sense(sdev, &sense_hdr); | ||
| 503 | if (retry > 0 && err == ADD_TO_MLQUEUE) { | ||
| 504 | retry--; | ||
| 505 | goto retry; | ||
| 506 | } | ||
| 507 | sdev_printk(KERN_INFO, sdev, | ||
| 508 | "%s: stpg sense code: %02x/%02x/%02x\n", | ||
| 509 | ALUA_DH_NAME, sense_hdr.sense_key, | ||
| 510 | sense_hdr.asc, sense_hdr.ascq); | ||
| 511 | err = SCSI_DH_IO; | ||
| 512 | } | ||
| 513 | if (err == SCSI_DH_OK) { | ||
| 514 | h->state = state; | ||
| 515 | sdev_printk(KERN_INFO, sdev, | ||
| 516 | "%s: port group %02x switched to state %c\n", | ||
| 517 | ALUA_DH_NAME, h->group_id, | ||
| 518 | print_alua_state(h->state) ); | ||
| 519 | } | ||
| 520 | return err; | ||
| 521 | } | ||
| 522 | |||
| 523 | /* | ||
| 524 | * alua_rtpg - Evaluate REPORT TARGET GROUP STATES | 531 | * alua_rtpg - Evaluate REPORT TARGET GROUP STATES |
| 525 | * @sdev: the device to be evaluated. | 532 | * @sdev: the device to be evaluated. |
| 526 | * | 533 | * |
| @@ -652,7 +659,8 @@ out: | |||
| 652 | * based on a certain policy. But until we actually encounter them it | 659 | * based on a certain policy. But until we actually encounter them it |
| 653 | * should be okay. | 660 | * should be okay. |
| 654 | */ | 661 | */ |
| 655 | static int alua_activate(struct scsi_device *sdev) | 662 | static int alua_activate(struct scsi_device *sdev, |
| 663 | activate_complete fn, void *data) | ||
| 656 | { | 664 | { |
| 657 | struct alua_dh_data *h = get_alua_data(sdev); | 665 | struct alua_dh_data *h = get_alua_data(sdev); |
| 658 | int err = SCSI_DH_OK; | 666 | int err = SCSI_DH_OK; |
| @@ -663,11 +671,19 @@ static int alua_activate(struct scsi_device *sdev) | |||
| 663 | goto out; | 671 | goto out; |
| 664 | } | 672 | } |
| 665 | 673 | ||
| 666 | if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) | 674 | if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { |
| 667 | err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); | 675 | h->callback_fn = fn; |
| 676 | h->callback_data = data; | ||
| 677 | err = submit_stpg(h); | ||
| 678 | if (err == SCSI_DH_OK) | ||
| 679 | return 0; | ||
| 680 | h->callback_fn = h->callback_data = NULL; | ||
| 681 | } | ||
| 668 | 682 | ||
| 669 | out: | 683 | out: |
| 670 | return err; | 684 | if (fn) |
| 685 | fn(data, err); | ||
| 686 | return 0; | ||
| 671 | } | 687 | } |
| 672 | 688 | ||
| 673 | /* | 689 | /* |
| @@ -745,6 +761,7 @@ static int alua_bus_attach(struct scsi_device *sdev) | |||
| 745 | h->rel_port = -1; | 761 | h->rel_port = -1; |
| 746 | h->buff = h->inq; | 762 | h->buff = h->inq; |
| 747 | h->bufflen = ALUA_INQUIRY_SIZE; | 763 | h->bufflen = ALUA_INQUIRY_SIZE; |
| 764 | h->sdev = sdev; | ||
| 748 | 765 | ||
| 749 | err = alua_initialize(sdev, h); | 766 | err = alua_initialize(sdev, h); |
| 750 | if (err != SCSI_DH_OK) | 767 | if (err != SCSI_DH_OK) |
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 0cffe84976fe..61966750bd60 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c | |||
| @@ -528,7 +528,8 @@ retry: | |||
| 528 | return err; | 528 | return err; |
| 529 | } | 529 | } |
| 530 | 530 | ||
| 531 | static int clariion_activate(struct scsi_device *sdev) | 531 | static int clariion_activate(struct scsi_device *sdev, |
| 532 | activate_complete fn, void *data) | ||
| 532 | { | 533 | { |
| 533 | struct clariion_dh_data *csdev = get_clariion_data(sdev); | 534 | struct clariion_dh_data *csdev = get_clariion_data(sdev); |
| 534 | int result; | 535 | int result; |
| @@ -559,7 +560,9 @@ done: | |||
| 559 | csdev->port, lun_state[csdev->lun_state], | 560 | csdev->port, lun_state[csdev->lun_state], |
| 560 | csdev->default_sp + 'A'); | 561 | csdev->default_sp + 'A'); |
| 561 | 562 | ||
| 562 | return result; | 563 | if (fn) |
| 564 | fn(data, result); | ||
| 565 | return 0; | ||
| 563 | } | 566 | } |
| 564 | /* | 567 | /* |
| 565 | * params - parameters in the following format | 568 | * params - parameters in the following format |
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index f7da7530875e..857fdd6032b2 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c | |||
| @@ -39,8 +39,14 @@ struct hp_sw_dh_data { | |||
| 39 | unsigned char sense[SCSI_SENSE_BUFFERSIZE]; | 39 | unsigned char sense[SCSI_SENSE_BUFFERSIZE]; |
| 40 | int path_state; | 40 | int path_state; |
| 41 | int retries; | 41 | int retries; |
| 42 | int retry_cnt; | ||
| 43 | struct scsi_device *sdev; | ||
| 44 | activate_complete callback_fn; | ||
| 45 | void *callback_data; | ||
| 42 | }; | 46 | }; |
| 43 | 47 | ||
| 48 | static int hp_sw_start_stop(struct hp_sw_dh_data *); | ||
| 49 | |||
| 44 | static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) | 50 | static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) |
| 45 | { | 51 | { |
| 46 | struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; | 52 | struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; |
| @@ -191,19 +197,53 @@ static int start_done(struct scsi_device *sdev, unsigned char *sense) | |||
| 191 | return rc; | 197 | return rc; |
| 192 | } | 198 | } |
| 193 | 199 | ||
| 200 | static void start_stop_endio(struct request *req, int error) | ||
| 201 | { | ||
| 202 | struct hp_sw_dh_data *h = req->end_io_data; | ||
| 203 | unsigned err = SCSI_DH_OK; | ||
| 204 | |||
| 205 | if (error || host_byte(req->errors) != DID_OK || | ||
| 206 | msg_byte(req->errors) != COMMAND_COMPLETE) { | ||
| 207 | sdev_printk(KERN_WARNING, h->sdev, | ||
| 208 | "%s: sending start_stop_unit failed with %x\n", | ||
| 209 | HP_SW_NAME, req->errors); | ||
| 210 | err = SCSI_DH_IO; | ||
| 211 | goto done; | ||
| 212 | } | ||
| 213 | |||
| 214 | if (req->sense_len > 0) { | ||
| 215 | err = start_done(h->sdev, h->sense); | ||
| 216 | if (err == SCSI_DH_RETRY) { | ||
| 217 | err = SCSI_DH_IO; | ||
| 218 | if (--h->retry_cnt) { | ||
| 219 | blk_put_request(req); | ||
| 220 | err = hp_sw_start_stop(h); | ||
| 221 | if (err == SCSI_DH_OK) | ||
| 222 | return; | ||
| 223 | } | ||
| 224 | } | ||
| 225 | } | ||
| 226 | done: | ||
| 227 | blk_put_request(req); | ||
| 228 | if (h->callback_fn) { | ||
| 229 | h->callback_fn(h->callback_data, err); | ||
| 230 | h->callback_fn = h->callback_data = NULL; | ||
| 231 | } | ||
| 232 | return; | ||
| 233 | |||
| 234 | } | ||
| 235 | |||
| 194 | /* | 236 | /* |
| 195 | * hp_sw_start_stop - Send START STOP UNIT command | 237 | * hp_sw_start_stop - Send START STOP UNIT command |
| 196 | * @sdev: sdev command should be sent to | 238 | * @sdev: sdev command should be sent to |
| 197 | * | 239 | * |
| 198 | * Sending START STOP UNIT activates the SP. | 240 | * Sending START STOP UNIT activates the SP. |
| 199 | */ | 241 | */ |
| 200 | static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h) | 242 | static int hp_sw_start_stop(struct hp_sw_dh_data *h) |
| 201 | { | 243 | { |
| 202 | struct request *req; | 244 | struct request *req; |
| 203 | int ret, retry; | ||
| 204 | 245 | ||
| 205 | retry: | 246 | req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); |
| 206 | req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); | ||
| 207 | if (!req) | 247 | if (!req) |
| 208 | return SCSI_DH_RES_TEMP_UNAVAIL; | 248 | return SCSI_DH_RES_TEMP_UNAVAIL; |
| 209 | 249 | ||
| @@ -217,32 +257,10 @@ retry: | |||
| 217 | req->sense = h->sense; | 257 | req->sense = h->sense; |
| 218 | memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); | 258 | memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); |
| 219 | req->sense_len = 0; | 259 | req->sense_len = 0; |
| 220 | retry = h->retries; | 260 | req->end_io_data = h; |
| 221 | |||
| 222 | ret = blk_execute_rq(req->q, NULL, req, 1); | ||
| 223 | if (ret == -EIO) { | ||
| 224 | if (req->sense_len > 0) { | ||
| 225 | ret = start_done(sdev, h->sense); | ||
| 226 | } else { | ||
| 227 | sdev_printk(KERN_WARNING, sdev, | ||
| 228 | "%s: sending start_stop_unit failed with %x\n", | ||
| 229 | HP_SW_NAME, req->errors); | ||
| 230 | ret = SCSI_DH_IO; | ||
| 231 | } | ||
| 232 | } else | ||
| 233 | ret = SCSI_DH_OK; | ||
| 234 | 261 | ||
| 235 | if (ret == SCSI_DH_RETRY) { | 262 | blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); |
| 236 | if (--retry) { | 263 | return SCSI_DH_OK; |
| 237 | blk_put_request(req); | ||
| 238 | goto retry; | ||
| 239 | } | ||
| 240 | ret = SCSI_DH_IO; | ||
| 241 | } | ||
| 242 | |||
| 243 | blk_put_request(req); | ||
| 244 | |||
| 245 | return ret; | ||
| 246 | } | 264 | } |
| 247 | 265 | ||
| 248 | static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) | 266 | static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) |
| @@ -268,7 +286,8 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) | |||
| 268 | * activate the passive path (and deactivate the | 286 | * activate the passive path (and deactivate the |
| 269 | * previously active one). | 287 | * previously active one). |
| 270 | */ | 288 | */ |
| 271 | static int hp_sw_activate(struct scsi_device *sdev) | 289 | static int hp_sw_activate(struct scsi_device *sdev, |
| 290 | activate_complete fn, void *data) | ||
| 272 | { | 291 | { |
| 273 | int ret = SCSI_DH_OK; | 292 | int ret = SCSI_DH_OK; |
| 274 | struct hp_sw_dh_data *h = get_hp_sw_data(sdev); | 293 | struct hp_sw_dh_data *h = get_hp_sw_data(sdev); |
| @@ -276,14 +295,18 @@ static int hp_sw_activate(struct scsi_device *sdev) | |||
| 276 | ret = hp_sw_tur(sdev, h); | 295 | ret = hp_sw_tur(sdev, h); |
| 277 | 296 | ||
| 278 | if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { | 297 | if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { |
| 279 | ret = hp_sw_start_stop(sdev, h); | 298 | h->retry_cnt = h->retries; |
| 299 | h->callback_fn = fn; | ||
| 300 | h->callback_data = data; | ||
| 301 | ret = hp_sw_start_stop(h); | ||
| 280 | if (ret == SCSI_DH_OK) | 302 | if (ret == SCSI_DH_OK) |
| 281 | sdev_printk(KERN_INFO, sdev, | 303 | return 0; |
| 282 | "%s: activated path\n", | 304 | h->callback_fn = h->callback_data = NULL; |
| 283 | HP_SW_NAME); | ||
| 284 | } | 305 | } |
| 285 | 306 | ||
| 286 | return ret; | 307 | if (fn) |
| 308 | fn(data, ret); | ||
| 309 | return 0; | ||
| 287 | } | 310 | } |
| 288 | 311 | ||
| 289 | static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { | 312 | static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { |
| @@ -326,6 +349,7 @@ static int hp_sw_bus_attach(struct scsi_device *sdev) | |||
| 326 | h = (struct hp_sw_dh_data *) scsi_dh_data->buf; | 349 | h = (struct hp_sw_dh_data *) scsi_dh_data->buf; |
| 327 | h->path_state = HP_SW_PATH_UNINITIALIZED; | 350 | h->path_state = HP_SW_PATH_UNINITIALIZED; |
| 328 | h->retries = HP_SW_RETRIES; | 351 | h->retries = HP_SW_RETRIES; |
| 352 | h->sdev = sdev; | ||
| 329 | 353 | ||
| 330 | ret = hp_sw_tur(sdev, h); | 354 | ret = hp_sw_tur(sdev, h); |
| 331 | if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) | 355 | if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) |
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 268189d31d9c..47cfe1c49c3e 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <scsi/scsi.h> | 22 | #include <scsi/scsi.h> |
| 23 | #include <scsi/scsi_eh.h> | 23 | #include <scsi/scsi_eh.h> |
| 24 | #include <scsi/scsi_dh.h> | 24 | #include <scsi/scsi_dh.h> |
| 25 | #include <linux/workqueue.h> | ||
| 25 | 26 | ||
| 26 | #define RDAC_NAME "rdac" | 27 | #define RDAC_NAME "rdac" |
| 27 | #define RDAC_RETRY_COUNT 5 | 28 | #define RDAC_RETRY_COUNT 5 |
| @@ -138,7 +139,13 @@ struct rdac_controller { | |||
| 138 | } mode_select; | 139 | } mode_select; |
| 139 | u8 index; | 140 | u8 index; |
| 140 | u8 array_name[ARRAY_LABEL_LEN]; | 141 | u8 array_name[ARRAY_LABEL_LEN]; |
| 142 | spinlock_t ms_lock; | ||
| 143 | int ms_queued; | ||
| 144 | struct work_struct ms_work; | ||
| 145 | struct scsi_device *ms_sdev; | ||
| 146 | struct list_head ms_head; | ||
| 141 | }; | 147 | }; |
| 148 | |||
| 142 | struct c8_inquiry { | 149 | struct c8_inquiry { |
| 143 | u8 peripheral_info; | 150 | u8 peripheral_info; |
| 144 | u8 page_code; /* 0xC8 */ | 151 | u8 page_code; /* 0xC8 */ |
| @@ -198,8 +205,17 @@ static const char *lun_state[] = | |||
| 198 | "owned (AVT mode)", | 205 | "owned (AVT mode)", |
| 199 | }; | 206 | }; |
| 200 | 207 | ||
| 208 | struct rdac_queue_data { | ||
| 209 | struct list_head entry; | ||
| 210 | struct rdac_dh_data *h; | ||
| 211 | activate_complete callback_fn; | ||
| 212 | void *callback_data; | ||
| 213 | }; | ||
| 214 | |||
| 201 | static LIST_HEAD(ctlr_list); | 215 | static LIST_HEAD(ctlr_list); |
| 202 | static DEFINE_SPINLOCK(list_lock); | 216 | static DEFINE_SPINLOCK(list_lock); |
| 217 | static struct workqueue_struct *kmpath_rdacd; | ||
| 218 | static void send_mode_select(struct work_struct *work); | ||
| 203 | 219 | ||
| 204 | /* | 220 | /* |
| 205 | * module parameter to enable rdac debug logging. | 221 | * module parameter to enable rdac debug logging. |
| @@ -281,7 +297,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, | |||
| 281 | rdac_pg->subpage_code = 0x1; | 297 | rdac_pg->subpage_code = 0x1; |
| 282 | rdac_pg->page_len[0] = 0x01; | 298 | rdac_pg->page_len[0] = 0x01; |
| 283 | rdac_pg->page_len[1] = 0x28; | 299 | rdac_pg->page_len[1] = 0x28; |
| 284 | rdac_pg->lun_table[h->lun] = 0x81; | ||
| 285 | } else { | 300 | } else { |
| 286 | struct rdac_pg_legacy *rdac_pg; | 301 | struct rdac_pg_legacy *rdac_pg; |
| 287 | 302 | ||
| @@ -291,7 +306,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, | |||
| 291 | common = &rdac_pg->common; | 306 | common = &rdac_pg->common; |
| 292 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; | 307 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; |
| 293 | rdac_pg->page_len = 0x68; | 308 | rdac_pg->page_len = 0x68; |
| 294 | rdac_pg->lun_table[h->lun] = 0x81; | ||
| 295 | } | 309 | } |
| 296 | common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; | 310 | common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; |
| 297 | common->quiescence_timeout = RDAC_QUIESCENCE_TIME; | 311 | common->quiescence_timeout = RDAC_QUIESCENCE_TIME; |
| @@ -325,6 +339,7 @@ static void release_controller(struct kref *kref) | |||
| 325 | struct rdac_controller *ctlr; | 339 | struct rdac_controller *ctlr; |
| 326 | ctlr = container_of(kref, struct rdac_controller, kref); | 340 | ctlr = container_of(kref, struct rdac_controller, kref); |
| 327 | 341 | ||
| 342 | flush_workqueue(kmpath_rdacd); | ||
| 328 | spin_lock(&list_lock); | 343 | spin_lock(&list_lock); |
| 329 | list_del(&ctlr->node); | 344 | list_del(&ctlr->node); |
| 330 | spin_unlock(&list_lock); | 345 | spin_unlock(&list_lock); |
| @@ -363,6 +378,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, | |||
| 363 | 378 | ||
| 364 | kref_init(&ctlr->kref); | 379 | kref_init(&ctlr->kref); |
| 365 | ctlr->use_ms10 = -1; | 380 | ctlr->use_ms10 = -1; |
| 381 | ctlr->ms_queued = 0; | ||
| 382 | ctlr->ms_sdev = NULL; | ||
| 383 | spin_lock_init(&ctlr->ms_lock); | ||
| 384 | INIT_WORK(&ctlr->ms_work, send_mode_select); | ||
| 385 | INIT_LIST_HEAD(&ctlr->ms_head); | ||
| 366 | list_add(&ctlr->node, &ctlr_list); | 386 | list_add(&ctlr->node, &ctlr_list); |
| 367 | done: | 387 | done: |
| 368 | spin_unlock(&list_lock); | 388 | spin_unlock(&list_lock); |
| @@ -490,7 +510,7 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) | |||
| 490 | } | 510 | } |
| 491 | 511 | ||
| 492 | static int mode_select_handle_sense(struct scsi_device *sdev, | 512 | static int mode_select_handle_sense(struct scsi_device *sdev, |
| 493 | unsigned char *sensebuf) | 513 | unsigned char *sensebuf) |
| 494 | { | 514 | { |
| 495 | struct scsi_sense_hdr sense_hdr; | 515 | struct scsi_sense_hdr sense_hdr; |
| 496 | int err = SCSI_DH_IO, ret; | 516 | int err = SCSI_DH_IO, ret; |
| @@ -533,11 +553,29 @@ done: | |||
| 533 | return err; | 553 | return err; |
| 534 | } | 554 | } |
| 535 | 555 | ||
| 536 | static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) | 556 | static void send_mode_select(struct work_struct *work) |
| 537 | { | 557 | { |
| 558 | struct rdac_controller *ctlr = | ||
| 559 | container_of(work, struct rdac_controller, ms_work); | ||
| 538 | struct request *rq; | 560 | struct request *rq; |
| 561 | struct scsi_device *sdev = ctlr->ms_sdev; | ||
| 562 | struct rdac_dh_data *h = get_rdac_data(sdev); | ||
| 539 | struct request_queue *q = sdev->request_queue; | 563 | struct request_queue *q = sdev->request_queue; |
| 540 | int err, retry_cnt = RDAC_RETRY_COUNT; | 564 | int err, retry_cnt = RDAC_RETRY_COUNT; |
| 565 | struct rdac_queue_data *tmp, *qdata; | ||
| 566 | LIST_HEAD(list); | ||
| 567 | u8 *lun_table; | ||
| 568 | |||
| 569 | spin_lock(&ctlr->ms_lock); | ||
| 570 | list_splice_init(&ctlr->ms_head, &list); | ||
| 571 | ctlr->ms_queued = 0; | ||
| 572 | ctlr->ms_sdev = NULL; | ||
| 573 | spin_unlock(&ctlr->ms_lock); | ||
| 574 | |||
| 575 | if (ctlr->use_ms10) | ||
| 576 | lun_table = ctlr->mode_select.expanded.lun_table; | ||
| 577 | else | ||
| 578 | lun_table = ctlr->mode_select.legacy.lun_table; | ||
| 541 | 579 | ||
| 542 | retry: | 580 | retry: |
| 543 | err = SCSI_DH_RES_TEMP_UNAVAIL; | 581 | err = SCSI_DH_RES_TEMP_UNAVAIL; |
| @@ -545,6 +583,10 @@ retry: | |||
| 545 | if (!rq) | 583 | if (!rq) |
| 546 | goto done; | 584 | goto done; |
| 547 | 585 | ||
| 586 | list_for_each_entry(qdata, &list, entry) { | ||
| 587 | lun_table[qdata->h->lun] = 0x81; | ||
| 588 | } | ||
| 589 | |||
| 548 | RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " | 590 | RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " |
| 549 | "%s MODE_SELECT command", | 591 | "%s MODE_SELECT command", |
| 550 | (char *) h->ctlr->array_name, h->ctlr->index, | 592 | (char *) h->ctlr->array_name, h->ctlr->index, |
| @@ -565,10 +607,45 @@ retry: | |||
| 565 | } | 607 | } |
| 566 | 608 | ||
| 567 | done: | 609 | done: |
| 568 | return err; | 610 | list_for_each_entry_safe(qdata, tmp, &list, entry) { |
| 611 | list_del(&qdata->entry); | ||
| 612 | if (err == SCSI_DH_OK) | ||
| 613 | qdata->h->state = RDAC_STATE_ACTIVE; | ||
| 614 | if (qdata->callback_fn) | ||
| 615 | qdata->callback_fn(qdata->callback_data, err); | ||
| 616 | kfree(qdata); | ||
| 617 | } | ||
| 618 | return; | ||
| 619 | } | ||
| 620 | |||
| 621 | static int queue_mode_select(struct scsi_device *sdev, | ||
| 622 | activate_complete fn, void *data) | ||
| 623 | { | ||
| 624 | struct rdac_queue_data *qdata; | ||
| 625 | struct rdac_controller *ctlr; | ||
| 626 | |||
| 627 | qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); | ||
| 628 | if (!qdata) | ||
| 629 | return SCSI_DH_RETRY; | ||
| 630 | |||
| 631 | qdata->h = get_rdac_data(sdev); | ||
| 632 | qdata->callback_fn = fn; | ||
| 633 | qdata->callback_data = data; | ||
| 634 | |||
| 635 | ctlr = qdata->h->ctlr; | ||
| 636 | spin_lock(&ctlr->ms_lock); | ||
| 637 | list_add_tail(&qdata->entry, &ctlr->ms_head); | ||
| 638 | if (!ctlr->ms_queued) { | ||
| 639 | ctlr->ms_queued = 1; | ||
| 640 | ctlr->ms_sdev = sdev; | ||
| 641 | queue_work(kmpath_rdacd, &ctlr->ms_work); | ||
| 642 | } | ||
| 643 | spin_unlock(&ctlr->ms_lock); | ||
| 644 | return SCSI_DH_OK; | ||
| 569 | } | 645 | } |
| 570 | 646 | ||
| 571 | static int rdac_activate(struct scsi_device *sdev) | 647 | static int rdac_activate(struct scsi_device *sdev, |
| 648 | activate_complete fn, void *data) | ||
| 572 | { | 649 | { |
| 573 | struct rdac_dh_data *h = get_rdac_data(sdev); | 650 | struct rdac_dh_data *h = get_rdac_data(sdev); |
| 574 | int err = SCSI_DH_OK; | 651 | int err = SCSI_DH_OK; |
| @@ -577,10 +654,15 @@ static int rdac_activate(struct scsi_device *sdev) | |||
| 577 | if (err != SCSI_DH_OK) | 654 | if (err != SCSI_DH_OK) |
| 578 | goto done; | 655 | goto done; |
| 579 | 656 | ||
| 580 | if (h->lun_state == RDAC_LUN_UNOWNED) | 657 | if (h->lun_state == RDAC_LUN_UNOWNED) { |
| 581 | err = send_mode_select(sdev, h); | 658 | err = queue_mode_select(sdev, fn, data); |
| 659 | if (err == SCSI_DH_OK) | ||
| 660 | return 0; | ||
| 661 | } | ||
| 582 | done: | 662 | done: |
| 583 | return err; | 663 | if (fn) |
| 664 | fn(data, err); | ||
| 665 | return 0; | ||
| 584 | } | 666 | } |
| 585 | 667 | ||
| 586 | static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) | 668 | static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) |
| @@ -790,13 +872,26 @@ static int __init rdac_init(void) | |||
| 790 | int r; | 872 | int r; |
| 791 | 873 | ||
| 792 | r = scsi_register_device_handler(&rdac_dh); | 874 | r = scsi_register_device_handler(&rdac_dh); |
| 793 | if (r != 0) | 875 | if (r != 0) { |
| 794 | printk(KERN_ERR "Failed to register scsi device handler."); | 876 | printk(KERN_ERR "Failed to register scsi device handler."); |
| 877 | goto done; | ||
| 878 | } | ||
| 879 | |||
| 880 | /* | ||
| 881 | * Create workqueue to handle mode selects for rdac | ||
| 882 | */ | ||
| 883 | kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); | ||
| 884 | if (!kmpath_rdacd) { | ||
| 885 | scsi_unregister_device_handler(&rdac_dh); | ||
| 886 | printk(KERN_ERR "kmpath_rdacd creation failed.\n"); | ||
| 887 | } | ||
| 888 | done: | ||
| 795 | return r; | 889 | return r; |
| 796 | } | 890 | } |
| 797 | 891 | ||
| 798 | static void __exit rdac_exit(void) | 892 | static void __exit rdac_exit(void) |
| 799 | { | 893 | { |
| 894 | destroy_workqueue(kmpath_rdacd); | ||
| 800 | scsi_unregister_device_handler(&rdac_dh); | 895 | scsi_unregister_device_handler(&rdac_dh); |
| 801 | } | 896 | } |
| 802 | 897 | ||
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index fa738ec8692a..207352cc70cc 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | #include <scsi/scsi_host.h> | 31 | #include <scsi/scsi_host.h> |
| 32 | 32 | ||
| 33 | /* | 33 | /* |
| 34 | * Defintions for the generic 5380 driver. | 34 | * Definitions for the generic 5380 driver. |
| 35 | */ | 35 | */ |
| 36 | #define AUTOSENSE | 36 | #define AUTOSENSE |
| 37 | 37 | ||
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 704b8e034946..a30ffaa1222c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
| @@ -66,14 +66,14 @@ LIST_HEAD(fcoe_hostlist); | |||
| 66 | DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); | 66 | DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); |
| 67 | 67 | ||
| 68 | /* Function Prototypes */ | 68 | /* Function Prototypes */ |
| 69 | static int fcoe_reset(struct Scsi_Host *shost); | 69 | static int fcoe_reset(struct Scsi_Host *); |
| 70 | static int fcoe_xmit(struct fc_lport *, struct fc_frame *); | 70 | static int fcoe_xmit(struct fc_lport *, struct fc_frame *); |
| 71 | static int fcoe_rcv(struct sk_buff *, struct net_device *, | 71 | static int fcoe_rcv(struct sk_buff *, struct net_device *, |
| 72 | struct packet_type *, struct net_device *); | 72 | struct packet_type *, struct net_device *); |
| 73 | static int fcoe_percpu_receive_thread(void *arg); | 73 | static int fcoe_percpu_receive_thread(void *); |
| 74 | static void fcoe_clean_pending_queue(struct fc_lport *lp); | 74 | static void fcoe_clean_pending_queue(struct fc_lport *); |
| 75 | static void fcoe_percpu_clean(struct fc_lport *lp); | 75 | static void fcoe_percpu_clean(struct fc_lport *); |
| 76 | static int fcoe_link_ok(struct fc_lport *lp); | 76 | static int fcoe_link_ok(struct fc_lport *); |
| 77 | 77 | ||
| 78 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); | 78 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); |
| 79 | static int fcoe_hostlist_add(const struct fc_lport *); | 79 | static int fcoe_hostlist_add(const struct fc_lport *); |
| @@ -82,15 +82,69 @@ static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); | |||
| 82 | static int fcoe_device_notification(struct notifier_block *, ulong, void *); | 82 | static int fcoe_device_notification(struct notifier_block *, ulong, void *); |
| 83 | static void fcoe_dev_setup(void); | 83 | static void fcoe_dev_setup(void); |
| 84 | static void fcoe_dev_cleanup(void); | 84 | static void fcoe_dev_cleanup(void); |
| 85 | static struct fcoe_interface * | 85 | static struct fcoe_interface |
| 86 | fcoe_hostlist_lookup_port(const struct net_device *dev); | 86 | *fcoe_hostlist_lookup_port(const struct net_device *); |
| 87 | |||
| 88 | static int fcoe_fip_recv(struct sk_buff *, struct net_device *, | ||
| 89 | struct packet_type *, struct net_device *); | ||
| 90 | |||
| 91 | static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); | ||
| 92 | static void fcoe_update_src_mac(struct fc_lport *, u8 *); | ||
| 93 | static u8 *fcoe_get_src_mac(struct fc_lport *); | ||
| 94 | static void fcoe_destroy_work(struct work_struct *); | ||
| 95 | |||
| 96 | static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, | ||
| 97 | unsigned int); | ||
| 98 | static int fcoe_ddp_done(struct fc_lport *, u16); | ||
| 99 | |||
| 100 | static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); | ||
| 101 | |||
| 102 | static int fcoe_create(const char *, struct kernel_param *); | ||
| 103 | static int fcoe_destroy(const char *, struct kernel_param *); | ||
| 104 | |||
| 105 | static struct fc_seq *fcoe_elsct_send(struct fc_lport *, | ||
| 106 | u32 did, struct fc_frame *, | ||
| 107 | unsigned int op, | ||
| 108 | void (*resp)(struct fc_seq *, | ||
| 109 | struct fc_frame *, | ||
| 110 | void *), | ||
| 111 | void *, u32 timeout); | ||
| 112 | static void fcoe_recv_frame(struct sk_buff *skb); | ||
| 113 | |||
| 114 | static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); | ||
| 115 | |||
| 116 | module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); | ||
| 117 | __MODULE_PARM_TYPE(create, "string"); | ||
| 118 | MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); | ||
| 119 | module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); | ||
| 120 | __MODULE_PARM_TYPE(destroy, "string"); | ||
| 121 | MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); | ||
| 87 | 122 | ||
| 88 | /* notification function from net device */ | 123 | /* notification function for packets from net device */ |
| 89 | static struct notifier_block fcoe_notifier = { | 124 | static struct notifier_block fcoe_notifier = { |
| 90 | .notifier_call = fcoe_device_notification, | 125 | .notifier_call = fcoe_device_notification, |
| 91 | }; | 126 | }; |
| 92 | 127 | ||
| 93 | static struct scsi_transport_template *scsi_transport_fcoe_sw; | 128 | /* notification function for CPU hotplug events */ |
| 129 | static struct notifier_block fcoe_cpu_notifier = { | ||
| 130 | .notifier_call = fcoe_cpu_callback, | ||
| 131 | }; | ||
| 132 | |||
| 133 | static struct scsi_transport_template *fcoe_transport_template; | ||
| 134 | static struct scsi_transport_template *fcoe_vport_transport_template; | ||
| 135 | |||
| 136 | static int fcoe_vport_destroy(struct fc_vport *); | ||
| 137 | static int fcoe_vport_create(struct fc_vport *, bool disabled); | ||
| 138 | static int fcoe_vport_disable(struct fc_vport *, bool disable); | ||
| 139 | static void fcoe_set_vport_symbolic_name(struct fc_vport *); | ||
| 140 | |||
| 141 | static struct libfc_function_template fcoe_libfc_fcn_templ = { | ||
| 142 | .frame_send = fcoe_xmit, | ||
| 143 | .ddp_setup = fcoe_ddp_setup, | ||
| 144 | .ddp_done = fcoe_ddp_done, | ||
| 145 | .elsct_send = fcoe_elsct_send, | ||
| 146 | .get_lesb = fcoe_get_lesb, | ||
| 147 | }; | ||
| 94 | 148 | ||
| 95 | struct fc_function_template fcoe_transport_function = { | 149 | struct fc_function_template fcoe_transport_function = { |
| 96 | .show_host_node_name = 1, | 150 | .show_host_node_name = 1, |
| @@ -123,6 +177,48 @@ struct fc_function_template fcoe_transport_function = { | |||
| 123 | .issue_fc_host_lip = fcoe_reset, | 177 | .issue_fc_host_lip = fcoe_reset, |
| 124 | 178 | ||
| 125 | .terminate_rport_io = fc_rport_terminate_io, | 179 | .terminate_rport_io = fc_rport_terminate_io, |
| 180 | |||
| 181 | .vport_create = fcoe_vport_create, | ||
| 182 | .vport_delete = fcoe_vport_destroy, | ||
| 183 | .vport_disable = fcoe_vport_disable, | ||
| 184 | .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, | ||
| 185 | |||
| 186 | .bsg_request = fc_lport_bsg_request, | ||
| 187 | }; | ||
| 188 | |||
| 189 | struct fc_function_template fcoe_vport_transport_function = { | ||
| 190 | .show_host_node_name = 1, | ||
| 191 | .show_host_port_name = 1, | ||
| 192 | .show_host_supported_classes = 1, | ||
| 193 | .show_host_supported_fc4s = 1, | ||
| 194 | .show_host_active_fc4s = 1, | ||
| 195 | .show_host_maxframe_size = 1, | ||
| 196 | |||
| 197 | .show_host_port_id = 1, | ||
| 198 | .show_host_supported_speeds = 1, | ||
| 199 | .get_host_speed = fc_get_host_speed, | ||
| 200 | .show_host_speed = 1, | ||
| 201 | .show_host_port_type = 1, | ||
| 202 | .get_host_port_state = fc_get_host_port_state, | ||
| 203 | .show_host_port_state = 1, | ||
| 204 | .show_host_symbolic_name = 1, | ||
| 205 | |||
| 206 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), | ||
| 207 | .show_rport_maxframe_size = 1, | ||
| 208 | .show_rport_supported_classes = 1, | ||
| 209 | |||
| 210 | .show_host_fabric_name = 1, | ||
| 211 | .show_starget_node_name = 1, | ||
| 212 | .show_starget_port_name = 1, | ||
| 213 | .show_starget_port_id = 1, | ||
| 214 | .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, | ||
| 215 | .show_rport_dev_loss_tmo = 1, | ||
| 216 | .get_fc_host_stats = fc_get_host_stats, | ||
| 217 | .issue_fc_host_lip = fcoe_reset, | ||
| 218 | |||
| 219 | .terminate_rport_io = fc_rport_terminate_io, | ||
| 220 | |||
| 221 | .bsg_request = fc_lport_bsg_request, | ||
| 126 | }; | 222 | }; |
| 127 | 223 | ||
| 128 | static struct scsi_host_template fcoe_shost_template = { | 224 | static struct scsi_host_template fcoe_shost_template = { |
| @@ -137,20 +233,17 @@ static struct scsi_host_template fcoe_shost_template = { | |||
| 137 | .change_queue_depth = fc_change_queue_depth, | 233 | .change_queue_depth = fc_change_queue_depth, |
| 138 | .change_queue_type = fc_change_queue_type, | 234 | .change_queue_type = fc_change_queue_type, |
| 139 | .this_id = -1, | 235 | .this_id = -1, |
| 140 | .cmd_per_lun = 32, | 236 | .cmd_per_lun = 3, |
| 141 | .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, | 237 | .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, |
| 142 | .use_clustering = ENABLE_CLUSTERING, | 238 | .use_clustering = ENABLE_CLUSTERING, |
| 143 | .sg_tablesize = SG_ALL, | 239 | .sg_tablesize = SG_ALL, |
| 144 | .max_sectors = 0xffff, | 240 | .max_sectors = 0xffff, |
| 145 | }; | 241 | }; |
| 146 | 242 | ||
| 147 | static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, | ||
| 148 | struct packet_type *ptype, | ||
| 149 | struct net_device *orig_dev); | ||
| 150 | /** | 243 | /** |
| 151 | * fcoe_interface_setup() | 244 | * fcoe_interface_setup() - Setup a FCoE interface |
| 152 | * @fcoe: new fcoe_interface | 245 | * @fcoe: The new FCoE interface |
| 153 | * @netdev : ptr to the associated netdevice struct | 246 | * @netdev: The net device that the fcoe interface is on |
| 154 | * | 247 | * |
| 155 | * Returns : 0 for success | 248 | * Returns : 0 for success |
| 156 | * Locking: must be called with the RTNL mutex held | 249 | * Locking: must be called with the RTNL mutex held |
| @@ -160,23 +253,36 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, | |||
| 160 | { | 253 | { |
| 161 | struct fcoe_ctlr *fip = &fcoe->ctlr; | 254 | struct fcoe_ctlr *fip = &fcoe->ctlr; |
| 162 | struct netdev_hw_addr *ha; | 255 | struct netdev_hw_addr *ha; |
| 256 | struct net_device *real_dev; | ||
| 163 | u8 flogi_maddr[ETH_ALEN]; | 257 | u8 flogi_maddr[ETH_ALEN]; |
| 258 | const struct net_device_ops *ops; | ||
| 164 | 259 | ||
| 165 | fcoe->netdev = netdev; | 260 | fcoe->netdev = netdev; |
| 166 | 261 | ||
| 262 | /* Let LLD initialize for FCoE */ | ||
| 263 | ops = netdev->netdev_ops; | ||
| 264 | if (ops->ndo_fcoe_enable) { | ||
| 265 | if (ops->ndo_fcoe_enable(netdev)) | ||
| 266 | FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE" | ||
| 267 | " specific feature for LLD.\n"); | ||
| 268 | } | ||
| 269 | |||
| 167 | /* Do not support for bonding device */ | 270 | /* Do not support for bonding device */ |
| 168 | if ((netdev->priv_flags & IFF_MASTER_ALB) || | 271 | if ((netdev->priv_flags & IFF_MASTER_ALB) || |
| 169 | (netdev->priv_flags & IFF_SLAVE_INACTIVE) || | 272 | (netdev->priv_flags & IFF_SLAVE_INACTIVE) || |
| 170 | (netdev->priv_flags & IFF_MASTER_8023AD)) { | 273 | (netdev->priv_flags & IFF_MASTER_8023AD)) { |
| 274 | FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); | ||
| 171 | return -EOPNOTSUPP; | 275 | return -EOPNOTSUPP; |
| 172 | } | 276 | } |
| 173 | 277 | ||
| 174 | /* look for SAN MAC address, if multiple SAN MACs exist, only | 278 | /* look for SAN MAC address, if multiple SAN MACs exist, only |
| 175 | * use the first one for SPMA */ | 279 | * use the first one for SPMA */ |
| 280 | real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? | ||
| 281 | vlan_dev_real_dev(netdev) : netdev; | ||
| 176 | rcu_read_lock(); | 282 | rcu_read_lock(); |
| 177 | for_each_dev_addr(netdev, ha) { | 283 | for_each_dev_addr(real_dev, ha) { |
| 178 | if ((ha->type == NETDEV_HW_ADDR_T_SAN) && | 284 | if ((ha->type == NETDEV_HW_ADDR_T_SAN) && |
| 179 | (is_valid_ether_addr(fip->ctl_src_addr))) { | 285 | (is_valid_ether_addr(ha->addr))) { |
| 180 | memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); | 286 | memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); |
| 181 | fip->spma = 1; | 287 | fip->spma = 1; |
| 182 | break; | 288 | break; |
| @@ -216,19 +322,16 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, | |||
| 216 | return 0; | 322 | return 0; |
| 217 | } | 323 | } |
| 218 | 324 | ||
| 219 | static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); | ||
| 220 | static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new); | ||
| 221 | static void fcoe_destroy_work(struct work_struct *work); | ||
| 222 | |||
| 223 | /** | 325 | /** |
| 224 | * fcoe_interface_create() | 326 | * fcoe_interface_create() - Create a FCoE interface on a net device |
| 225 | * @netdev: network interface | 327 | * @netdev: The net device to create the FCoE interface on |
| 226 | * | 328 | * |
| 227 | * Returns: pointer to a struct fcoe_interface or NULL on error | 329 | * Returns: pointer to a struct fcoe_interface or NULL on error |
| 228 | */ | 330 | */ |
| 229 | static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) | 331 | static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) |
| 230 | { | 332 | { |
| 231 | struct fcoe_interface *fcoe; | 333 | struct fcoe_interface *fcoe; |
| 334 | int err; | ||
| 232 | 335 | ||
| 233 | fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); | 336 | fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); |
| 234 | if (!fcoe) { | 337 | if (!fcoe) { |
| @@ -245,15 +348,22 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) | |||
| 245 | fcoe_ctlr_init(&fcoe->ctlr); | 348 | fcoe_ctlr_init(&fcoe->ctlr); |
| 246 | fcoe->ctlr.send = fcoe_fip_send; | 349 | fcoe->ctlr.send = fcoe_fip_send; |
| 247 | fcoe->ctlr.update_mac = fcoe_update_src_mac; | 350 | fcoe->ctlr.update_mac = fcoe_update_src_mac; |
| 351 | fcoe->ctlr.get_src_addr = fcoe_get_src_mac; | ||
| 248 | 352 | ||
| 249 | fcoe_interface_setup(fcoe, netdev); | 353 | err = fcoe_interface_setup(fcoe, netdev); |
| 354 | if (err) { | ||
| 355 | fcoe_ctlr_destroy(&fcoe->ctlr); | ||
| 356 | kfree(fcoe); | ||
| 357 | dev_put(netdev); | ||
| 358 | return NULL; | ||
| 359 | } | ||
| 250 | 360 | ||
| 251 | return fcoe; | 361 | return fcoe; |
| 252 | } | 362 | } |
| 253 | 363 | ||
| 254 | /** | 364 | /** |
| 255 | * fcoe_interface_cleanup() - clean up netdev configurations | 365 | * fcoe_interface_cleanup() - Clean up a FCoE interface |
| 256 | * @fcoe: | 366 | * @fcoe: The FCoE interface to be cleaned up |
| 257 | * | 367 | * |
| 258 | * Caller must be holding the RTNL mutex | 368 | * Caller must be holding the RTNL mutex |
| 259 | */ | 369 | */ |
| @@ -262,6 +372,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
| 262 | struct net_device *netdev = fcoe->netdev; | 372 | struct net_device *netdev = fcoe->netdev; |
| 263 | struct fcoe_ctlr *fip = &fcoe->ctlr; | 373 | struct fcoe_ctlr *fip = &fcoe->ctlr; |
| 264 | u8 flogi_maddr[ETH_ALEN]; | 374 | u8 flogi_maddr[ETH_ALEN]; |
| 375 | const struct net_device_ops *ops; | ||
| 265 | 376 | ||
| 266 | /* | 377 | /* |
| 267 | * Don't listen for Ethernet packets anymore. | 378 | * Don't listen for Ethernet packets anymore. |
| @@ -276,16 +387,22 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
| 276 | /* Delete secondary MAC addresses */ | 387 | /* Delete secondary MAC addresses */ |
| 277 | memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); | 388 | memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); |
| 278 | dev_unicast_delete(netdev, flogi_maddr); | 389 | dev_unicast_delete(netdev, flogi_maddr); |
| 279 | if (!is_zero_ether_addr(fip->data_src_addr)) | ||
| 280 | dev_unicast_delete(netdev, fip->data_src_addr); | ||
| 281 | if (fip->spma) | 390 | if (fip->spma) |
| 282 | dev_unicast_delete(netdev, fip->ctl_src_addr); | 391 | dev_unicast_delete(netdev, fip->ctl_src_addr); |
| 283 | dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); | 392 | dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); |
| 393 | |||
| 394 | /* Tell the LLD we are done w/ FCoE */ | ||
| 395 | ops = netdev->netdev_ops; | ||
| 396 | if (ops->ndo_fcoe_disable) { | ||
| 397 | if (ops->ndo_fcoe_disable(netdev)) | ||
| 398 | FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" | ||
| 399 | " specific feature for LLD.\n"); | ||
| 400 | } | ||
| 284 | } | 401 | } |
| 285 | 402 | ||
| 286 | /** | 403 | /** |
| 287 | * fcoe_interface_release() - fcoe_port kref release function | 404 | * fcoe_interface_release() - fcoe_port kref release function |
| 288 | * @kref: embedded reference count in an fcoe_interface struct | 405 | * @kref: Embedded reference count in an fcoe_interface struct |
| 289 | */ | 406 | */ |
| 290 | static void fcoe_interface_release(struct kref *kref) | 407 | static void fcoe_interface_release(struct kref *kref) |
| 291 | { | 408 | { |
| @@ -301,8 +418,8 @@ static void fcoe_interface_release(struct kref *kref) | |||
| 301 | } | 418 | } |
| 302 | 419 | ||
| 303 | /** | 420 | /** |
| 304 | * fcoe_interface_get() | 421 | * fcoe_interface_get() - Get a reference to a FCoE interface |
| 305 | * @fcoe: | 422 | * @fcoe: The FCoE interface to be held |
| 306 | */ | 423 | */ |
| 307 | static inline void fcoe_interface_get(struct fcoe_interface *fcoe) | 424 | static inline void fcoe_interface_get(struct fcoe_interface *fcoe) |
| 308 | { | 425 | { |
| @@ -310,8 +427,8 @@ static inline void fcoe_interface_get(struct fcoe_interface *fcoe) | |||
| 310 | } | 427 | } |
| 311 | 428 | ||
| 312 | /** | 429 | /** |
| 313 | * fcoe_interface_put() | 430 | * fcoe_interface_put() - Put a reference to a FCoE interface |
| 314 | * @fcoe: | 431 | * @fcoe: The FCoE interface to be released |
| 315 | */ | 432 | */ |
| 316 | static inline void fcoe_interface_put(struct fcoe_interface *fcoe) | 433 | static inline void fcoe_interface_put(struct fcoe_interface *fcoe) |
| 317 | { | 434 | { |
| @@ -319,15 +436,16 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe) | |||
| 319 | } | 436 | } |
| 320 | 437 | ||
| 321 | /** | 438 | /** |
| 322 | * fcoe_fip_recv - handle a received FIP frame. | 439 | * fcoe_fip_recv() - Handler for received FIP frames |
| 323 | * @skb: the receive skb | 440 | * @skb: The receive skb |
| 324 | * @dev: associated &net_device | 441 | * @netdev: The associated net device |
| 325 | * @ptype: the &packet_type structure which was used to register this handler. | 442 | * @ptype: The packet_type structure which was used to register this handler |
| 326 | * @orig_dev: original receive &net_device, in case @dev is a bond. | 443 | * @orig_dev: The original net_device the skb was received on. |
| 444 | * (in case dev is a bond) | ||
| 327 | * | 445 | * |
| 328 | * Returns: 0 for success | 446 | * Returns: 0 for success |
| 329 | */ | 447 | */ |
| 330 | static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, | 448 | static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, |
| 331 | struct packet_type *ptype, | 449 | struct packet_type *ptype, |
| 332 | struct net_device *orig_dev) | 450 | struct net_device *orig_dev) |
| 333 | { | 451 | { |
| @@ -339,9 +457,9 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, | |||
| 339 | } | 457 | } |
| 340 | 458 | ||
| 341 | /** | 459 | /** |
| 342 | * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame. | 460 | * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame |
| 343 | * @fip: FCoE controller. | 461 | * @fip: The FCoE controller |
| 344 | * @skb: FIP Packet. | 462 | * @skb: The FIP packet to be sent |
| 345 | */ | 463 | */ |
| 346 | static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | 464 | static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) |
| 347 | { | 465 | { |
| @@ -350,88 +468,101 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 350 | } | 468 | } |
| 351 | 469 | ||
| 352 | /** | 470 | /** |
| 353 | * fcoe_update_src_mac() - Update Ethernet MAC filters. | 471 | * fcoe_update_src_mac() - Update the Ethernet MAC filters |
| 354 | * @fip: FCoE controller. | 472 | * @lport: The local port to update the source MAC on |
| 355 | * @old: Unicast MAC address to delete if the MAC is non-zero. | 473 | * @addr: Unicast MAC address to add |
| 356 | * @new: Unicast MAC address to add. | ||
| 357 | * | 474 | * |
| 358 | * Remove any previously-set unicast MAC filter. | 475 | * Remove any previously-set unicast MAC filter. |
| 359 | * Add secondary FCoE MAC address filter for our OUI. | 476 | * Add secondary FCoE MAC address filter for our OUI. |
| 360 | */ | 477 | */ |
| 361 | static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) | 478 | static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) |
| 362 | { | 479 | { |
| 363 | struct fcoe_interface *fcoe; | 480 | struct fcoe_port *port = lport_priv(lport); |
| 481 | struct fcoe_interface *fcoe = port->fcoe; | ||
| 364 | 482 | ||
| 365 | fcoe = fcoe_from_ctlr(fip); | ||
| 366 | rtnl_lock(); | 483 | rtnl_lock(); |
| 367 | if (!is_zero_ether_addr(old)) | 484 | if (!is_zero_ether_addr(port->data_src_addr)) |
| 368 | dev_unicast_delete(fcoe->netdev, old); | 485 | dev_unicast_delete(fcoe->netdev, port->data_src_addr); |
| 369 | dev_unicast_add(fcoe->netdev, new); | 486 | if (!is_zero_ether_addr(addr)) |
| 487 | dev_unicast_add(fcoe->netdev, addr); | ||
| 488 | memcpy(port->data_src_addr, addr, ETH_ALEN); | ||
| 370 | rtnl_unlock(); | 489 | rtnl_unlock(); |
| 371 | } | 490 | } |
| 372 | 491 | ||
| 373 | /** | 492 | /** |
| 374 | * fcoe_lport_config() - sets up the fc_lport | 493 | * fcoe_get_src_mac() - return the Ethernet source address for an lport |
| 375 | * @lp: ptr to the fc_lport | 494 | * @lport: libfc lport |
| 495 | */ | ||
| 496 | static u8 *fcoe_get_src_mac(struct fc_lport *lport) | ||
| 497 | { | ||
| 498 | struct fcoe_port *port = lport_priv(lport); | ||
| 499 | |||
| 500 | return port->data_src_addr; | ||
| 501 | } | ||
| 502 | |||
| 503 | /** | ||
| 504 | * fcoe_lport_config() - Set up a local port | ||
| 505 | * @lport: The local port to be setup | ||
| 376 | * | 506 | * |
| 377 | * Returns: 0 for success | 507 | * Returns: 0 for success |
| 378 | */ | 508 | */ |
| 379 | static int fcoe_lport_config(struct fc_lport *lp) | 509 | static int fcoe_lport_config(struct fc_lport *lport) |
| 380 | { | 510 | { |
| 381 | lp->link_up = 0; | 511 | lport->link_up = 0; |
| 382 | lp->qfull = 0; | 512 | lport->qfull = 0; |
| 383 | lp->max_retry_count = 3; | 513 | lport->max_retry_count = 3; |
| 384 | lp->max_rport_retry_count = 3; | 514 | lport->max_rport_retry_count = 3; |
| 385 | lp->e_d_tov = 2 * 1000; /* FC-FS default */ | 515 | lport->e_d_tov = 2 * 1000; /* FC-FS default */ |
| 386 | lp->r_a_tov = 2 * 2 * 1000; | 516 | lport->r_a_tov = 2 * 2 * 1000; |
| 387 | lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | | 517 | lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | |
| 388 | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); | 518 | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); |
| 389 | 519 | lport->does_npiv = 1; | |
| 390 | fc_lport_init_stats(lp); | 520 | |
| 521 | fc_lport_init_stats(lport); | ||
| 391 | 522 | ||
| 392 | /* lport fc_lport related configuration */ | 523 | /* lport fc_lport related configuration */ |
| 393 | fc_lport_config(lp); | 524 | fc_lport_config(lport); |
| 394 | 525 | ||
| 395 | /* offload related configuration */ | 526 | /* offload related configuration */ |
| 396 | lp->crc_offload = 0; | 527 | lport->crc_offload = 0; |
| 397 | lp->seq_offload = 0; | 528 | lport->seq_offload = 0; |
| 398 | lp->lro_enabled = 0; | 529 | lport->lro_enabled = 0; |
| 399 | lp->lro_xid = 0; | 530 | lport->lro_xid = 0; |
| 400 | lp->lso_max = 0; | 531 | lport->lso_max = 0; |
| 401 | 532 | ||
| 402 | return 0; | 533 | return 0; |
| 403 | } | 534 | } |
| 404 | 535 | ||
| 405 | /** | 536 | /** |
| 406 | * fcoe_queue_timer() - fcoe queue timer | 537 | * fcoe_queue_timer() - The fcoe queue timer |
| 407 | * @lp: the fc_lport pointer | 538 | * @lport: The local port |
| 408 | * | 539 | * |
| 409 | * Calls fcoe_check_wait_queue on timeout | 540 | * Calls fcoe_check_wait_queue on timeout |
| 410 | * | ||
| 411 | */ | 541 | */ |
| 412 | static void fcoe_queue_timer(ulong lp) | 542 | static void fcoe_queue_timer(ulong lport) |
| 413 | { | 543 | { |
| 414 | fcoe_check_wait_queue((struct fc_lport *)lp, NULL); | 544 | fcoe_check_wait_queue((struct fc_lport *)lport, NULL); |
| 415 | } | 545 | } |
| 416 | 546 | ||
| 417 | /** | 547 | /** |
| 418 | * fcoe_netdev_config() - Set up netdev for SW FCoE | 548 | * fcoe_netdev_config() - Set up net device for SW FCoE |
| 419 | * @lp : ptr to the fc_lport | 549 | * @lport: The local port that is associated with the net device |
| 420 | * @netdev : ptr to the associated netdevice struct | 550 | * @netdev: The associated net device |
| 421 | * | 551 | * |
| 422 | * Must be called after fcoe_lport_config() as it will use lport mutex | 552 | * Must be called after fcoe_lport_config() as it will use local port mutex |
| 423 | * | 553 | * |
| 424 | * Returns : 0 for success | 554 | * Returns: 0 for success |
| 425 | */ | 555 | */ |
| 426 | static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) | 556 | static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) |
| 427 | { | 557 | { |
| 428 | u32 mfs; | 558 | u32 mfs; |
| 429 | u64 wwnn, wwpn; | 559 | u64 wwnn, wwpn; |
| 430 | struct fcoe_interface *fcoe; | 560 | struct fcoe_interface *fcoe; |
| 431 | struct fcoe_port *port; | 561 | struct fcoe_port *port; |
| 562 | int vid = 0; | ||
| 432 | 563 | ||
| 433 | /* Setup lport private data to point to fcoe softc */ | 564 | /* Setup lport private data to point to fcoe softc */ |
| 434 | port = lport_priv(lp); | 565 | port = lport_priv(lport); |
| 435 | fcoe = port->fcoe; | 566 | fcoe = port->fcoe; |
| 436 | 567 | ||
| 437 | /* | 568 | /* |
| @@ -439,86 +570,112 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
| 439 | * user-configured limit. If the MFS is too low, fcoe_link_ok() | 570 | * user-configured limit. If the MFS is too low, fcoe_link_ok() |
| 440 | * will return 0, so do this first. | 571 | * will return 0, so do this first. |
| 441 | */ | 572 | */ |
| 442 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + | 573 | mfs = netdev->mtu; |
| 443 | sizeof(struct fcoe_crc_eof)); | 574 | if (netdev->features & NETIF_F_FCOE_MTU) { |
| 444 | if (fc_set_mfs(lp, mfs)) | 575 | mfs = FCOE_MTU; |
| 576 | FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); | ||
| 577 | } | ||
| 578 | mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); | ||
| 579 | if (fc_set_mfs(lport, mfs)) | ||
| 445 | return -EINVAL; | 580 | return -EINVAL; |
| 446 | 581 | ||
| 447 | /* offload features support */ | 582 | /* offload features support */ |
| 448 | if (netdev->features & NETIF_F_SG) | 583 | if (netdev->features & NETIF_F_SG) |
| 449 | lp->sg_supp = 1; | 584 | lport->sg_supp = 1; |
| 450 | 585 | ||
| 451 | if (netdev->features & NETIF_F_FCOE_CRC) { | 586 | if (netdev->features & NETIF_F_FCOE_CRC) { |
| 452 | lp->crc_offload = 1; | 587 | lport->crc_offload = 1; |
| 453 | FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); | 588 | FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); |
| 454 | } | 589 | } |
| 455 | if (netdev->features & NETIF_F_FSO) { | 590 | if (netdev->features & NETIF_F_FSO) { |
| 456 | lp->seq_offload = 1; | 591 | lport->seq_offload = 1; |
| 457 | lp->lso_max = netdev->gso_max_size; | 592 | lport->lso_max = netdev->gso_max_size; |
| 458 | FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", | 593 | FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", |
| 459 | lp->lso_max); | 594 | lport->lso_max); |
| 460 | } | 595 | } |
| 461 | if (netdev->fcoe_ddp_xid) { | 596 | if (netdev->fcoe_ddp_xid) { |
| 462 | lp->lro_enabled = 1; | 597 | lport->lro_enabled = 1; |
| 463 | lp->lro_xid = netdev->fcoe_ddp_xid; | 598 | lport->lro_xid = netdev->fcoe_ddp_xid; |
| 464 | FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", | 599 | FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", |
| 465 | lp->lro_xid); | 600 | lport->lro_xid); |
| 466 | } | 601 | } |
| 467 | skb_queue_head_init(&port->fcoe_pending_queue); | 602 | skb_queue_head_init(&port->fcoe_pending_queue); |
| 468 | port->fcoe_pending_queue_active = 0; | 603 | port->fcoe_pending_queue_active = 0; |
| 469 | setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); | 604 | setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); |
| 470 | 605 | ||
| 471 | wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); | 606 | if (!lport->vport) { |
| 472 | fc_set_wwnn(lp, wwnn); | 607 | /* |
| 473 | /* XXX - 3rd arg needs to be vlan id */ | 608 | * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN: |
| 474 | wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); | 609 | * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0. |
| 475 | fc_set_wwpn(lp, wwpn); | 610 | * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID |
| 611 | */ | ||
| 612 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | ||
| 613 | vid = vlan_dev_vlan_id(netdev); | ||
| 614 | wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); | ||
| 615 | fc_set_wwnn(lport, wwnn); | ||
| 616 | wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid); | ||
| 617 | fc_set_wwpn(lport, wwpn); | ||
| 618 | } | ||
| 476 | 619 | ||
| 477 | return 0; | 620 | return 0; |
| 478 | } | 621 | } |
| 479 | 622 | ||
| 480 | /** | 623 | /** |
| 481 | * fcoe_shost_config() - Sets up fc_lport->host | 624 | * fcoe_shost_config() - Set up the SCSI host associated with a local port |
| 482 | * @lp : ptr to the fc_lport | 625 | * @lport: The local port |
| 483 | * @shost : ptr to the associated scsi host | 626 | * @shost: The SCSI host to associate with the local port |
| 484 | * @dev : device associated to scsi host | 627 | * @dev: The device associated with the SCSI host |
| 485 | * | 628 | * |
| 486 | * Must be called after fcoe_lport_config() and fcoe_netdev_config() | 629 | * Must be called after fcoe_lport_config() and fcoe_netdev_config() |
| 487 | * | 630 | * |
| 488 | * Returns : 0 for success | 631 | * Returns: 0 for success |
| 489 | */ | 632 | */ |
| 490 | static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, | 633 | static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost, |
| 491 | struct device *dev) | 634 | struct device *dev) |
| 492 | { | 635 | { |
| 493 | int rc = 0; | 636 | int rc = 0; |
| 494 | 637 | ||
| 495 | /* lport scsi host config */ | 638 | /* lport scsi host config */ |
| 496 | lp->host = shost; | 639 | lport->host->max_lun = FCOE_MAX_LUN; |
| 497 | 640 | lport->host->max_id = FCOE_MAX_FCP_TARGET; | |
| 498 | lp->host->max_lun = FCOE_MAX_LUN; | 641 | lport->host->max_channel = 0; |
| 499 | lp->host->max_id = FCOE_MAX_FCP_TARGET; | 642 | if (lport->vport) |
| 500 | lp->host->max_channel = 0; | 643 | lport->host->transportt = fcoe_vport_transport_template; |
| 501 | lp->host->transportt = scsi_transport_fcoe_sw; | 644 | else |
| 645 | lport->host->transportt = fcoe_transport_template; | ||
| 502 | 646 | ||
| 503 | /* add the new host to the SCSI-ml */ | 647 | /* add the new host to the SCSI-ml */ |
| 504 | rc = scsi_add_host(lp->host, dev); | 648 | rc = scsi_add_host(lport->host, dev); |
| 505 | if (rc) { | 649 | if (rc) { |
| 506 | FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " | 650 | FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: " |
| 507 | "error on scsi_add_host\n"); | 651 | "error on scsi_add_host\n"); |
| 508 | return rc; | 652 | return rc; |
| 509 | } | 653 | } |
| 510 | sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", | 654 | |
| 511 | FCOE_NAME, FCOE_VERSION, | 655 | if (!lport->vport) |
| 512 | fcoe_netdev(lp)->name); | 656 | fc_host_max_npiv_vports(lport->host) = USHORT_MAX; |
| 657 | |||
| 658 | snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, | ||
| 659 | "%s v%s over %s", FCOE_NAME, FCOE_VERSION, | ||
| 660 | fcoe_netdev(lport)->name); | ||
| 513 | 661 | ||
| 514 | return 0; | 662 | return 0; |
| 515 | } | 663 | } |
| 516 | 664 | ||
| 517 | /* | 665 | /** |
| 518 | * fcoe_oem_match() - match for read types IO | 666 | * fcoe_oem_match() - The match routine for the offloaded exchange manager |
| 519 | * @fp: the fc_frame for new IO. | 667 | * @fp: The I/O frame |
| 668 | * | ||
| 669 | * This routine will be associated with an exchange manager (EM). When | ||
| 670 | * the libfc exchange handling code is looking for an EM to use it will | ||
| 671 | * call this routine and pass it the frame that it wishes to send. This | ||
| 672 | * routine will return True if the associated EM is to be used and False | ||
| 673 | * if the exchange code should continue looking for an EM. | ||
| 520 | * | 674 | * |
| 521 | * Returns : true for read types IO, otherwise returns false. | 675 | * The offload EM that this routine is associated with will handle any |
| 676 | * packets that are for SCSI read requests. | ||
| 677 | * | ||
| 678 | * Returns: True for read types I/O, otherwise returns false. | ||
| 522 | */ | 679 | */ |
| 523 | bool fcoe_oem_match(struct fc_frame *fp) | 680 | bool fcoe_oem_match(struct fc_frame *fp) |
| 524 | { | 681 | { |
| @@ -527,14 +684,14 @@ bool fcoe_oem_match(struct fc_frame *fp) | |||
| 527 | } | 684 | } |
| 528 | 685 | ||
| 529 | /** | 686 | /** |
| 530 | * fcoe_em_config() - allocates em for this lport | 687 | * fcoe_em_config() - Allocate and configure an exchange manager |
| 531 | * @lp: the fcoe that em is to allocated for | 688 | * @lport: The local port that the new EM will be associated with |
| 532 | * | 689 | * |
| 533 | * Returns : 0 on success | 690 | * Returns: 0 on success |
| 534 | */ | 691 | */ |
| 535 | static inline int fcoe_em_config(struct fc_lport *lp) | 692 | static inline int fcoe_em_config(struct fc_lport *lport) |
| 536 | { | 693 | { |
| 537 | struct fcoe_port *port = lport_priv(lp); | 694 | struct fcoe_port *port = lport_priv(lport); |
| 538 | struct fcoe_interface *fcoe = port->fcoe; | 695 | struct fcoe_interface *fcoe = port->fcoe; |
| 539 | struct fcoe_interface *oldfcoe = NULL; | 696 | struct fcoe_interface *oldfcoe = NULL; |
| 540 | struct net_device *old_real_dev, *cur_real_dev; | 697 | struct net_device *old_real_dev, *cur_real_dev; |
| @@ -545,8 +702,9 @@ static inline int fcoe_em_config(struct fc_lport *lp) | |||
| 545 | * Check if need to allocate an em instance for | 702 | * Check if need to allocate an em instance for |
| 546 | * offload exchange ids to be shared across all VN_PORTs/lport. | 703 | * offload exchange ids to be shared across all VN_PORTs/lport. |
| 547 | */ | 704 | */ |
| 548 | if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) { | 705 | if (!lport->lro_enabled || !lport->lro_xid || |
| 549 | lp->lro_xid = 0; | 706 | (lport->lro_xid >= max_xid)) { |
| 707 | lport->lro_xid = 0; | ||
| 550 | goto skip_oem; | 708 | goto skip_oem; |
| 551 | } | 709 | } |
| 552 | 710 | ||
| @@ -572,16 +730,16 @@ static inline int fcoe_em_config(struct fc_lport *lp) | |||
| 572 | } | 730 | } |
| 573 | 731 | ||
| 574 | if (fcoe->oem) { | 732 | if (fcoe->oem) { |
| 575 | if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) { | 733 | if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) { |
| 576 | printk(KERN_ERR "fcoe_em_config: failed to add " | 734 | printk(KERN_ERR "fcoe_em_config: failed to add " |
| 577 | "offload em:%p on interface:%s\n", | 735 | "offload em:%p on interface:%s\n", |
| 578 | fcoe->oem, fcoe->netdev->name); | 736 | fcoe->oem, fcoe->netdev->name); |
| 579 | return -ENOMEM; | 737 | return -ENOMEM; |
| 580 | } | 738 | } |
| 581 | } else { | 739 | } else { |
| 582 | fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, | 740 | fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, |
| 583 | FCOE_MIN_XID, lp->lro_xid, | 741 | FCOE_MIN_XID, lport->lro_xid, |
| 584 | fcoe_oem_match); | 742 | fcoe_oem_match); |
| 585 | if (!fcoe->oem) { | 743 | if (!fcoe->oem) { |
| 586 | printk(KERN_ERR "fcoe_em_config: failed to allocate " | 744 | printk(KERN_ERR "fcoe_em_config: failed to allocate " |
| 587 | "em for offload exches on interface:%s\n", | 745 | "em for offload exches on interface:%s\n", |
| @@ -593,10 +751,10 @@ static inline int fcoe_em_config(struct fc_lport *lp) | |||
| 593 | /* | 751 | /* |
| 594 | * Exclude offload EM xid range from next EM xid range. | 752 | * Exclude offload EM xid range from next EM xid range. |
| 595 | */ | 753 | */ |
| 596 | min_xid += lp->lro_xid + 1; | 754 | min_xid += lport->lro_xid + 1; |
| 597 | 755 | ||
| 598 | skip_oem: | 756 | skip_oem: |
| 599 | if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { | 757 | if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) { |
| 600 | printk(KERN_ERR "fcoe_em_config: failed to " | 758 | printk(KERN_ERR "fcoe_em_config: failed to " |
| 601 | "allocate em on interface %s\n", fcoe->netdev->name); | 759 | "allocate em on interface %s\n", fcoe->netdev->name); |
| 602 | return -ENOMEM; | 760 | return -ENOMEM; |
| @@ -606,8 +764,8 @@ skip_oem: | |||
| 606 | } | 764 | } |
| 607 | 765 | ||
| 608 | /** | 766 | /** |
| 609 | * fcoe_if_destroy() - FCoE software HBA tear-down function | 767 | * fcoe_if_destroy() - Tear down a SW FCoE instance |
| 610 | * @lport: fc_lport to destroy | 768 | * @lport: The local port to be destroyed |
| 611 | */ | 769 | */ |
| 612 | static void fcoe_if_destroy(struct fc_lport *lport) | 770 | static void fcoe_if_destroy(struct fc_lport *lport) |
| 613 | { | 771 | { |
| @@ -630,6 +788,11 @@ static void fcoe_if_destroy(struct fc_lport *lport) | |||
| 630 | /* Free existing transmit skbs */ | 788 | /* Free existing transmit skbs */ |
| 631 | fcoe_clean_pending_queue(lport); | 789 | fcoe_clean_pending_queue(lport); |
| 632 | 790 | ||
| 791 | rtnl_lock(); | ||
| 792 | if (!is_zero_ether_addr(port->data_src_addr)) | ||
| 793 | dev_unicast_delete(netdev, port->data_src_addr); | ||
| 794 | rtnl_unlock(); | ||
| 795 | |||
| 633 | /* receives may not be stopped until after this */ | 796 | /* receives may not be stopped until after this */ |
| 634 | fcoe_interface_put(fcoe); | 797 | fcoe_interface_put(fcoe); |
| 635 | 798 | ||
| @@ -650,82 +813,89 @@ static void fcoe_if_destroy(struct fc_lport *lport) | |||
| 650 | scsi_host_put(lport->host); | 813 | scsi_host_put(lport->host); |
| 651 | } | 814 | } |
| 652 | 815 | ||
| 653 | /* | 816 | /** |
| 654 | * fcoe_ddp_setup - calls LLD's ddp_setup through net_device | 817 | * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device |
| 655 | * @lp: the corresponding fc_lport | 818 | * @lport: The local port to setup DDP for |
| 656 | * @xid: the exchange id for this ddp transfer | 819 | * @xid: The exchange ID for this DDP transfer |
| 657 | * @sgl: the scatterlist describing this transfer | 820 | * @sgl: The scatterlist describing this transfer |
| 658 | * @sgc: number of sg items | 821 | * @sgc: The number of sg items |
| 659 | * | 822 | * |
| 660 | * Returns : 0 no ddp | 823 | * Returns: 0 if the DDP context was not configured |
| 661 | */ | 824 | */ |
| 662 | static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, | 825 | static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, |
| 663 | struct scatterlist *sgl, unsigned int sgc) | 826 | struct scatterlist *sgl, unsigned int sgc) |
| 664 | { | 827 | { |
| 665 | struct net_device *n = fcoe_netdev(lp); | 828 | struct net_device *netdev = fcoe_netdev(lport); |
| 666 | 829 | ||
| 667 | if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup) | 830 | if (netdev->netdev_ops->ndo_fcoe_ddp_setup) |
| 668 | return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); | 831 | return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, |
| 832 | xid, sgl, | ||
| 833 | sgc); | ||
| 669 | 834 | ||
| 670 | return 0; | 835 | return 0; |
| 671 | } | 836 | } |
| 672 | 837 | ||
| 673 | /* | 838 | /** |
| 674 | * fcoe_ddp_done - calls LLD's ddp_done through net_device | 839 | * fcoe_ddp_done() - Call a LLD's ddp_done through the net device |
| 675 | * @lp: the corresponding fc_lport | 840 | * @lport: The local port to complete DDP on |
| 676 | * @xid: the exchange id for this ddp transfer | 841 | * @xid: The exchange ID for this DDP transfer |
| 677 | * | 842 | * |
| 678 | * Returns : the length of data that have been completed by ddp | 843 | * Returns: the length of data that have been completed by DDP |
| 679 | */ | 844 | */ |
| 680 | static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) | 845 | static int fcoe_ddp_done(struct fc_lport *lport, u16 xid) |
| 681 | { | 846 | { |
| 682 | struct net_device *n = fcoe_netdev(lp); | 847 | struct net_device *netdev = fcoe_netdev(lport); |
| 683 | 848 | ||
| 684 | if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done) | 849 | if (netdev->netdev_ops->ndo_fcoe_ddp_done) |
| 685 | return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); | 850 | return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid); |
| 686 | return 0; | 851 | return 0; |
| 687 | } | 852 | } |
| 688 | 853 | ||
| 689 | static struct libfc_function_template fcoe_libfc_fcn_templ = { | ||
| 690 | .frame_send = fcoe_xmit, | ||
| 691 | .ddp_setup = fcoe_ddp_setup, | ||
| 692 | .ddp_done = fcoe_ddp_done, | ||
| 693 | }; | ||
| 694 | |||
| 695 | /** | 854 | /** |
| 696 | * fcoe_if_create() - this function creates the fcoe port | 855 | * fcoe_if_create() - Create a FCoE instance on an interface |
| 697 | * @fcoe: fcoe_interface structure to create an fc_lport instance on | 856 | * @fcoe: The FCoE interface to create a local port on |
| 698 | * @parent: device pointer to be the parent in sysfs for the SCSI host | 857 | * @parent: The device pointer to be the parent in sysfs for the SCSI host |
| 858 | * @npiv: Indicates if the port is a vport or not | ||
| 699 | * | 859 | * |
| 700 | * Creates fc_lport struct and scsi_host for lport, configures lport. | 860 | * Creates a fc_lport instance and a Scsi_Host instance and configure them. |
| 701 | * | 861 | * |
| 702 | * Returns : The allocated fc_lport or an error pointer | 862 | * Returns: The allocated fc_lport or an error pointer |
| 703 | */ | 863 | */ |
| 704 | static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, | 864 | static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, |
| 705 | struct device *parent) | 865 | struct device *parent, int npiv) |
| 706 | { | 866 | { |
| 707 | int rc; | 867 | struct net_device *netdev = fcoe->netdev; |
| 708 | struct fc_lport *lport = NULL; | 868 | struct fc_lport *lport = NULL; |
| 709 | struct fcoe_port *port; | 869 | struct fcoe_port *port; |
| 710 | struct Scsi_Host *shost; | 870 | struct Scsi_Host *shost; |
| 711 | struct net_device *netdev = fcoe->netdev; | 871 | int rc; |
| 872 | /* | ||
| 873 | * parent is only a vport if npiv is 1, | ||
| 874 | * but we'll only use vport in that case so go ahead and set it | ||
| 875 | */ | ||
| 876 | struct fc_vport *vport = dev_to_vport(parent); | ||
| 712 | 877 | ||
| 713 | FCOE_NETDEV_DBG(netdev, "Create Interface\n"); | 878 | FCOE_NETDEV_DBG(netdev, "Create Interface\n"); |
| 714 | 879 | ||
| 715 | shost = libfc_host_alloc(&fcoe_shost_template, | 880 | if (!npiv) { |
| 716 | sizeof(struct fcoe_port)); | 881 | lport = libfc_host_alloc(&fcoe_shost_template, |
| 717 | if (!shost) { | 882 | sizeof(struct fcoe_port)); |
| 883 | } else { | ||
| 884 | lport = libfc_vport_create(vport, | ||
| 885 | sizeof(struct fcoe_port)); | ||
| 886 | } | ||
| 887 | if (!lport) { | ||
| 718 | FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); | 888 | FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); |
| 719 | rc = -ENOMEM; | 889 | rc = -ENOMEM; |
| 720 | goto out; | 890 | goto out; |
| 721 | } | 891 | } |
| 722 | lport = shost_priv(shost); | 892 | shost = lport->host; |
| 723 | port = lport_priv(lport); | 893 | port = lport_priv(lport); |
| 724 | port->lport = lport; | 894 | port->lport = lport; |
| 725 | port->fcoe = fcoe; | 895 | port->fcoe = fcoe; |
| 726 | INIT_WORK(&port->destroy_work, fcoe_destroy_work); | 896 | INIT_WORK(&port->destroy_work, fcoe_destroy_work); |
| 727 | 897 | ||
| 728 | /* configure fc_lport, e.g., em */ | 898 | /* configure a fc_lport including the exchange manager */ |
| 729 | rc = fcoe_lport_config(lport); | 899 | rc = fcoe_lport_config(lport); |
| 730 | if (rc) { | 900 | if (rc) { |
| 731 | FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " | 901 | FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " |
| @@ -733,6 +903,13 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, | |||
| 733 | goto out_host_put; | 903 | goto out_host_put; |
| 734 | } | 904 | } |
| 735 | 905 | ||
| 906 | if (npiv) { | ||
| 907 | FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n", | ||
| 908 | vport->node_name, vport->port_name); | ||
| 909 | fc_set_wwnn(lport, vport->node_name); | ||
| 910 | fc_set_wwpn(lport, vport->port_name); | ||
| 911 | } | ||
| 912 | |||
| 736 | /* configure lport network properties */ | 913 | /* configure lport network properties */ |
| 737 | rc = fcoe_netdev_config(lport, netdev); | 914 | rc = fcoe_netdev_config(lport, netdev); |
| 738 | if (rc) { | 915 | if (rc) { |
| @@ -757,21 +934,24 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, | |||
| 757 | goto out_lp_destroy; | 934 | goto out_lp_destroy; |
| 758 | } | 935 | } |
| 759 | 936 | ||
| 760 | /* | 937 | if (!npiv) { |
| 761 | * fcoe_em_alloc() and fcoe_hostlist_add() both | 938 | /* |
| 762 | * need to be atomic with respect to other changes to the hostlist | 939 | * fcoe_em_alloc() and fcoe_hostlist_add() both |
| 763 | * since fcoe_em_alloc() looks for an existing EM | 940 | * need to be atomic with respect to other changes to the |
| 764 | * instance on host list updated by fcoe_hostlist_add(). | 941 | * hostlist since fcoe_em_alloc() looks for an existing EM |
| 765 | * | 942 | * instance on host list updated by fcoe_hostlist_add(). |
| 766 | * This is currently handled through the fcoe_config_mutex begin held. | 943 | * |
| 767 | */ | 944 | * This is currently handled through the fcoe_config_mutex |
| 945 | * begin held. | ||
| 946 | */ | ||
| 768 | 947 | ||
| 769 | /* lport exch manager allocation */ | 948 | /* lport exch manager allocation */ |
| 770 | rc = fcoe_em_config(lport); | 949 | rc = fcoe_em_config(lport); |
| 771 | if (rc) { | 950 | if (rc) { |
| 772 | FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " | 951 | FCOE_NETDEV_DBG(netdev, "Could not configure the EM " |
| 773 | "interface\n"); | 952 | "for the interface\n"); |
| 774 | goto out_lp_destroy; | 953 | goto out_lp_destroy; |
| 954 | } | ||
| 775 | } | 955 | } |
| 776 | 956 | ||
| 777 | fcoe_interface_get(fcoe); | 957 | fcoe_interface_get(fcoe); |
| @@ -786,17 +966,20 @@ out: | |||
| 786 | } | 966 | } |
| 787 | 967 | ||
| 788 | /** | 968 | /** |
| 789 | * fcoe_if_init() - attach to scsi transport | 969 | * fcoe_if_init() - Initialization routine for fcoe.ko |
| 970 | * | ||
| 971 | * Attaches the SW FCoE transport to the FC transport | ||
| 790 | * | 972 | * |
| 791 | * Returns : 0 on success | 973 | * Returns: 0 on success |
| 792 | */ | 974 | */ |
| 793 | static int __init fcoe_if_init(void) | 975 | static int __init fcoe_if_init(void) |
| 794 | { | 976 | { |
| 795 | /* attach to scsi transport */ | 977 | /* attach to scsi transport */ |
| 796 | scsi_transport_fcoe_sw = | 978 | fcoe_transport_template = fc_attach_transport(&fcoe_transport_function); |
| 797 | fc_attach_transport(&fcoe_transport_function); | 979 | fcoe_vport_transport_template = |
| 980 | fc_attach_transport(&fcoe_vport_transport_function); | ||
| 798 | 981 | ||
| 799 | if (!scsi_transport_fcoe_sw) { | 982 | if (!fcoe_transport_template) { |
| 800 | printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); | 983 | printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); |
| 801 | return -ENODEV; | 984 | return -ENODEV; |
| 802 | } | 985 | } |
| @@ -805,20 +988,24 @@ static int __init fcoe_if_init(void) | |||
| 805 | } | 988 | } |
| 806 | 989 | ||
| 807 | /** | 990 | /** |
| 808 | * fcoe_if_exit() - detach from scsi transport | 991 | * fcoe_if_exit() - Tear down fcoe.ko |
| 992 | * | ||
| 993 | * Detaches the SW FCoE transport from the FC transport | ||
| 809 | * | 994 | * |
| 810 | * Returns : 0 on success | 995 | * Returns: 0 on success |
| 811 | */ | 996 | */ |
| 812 | int __exit fcoe_if_exit(void) | 997 | int __exit fcoe_if_exit(void) |
| 813 | { | 998 | { |
| 814 | fc_release_transport(scsi_transport_fcoe_sw); | 999 | fc_release_transport(fcoe_transport_template); |
| 815 | scsi_transport_fcoe_sw = NULL; | 1000 | fc_release_transport(fcoe_vport_transport_template); |
| 1001 | fcoe_transport_template = NULL; | ||
| 1002 | fcoe_vport_transport_template = NULL; | ||
| 816 | return 0; | 1003 | return 0; |
| 817 | } | 1004 | } |
| 818 | 1005 | ||
| 819 | /** | 1006 | /** |
| 820 | * fcoe_percpu_thread_create() - Create a receive thread for an online cpu | 1007 | * fcoe_percpu_thread_create() - Create a receive thread for an online CPU |
| 821 | * @cpu: cpu index for the online cpu | 1008 | * @cpu: The CPU index of the CPU to create a receive thread for |
| 822 | */ | 1009 | */ |
| 823 | static void fcoe_percpu_thread_create(unsigned int cpu) | 1010 | static void fcoe_percpu_thread_create(unsigned int cpu) |
| 824 | { | 1011 | { |
| @@ -841,8 +1028,8 @@ static void fcoe_percpu_thread_create(unsigned int cpu) | |||
| 841 | } | 1028 | } |
| 842 | 1029 | ||
| 843 | /** | 1030 | /** |
| 844 | * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu | 1031 | * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU |
| 845 | * @cpu: cpu index the rx thread is to be removed | 1032 | * @cpu: The CPU index of the CPU whose receive thread is to be destroyed |
| 846 | * | 1033 | * |
| 847 | * Destroys a per-CPU Rx thread. Any pending skbs are moved to the | 1034 | * Destroys a per-CPU Rx thread. Any pending skbs are moved to the |
| 848 | * current CPU's Rx thread. If the thread being destroyed is bound to | 1035 | * current CPU's Rx thread. If the thread being destroyed is bound to |
| @@ -890,7 +1077,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
| 890 | } else { | 1077 | } else { |
| 891 | /* | 1078 | /* |
| 892 | * The targeted CPU is not initialized and cannot accept | 1079 | * The targeted CPU is not initialized and cannot accept |
| 893 | * new skbs. Unlock the targeted CPU and drop the skbs | 1080 | * new skbs. Unlock the targeted CPU and drop the skbs |
| 894 | * on the CPU that is going offline. | 1081 | * on the CPU that is going offline. |
| 895 | */ | 1082 | */ |
| 896 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) | 1083 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) |
| @@ -931,12 +1118,12 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
| 931 | } | 1118 | } |
| 932 | 1119 | ||
| 933 | /** | 1120 | /** |
| 934 | * fcoe_cpu_callback() - fcoe cpu hotplug event callback | 1121 | * fcoe_cpu_callback() - Handler for CPU hotplug events |
| 935 | * @nfb: callback data block | 1122 | * @nfb: The callback data block |
| 936 | * @action: event triggering the callback | 1123 | * @action: The event triggering the callback |
| 937 | * @hcpu: index for the cpu of this event | 1124 | * @hcpu: The index of the CPU that the event is for |
| 938 | * | 1125 | * |
| 939 | * This creates or destroys per cpu data for fcoe | 1126 | * This creates or destroys per-CPU data for fcoe |
| 940 | * | 1127 | * |
| 941 | * Returns NOTIFY_OK always. | 1128 | * Returns NOTIFY_OK always. |
| 942 | */ | 1129 | */ |
| @@ -962,25 +1149,22 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, | |||
| 962 | return NOTIFY_OK; | 1149 | return NOTIFY_OK; |
| 963 | } | 1150 | } |
| 964 | 1151 | ||
| 965 | static struct notifier_block fcoe_cpu_notifier = { | ||
| 966 | .notifier_call = fcoe_cpu_callback, | ||
| 967 | }; | ||
| 968 | |||
| 969 | /** | 1152 | /** |
| 970 | * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ | 1153 | * fcoe_rcv() - Receive packets from a net device |
| 971 | * @skb: the receive skb | 1154 | * @skb: The received packet |
| 972 | * @dev: associated net device | 1155 | * @netdev: The net device that the packet was received on |
| 973 | * @ptype: context | 1156 | * @ptype: The packet type context |
| 974 | * @olddev: last device | 1157 | * @olddev: The last device net device |
| 975 | * | 1158 | * |
| 976 | * this function will receive the packet and build fc frame and pass it up | 1159 | * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a |
| 1160 | * FC frame and passes the frame to libfc. | ||
| 977 | * | 1161 | * |
| 978 | * Returns: 0 for success | 1162 | * Returns: 0 for success |
| 979 | */ | 1163 | */ |
| 980 | int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | 1164 | int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, |
| 981 | struct packet_type *ptype, struct net_device *olddev) | 1165 | struct packet_type *ptype, struct net_device *olddev) |
| 982 | { | 1166 | { |
| 983 | struct fc_lport *lp; | 1167 | struct fc_lport *lport; |
| 984 | struct fcoe_rcv_info *fr; | 1168 | struct fcoe_rcv_info *fr; |
| 985 | struct fcoe_interface *fcoe; | 1169 | struct fcoe_interface *fcoe; |
| 986 | struct fc_frame_header *fh; | 1170 | struct fc_frame_header *fh; |
| @@ -988,15 +1172,15 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 988 | unsigned int cpu; | 1172 | unsigned int cpu; |
| 989 | 1173 | ||
| 990 | fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); | 1174 | fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); |
| 991 | lp = fcoe->ctlr.lp; | 1175 | lport = fcoe->ctlr.lp; |
| 992 | if (unlikely(lp == NULL)) { | 1176 | if (unlikely(!lport)) { |
| 993 | FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); | 1177 | FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); |
| 994 | goto err2; | 1178 | goto err2; |
| 995 | } | 1179 | } |
| 996 | if (!lp->link_up) | 1180 | if (!lport->link_up) |
| 997 | goto err2; | 1181 | goto err2; |
| 998 | 1182 | ||
| 999 | FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " | 1183 | FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " |
| 1000 | "data:%p tail:%p end:%p sum:%d dev:%s", | 1184 | "data:%p tail:%p end:%p sum:%d dev:%s", |
| 1001 | skb->len, skb->data_len, skb->head, skb->data, | 1185 | skb->len, skb->data_len, skb->head, skb->data, |
| 1002 | skb_tail_pointer(skb), skb_end_pointer(skb), | 1186 | skb_tail_pointer(skb), skb_end_pointer(skb), |
| @@ -1004,7 +1188,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1004 | 1188 | ||
| 1005 | /* check for FCOE packet type */ | 1189 | /* check for FCOE packet type */ |
| 1006 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { | 1190 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { |
| 1007 | FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); | 1191 | FCOE_NETDEV_DBG(netdev, "Wrong FC type frame"); |
| 1008 | goto err; | 1192 | goto err; |
| 1009 | } | 1193 | } |
| 1010 | 1194 | ||
| @@ -1013,14 +1197,14 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1013 | * and FC headers are pulled into the linear data area. | 1197 | * and FC headers are pulled into the linear data area. |
| 1014 | */ | 1198 | */ |
| 1015 | if (unlikely((skb->len < FCOE_MIN_FRAME) || | 1199 | if (unlikely((skb->len < FCOE_MIN_FRAME) || |
| 1016 | !pskb_may_pull(skb, FCOE_HEADER_LEN))) | 1200 | !pskb_may_pull(skb, FCOE_HEADER_LEN))) |
| 1017 | goto err; | 1201 | goto err; |
| 1018 | 1202 | ||
| 1019 | skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); | 1203 | skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); |
| 1020 | fh = (struct fc_frame_header *) skb_transport_header(skb); | 1204 | fh = (struct fc_frame_header *) skb_transport_header(skb); |
| 1021 | 1205 | ||
| 1022 | fr = fcoe_dev_from_skb(skb); | 1206 | fr = fcoe_dev_from_skb(skb); |
| 1023 | fr->fr_dev = lp; | 1207 | fr->fr_dev = lport; |
| 1024 | fr->ptype = ptype; | 1208 | fr->ptype = ptype; |
| 1025 | 1209 | ||
| 1026 | /* | 1210 | /* |
| @@ -1042,7 +1226,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1042 | * the first CPU now. For non-SMP systems this | 1226 | * the first CPU now. For non-SMP systems this |
| 1043 | * will check the same CPU twice. | 1227 | * will check the same CPU twice. |
| 1044 | */ | 1228 | */ |
| 1045 | FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " | 1229 | FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " |
| 1046 | "ready for incoming skb- using first online " | 1230 | "ready for incoming skb- using first online " |
| 1047 | "CPU.\n"); | 1231 | "CPU.\n"); |
| 1048 | 1232 | ||
| @@ -1061,15 +1245,29 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1061 | * this skb. We also have this receive thread locked, | 1245 | * this skb. We also have this receive thread locked, |
| 1062 | * so we're free to queue skbs into it's queue. | 1246 | * so we're free to queue skbs into it's queue. |
| 1063 | */ | 1247 | */ |
| 1064 | __skb_queue_tail(&fps->fcoe_rx_list, skb); | ||
| 1065 | if (fps->fcoe_rx_list.qlen == 1) | ||
| 1066 | wake_up_process(fps->thread); | ||
| 1067 | 1248 | ||
| 1068 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 1249 | /* If this is a SCSI-FCP frame, and this is already executing on the |
| 1250 | * correct CPU, and the queue for this CPU is empty, then go ahead | ||
| 1251 | * and process the frame directly in the softirq context. | ||
| 1252 | * This lets us process completions without context switching from the | ||
| 1253 | * NET_RX softirq, to our receive processing thread, and then back to | ||
| 1254 | * BLOCK softirq context. | ||
| 1255 | */ | ||
| 1256 | if (fh->fh_type == FC_TYPE_FCP && | ||
| 1257 | cpu == smp_processor_id() && | ||
| 1258 | skb_queue_empty(&fps->fcoe_rx_list)) { | ||
| 1259 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | ||
| 1260 | fcoe_recv_frame(skb); | ||
| 1261 | } else { | ||
| 1262 | __skb_queue_tail(&fps->fcoe_rx_list, skb); | ||
| 1263 | if (fps->fcoe_rx_list.qlen == 1) | ||
| 1264 | wake_up_process(fps->thread); | ||
| 1265 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | ||
| 1266 | } | ||
| 1069 | 1267 | ||
| 1070 | return 0; | 1268 | return 0; |
| 1071 | err: | 1269 | err: |
| 1072 | fc_lport_get_stats(lp)->ErrorFrames++; | 1270 | fc_lport_get_stats(lport)->ErrorFrames++; |
| 1073 | 1271 | ||
| 1074 | err2: | 1272 | err2: |
| 1075 | kfree_skb(skb); | 1273 | kfree_skb(skb); |
| @@ -1077,17 +1275,21 @@ err2: | |||
| 1077 | } | 1275 | } |
| 1078 | 1276 | ||
| 1079 | /** | 1277 | /** |
| 1080 | * fcoe_start_io() - pass to netdev to start xmit for fcoe | 1278 | * fcoe_start_io() - Start FCoE I/O |
| 1081 | * @skb: the skb to be xmitted | 1279 | * @skb: The packet to be transmitted |
| 1280 | * | ||
| 1281 | * This routine is called from the net device to start transmitting | ||
| 1282 | * FCoE packets. | ||
| 1082 | * | 1283 | * |
| 1083 | * Returns: 0 for success | 1284 | * Returns: 0 for success |
| 1084 | */ | 1285 | */ |
| 1085 | static inline int fcoe_start_io(struct sk_buff *skb) | 1286 | static inline int fcoe_start_io(struct sk_buff *skb) |
| 1086 | { | 1287 | { |
| 1288 | struct sk_buff *nskb; | ||
| 1087 | int rc; | 1289 | int rc; |
| 1088 | 1290 | ||
| 1089 | skb_get(skb); | 1291 | nskb = skb_clone(skb, GFP_ATOMIC); |
| 1090 | rc = dev_queue_xmit(skb); | 1292 | rc = dev_queue_xmit(nskb); |
| 1091 | if (rc != 0) | 1293 | if (rc != 0) |
| 1092 | return rc; | 1294 | return rc; |
| 1093 | kfree_skb(skb); | 1295 | kfree_skb(skb); |
| @@ -1095,9 +1297,15 @@ static inline int fcoe_start_io(struct sk_buff *skb) | |||
| 1095 | } | 1297 | } |
| 1096 | 1298 | ||
| 1097 | /** | 1299 | /** |
| 1098 | * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof | 1300 | * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC |
| 1099 | * @skb: the skb to be xmitted | 1301 | * @skb: The packet to be transmitted |
| 1100 | * @tlen: total len | 1302 | * @tlen: The total length of the trailer |
| 1303 | * | ||
| 1304 | * This routine allocates a page for frame trailers. The page is re-used if | ||
| 1305 | * there is enough room left on it for the current trailer. If there isn't | ||
| 1306 | * enough buffer left a new page is allocated for the trailer. Reference to | ||
| 1307 | * the page from this function as well as the skbs using the page fragments | ||
| 1308 | * ensure that the page is freed at the appropriate time. | ||
| 1101 | * | 1309 | * |
| 1102 | * Returns: 0 for success | 1310 | * Returns: 0 for success |
| 1103 | */ | 1311 | */ |
| @@ -1136,11 +1344,12 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) | |||
| 1136 | } | 1344 | } |
| 1137 | 1345 | ||
| 1138 | /** | 1346 | /** |
| 1139 | * fcoe_fc_crc() - calculates FC CRC in this fcoe skb | 1347 | * fcoe_fc_crc() - Calculates the CRC for a given frame |
| 1140 | * @fp: the fc_frame containing data to be checksummed | 1348 | * @fp: The frame to be checksumed |
| 1349 | * | ||
| 1350 | * This uses crc32() routine to calculate the CRC for a frame | ||
| 1141 | * | 1351 | * |
| 1142 | * This uses crc32() to calculate the crc for port frame | 1352 | * Return: The 32 bit CRC value |
| 1143 | * Return : 32 bit crc | ||
| 1144 | */ | 1353 | */ |
| 1145 | u32 fcoe_fc_crc(struct fc_frame *fp) | 1354 | u32 fcoe_fc_crc(struct fc_frame *fp) |
| 1146 | { | 1355 | { |
| @@ -1171,13 +1380,13 @@ u32 fcoe_fc_crc(struct fc_frame *fp) | |||
| 1171 | } | 1380 | } |
| 1172 | 1381 | ||
| 1173 | /** | 1382 | /** |
| 1174 | * fcoe_xmit() - FCoE frame transmit function | 1383 | * fcoe_xmit() - Transmit a FCoE frame |
| 1175 | * @lp: the associated local fcoe | 1384 | * @lport: The local port that the frame is to be transmitted for |
| 1176 | * @fp: the fc_frame to be transmitted | 1385 | * @fp: The frame to be transmitted |
| 1177 | * | 1386 | * |
| 1178 | * Return : 0 for success | 1387 | * Return: 0 for success |
| 1179 | */ | 1388 | */ |
| 1180 | int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | 1389 | int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) |
| 1181 | { | 1390 | { |
| 1182 | int wlen; | 1391 | int wlen; |
| 1183 | u32 crc; | 1392 | u32 crc; |
| @@ -1189,7 +1398,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1189 | unsigned int hlen; /* header length implies the version */ | 1398 | unsigned int hlen; /* header length implies the version */ |
| 1190 | unsigned int tlen; /* trailer length */ | 1399 | unsigned int tlen; /* trailer length */ |
| 1191 | unsigned int elen; /* eth header, may include vlan */ | 1400 | unsigned int elen; /* eth header, may include vlan */ |
| 1192 | struct fcoe_port *port = lport_priv(lp); | 1401 | struct fcoe_port *port = lport_priv(lport); |
| 1193 | struct fcoe_interface *fcoe = port->fcoe; | 1402 | struct fcoe_interface *fcoe = port->fcoe; |
| 1194 | u8 sof, eof; | 1403 | u8 sof, eof; |
| 1195 | struct fcoe_hdr *hp; | 1404 | struct fcoe_hdr *hp; |
| @@ -1200,13 +1409,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1200 | skb = fp_skb(fp); | 1409 | skb = fp_skb(fp); |
| 1201 | wlen = skb->len / FCOE_WORD_TO_BYTE; | 1410 | wlen = skb->len / FCOE_WORD_TO_BYTE; |
| 1202 | 1411 | ||
| 1203 | if (!lp->link_up) { | 1412 | if (!lport->link_up) { |
| 1204 | kfree_skb(skb); | 1413 | kfree_skb(skb); |
| 1205 | return 0; | 1414 | return 0; |
| 1206 | } | 1415 | } |
| 1207 | 1416 | ||
| 1208 | if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && | 1417 | if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && |
| 1209 | fcoe_ctlr_els_send(&fcoe->ctlr, skb)) | 1418 | fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) |
| 1210 | return 0; | 1419 | return 0; |
| 1211 | 1420 | ||
| 1212 | sof = fr_sof(fp); | 1421 | sof = fr_sof(fp); |
| @@ -1218,7 +1427,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1218 | wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; | 1427 | wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; |
| 1219 | 1428 | ||
| 1220 | /* crc offload */ | 1429 | /* crc offload */ |
| 1221 | if (likely(lp->crc_offload)) { | 1430 | if (likely(lport->crc_offload)) { |
| 1222 | skb->ip_summed = CHECKSUM_PARTIAL; | 1431 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 1223 | skb->csum_start = skb_headroom(skb); | 1432 | skb->csum_start = skb_headroom(skb); |
| 1224 | skb->csum_offset = skb->len; | 1433 | skb->csum_offset = skb->len; |
| @@ -1271,7 +1480,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1271 | if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) | 1480 | if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) |
| 1272 | memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); | 1481 | memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); |
| 1273 | else | 1482 | else |
| 1274 | memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN); | 1483 | memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); |
| 1275 | 1484 | ||
| 1276 | hp = (struct fcoe_hdr *)(eh + 1); | 1485 | hp = (struct fcoe_hdr *)(eh + 1); |
| 1277 | memset(hp, 0, sizeof(*hp)); | 1486 | memset(hp, 0, sizeof(*hp)); |
| @@ -1280,7 +1489,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1280 | hp->fcoe_sof = sof; | 1489 | hp->fcoe_sof = sof; |
| 1281 | 1490 | ||
| 1282 | /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ | 1491 | /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ |
| 1283 | if (lp->seq_offload && fr_max_payload(fp)) { | 1492 | if (lport->seq_offload && fr_max_payload(fp)) { |
| 1284 | skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; | 1493 | skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; |
| 1285 | skb_shinfo(skb)->gso_size = fr_max_payload(fp); | 1494 | skb_shinfo(skb)->gso_size = fr_max_payload(fp); |
| 1286 | } else { | 1495 | } else { |
| @@ -1288,23 +1497,23 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1288 | skb_shinfo(skb)->gso_size = 0; | 1497 | skb_shinfo(skb)->gso_size = 0; |
| 1289 | } | 1498 | } |
| 1290 | /* update tx stats: regardless if LLD fails */ | 1499 | /* update tx stats: regardless if LLD fails */ |
| 1291 | stats = fc_lport_get_stats(lp); | 1500 | stats = fc_lport_get_stats(lport); |
| 1292 | stats->TxFrames++; | 1501 | stats->TxFrames++; |
| 1293 | stats->TxWords += wlen; | 1502 | stats->TxWords += wlen; |
| 1294 | 1503 | ||
| 1295 | /* send down to lld */ | 1504 | /* send down to lld */ |
| 1296 | fr_dev(fp) = lp; | 1505 | fr_dev(fp) = lport; |
| 1297 | if (port->fcoe_pending_queue.qlen) | 1506 | if (port->fcoe_pending_queue.qlen) |
| 1298 | fcoe_check_wait_queue(lp, skb); | 1507 | fcoe_check_wait_queue(lport, skb); |
| 1299 | else if (fcoe_start_io(skb)) | 1508 | else if (fcoe_start_io(skb)) |
| 1300 | fcoe_check_wait_queue(lp, skb); | 1509 | fcoe_check_wait_queue(lport, skb); |
| 1301 | 1510 | ||
| 1302 | return 0; | 1511 | return 0; |
| 1303 | } | 1512 | } |
| 1304 | 1513 | ||
| 1305 | /** | 1514 | /** |
| 1306 | * fcoe_percpu_flush_done() - Indicate percpu queue flush completion. | 1515 | * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion |
| 1307 | * @skb: the skb being completed. | 1516 | * @skb: The completed skb (argument required by destructor) |
| 1308 | */ | 1517 | */ |
| 1309 | static void fcoe_percpu_flush_done(struct sk_buff *skb) | 1518 | static void fcoe_percpu_flush_done(struct sk_buff *skb) |
| 1310 | { | 1519 | { |
| @@ -1312,26 +1521,134 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb) | |||
| 1312 | } | 1521 | } |
| 1313 | 1522 | ||
| 1314 | /** | 1523 | /** |
| 1315 | * fcoe_percpu_receive_thread() - recv thread per cpu | 1524 | * fcoe_recv_frame() - process a single received frame |
| 1316 | * @arg: ptr to the fcoe per cpu struct | 1525 | * @skb: frame to process |
| 1317 | * | ||
| 1318 | * Return: 0 for success | ||
| 1319 | */ | 1526 | */ |
| 1320 | int fcoe_percpu_receive_thread(void *arg) | 1527 | static void fcoe_recv_frame(struct sk_buff *skb) |
| 1321 | { | 1528 | { |
| 1322 | struct fcoe_percpu_s *p = arg; | ||
| 1323 | u32 fr_len; | 1529 | u32 fr_len; |
| 1324 | struct fc_lport *lp; | 1530 | struct fc_lport *lport; |
| 1325 | struct fcoe_rcv_info *fr; | 1531 | struct fcoe_rcv_info *fr; |
| 1326 | struct fcoe_dev_stats *stats; | 1532 | struct fcoe_dev_stats *stats; |
| 1327 | struct fc_frame_header *fh; | 1533 | struct fc_frame_header *fh; |
| 1328 | struct sk_buff *skb; | ||
| 1329 | struct fcoe_crc_eof crc_eof; | 1534 | struct fcoe_crc_eof crc_eof; |
| 1330 | struct fc_frame *fp; | 1535 | struct fc_frame *fp; |
| 1331 | u8 *mac = NULL; | 1536 | u8 *mac = NULL; |
| 1332 | struct fcoe_port *port; | 1537 | struct fcoe_port *port; |
| 1333 | struct fcoe_hdr *hp; | 1538 | struct fcoe_hdr *hp; |
| 1334 | 1539 | ||
| 1540 | fr = fcoe_dev_from_skb(skb); | ||
| 1541 | lport = fr->fr_dev; | ||
| 1542 | if (unlikely(!lport)) { | ||
| 1543 | if (skb->destructor != fcoe_percpu_flush_done) | ||
| 1544 | FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); | ||
| 1545 | kfree_skb(skb); | ||
| 1546 | return; | ||
| 1547 | } | ||
| 1548 | |||
| 1549 | FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " | ||
| 1550 | "head:%p data:%p tail:%p end:%p sum:%d dev:%s", | ||
| 1551 | skb->len, skb->data_len, | ||
| 1552 | skb->head, skb->data, skb_tail_pointer(skb), | ||
| 1553 | skb_end_pointer(skb), skb->csum, | ||
| 1554 | skb->dev ? skb->dev->name : "<NULL>"); | ||
| 1555 | |||
| 1556 | /* | ||
| 1557 | * Save source MAC address before discarding header. | ||
| 1558 | */ | ||
| 1559 | port = lport_priv(lport); | ||
| 1560 | if (skb_is_nonlinear(skb)) | ||
| 1561 | skb_linearize(skb); /* not ideal */ | ||
| 1562 | mac = eth_hdr(skb)->h_source; | ||
| 1563 | |||
| 1564 | /* | ||
| 1565 | * Frame length checks and setting up the header pointers | ||
| 1566 | * was done in fcoe_rcv already. | ||
| 1567 | */ | ||
| 1568 | hp = (struct fcoe_hdr *) skb_network_header(skb); | ||
| 1569 | fh = (struct fc_frame_header *) skb_transport_header(skb); | ||
| 1570 | |||
| 1571 | stats = fc_lport_get_stats(lport); | ||
| 1572 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | ||
| 1573 | if (stats->ErrorFrames < 5) | ||
| 1574 | printk(KERN_WARNING "fcoe: FCoE version " | ||
| 1575 | "mismatch: The frame has " | ||
| 1576 | "version %x, but the " | ||
| 1577 | "initiator supports version " | ||
| 1578 | "%x\n", FC_FCOE_DECAPS_VER(hp), | ||
| 1579 | FC_FCOE_VER); | ||
| 1580 | stats->ErrorFrames++; | ||
| 1581 | kfree_skb(skb); | ||
| 1582 | return; | ||
| 1583 | } | ||
| 1584 | |||
| 1585 | skb_pull(skb, sizeof(struct fcoe_hdr)); | ||
| 1586 | fr_len = skb->len - sizeof(struct fcoe_crc_eof); | ||
| 1587 | |||
| 1588 | stats->RxFrames++; | ||
| 1589 | stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; | ||
| 1590 | |||
| 1591 | fp = (struct fc_frame *)skb; | ||
| 1592 | fc_frame_init(fp); | ||
| 1593 | fr_dev(fp) = lport; | ||
| 1594 | fr_sof(fp) = hp->fcoe_sof; | ||
| 1595 | |||
| 1596 | /* Copy out the CRC and EOF trailer for access */ | ||
| 1597 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { | ||
| 1598 | kfree_skb(skb); | ||
| 1599 | return; | ||
| 1600 | } | ||
| 1601 | fr_eof(fp) = crc_eof.fcoe_eof; | ||
| 1602 | fr_crc(fp) = crc_eof.fcoe_crc32; | ||
| 1603 | if (pskb_trim(skb, fr_len)) { | ||
| 1604 | kfree_skb(skb); | ||
| 1605 | return; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | /* | ||
| 1609 | * We only check CRC if no offload is available and if it is | ||
| 1610 | * it's solicited data, in which case, the FCP layer would | ||
| 1611 | * check it during the copy. | ||
| 1612 | */ | ||
| 1613 | if (lport->crc_offload && | ||
| 1614 | skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
| 1615 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | ||
| 1616 | else | ||
| 1617 | fr_flags(fp) |= FCPHF_CRC_UNCHECKED; | ||
| 1618 | |||
| 1619 | fh = fc_frame_header_get(fp); | ||
| 1620 | if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && | ||
| 1621 | fh->fh_type == FC_TYPE_FCP) { | ||
| 1622 | fc_exch_recv(lport, fp); | ||
| 1623 | return; | ||
| 1624 | } | ||
| 1625 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | ||
| 1626 | if (le32_to_cpu(fr_crc(fp)) != | ||
| 1627 | ~crc32(~0, skb->data, fr_len)) { | ||
| 1628 | if (stats->InvalidCRCCount < 5) | ||
| 1629 | printk(KERN_WARNING "fcoe: dropping " | ||
| 1630 | "frame with CRC error\n"); | ||
| 1631 | stats->InvalidCRCCount++; | ||
| 1632 | stats->ErrorFrames++; | ||
| 1633 | fc_frame_free(fp); | ||
| 1634 | return; | ||
| 1635 | } | ||
| 1636 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | ||
| 1637 | } | ||
| 1638 | fc_exch_recv(lport, fp); | ||
| 1639 | } | ||
| 1640 | |||
| 1641 | /** | ||
| 1642 | * fcoe_percpu_receive_thread() - The per-CPU packet receive thread | ||
| 1643 | * @arg: The per-CPU context | ||
| 1644 | * | ||
| 1645 | * Return: 0 for success | ||
| 1646 | */ | ||
| 1647 | int fcoe_percpu_receive_thread(void *arg) | ||
| 1648 | { | ||
| 1649 | struct fcoe_percpu_s *p = arg; | ||
| 1650 | struct sk_buff *skb; | ||
| 1651 | |||
| 1335 | set_user_nice(current, -20); | 1652 | set_user_nice(current, -20); |
| 1336 | 1653 | ||
| 1337 | while (!kthread_should_stop()) { | 1654 | while (!kthread_should_stop()) { |
| @@ -1347,129 +1664,27 @@ int fcoe_percpu_receive_thread(void *arg) | |||
| 1347 | spin_lock_bh(&p->fcoe_rx_list.lock); | 1664 | spin_lock_bh(&p->fcoe_rx_list.lock); |
| 1348 | } | 1665 | } |
| 1349 | spin_unlock_bh(&p->fcoe_rx_list.lock); | 1666 | spin_unlock_bh(&p->fcoe_rx_list.lock); |
| 1350 | fr = fcoe_dev_from_skb(skb); | 1667 | fcoe_recv_frame(skb); |
| 1351 | lp = fr->fr_dev; | ||
| 1352 | if (unlikely(lp == NULL)) { | ||
| 1353 | if (skb->destructor != fcoe_percpu_flush_done) | ||
| 1354 | FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); | ||
| 1355 | kfree_skb(skb); | ||
| 1356 | continue; | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " | ||
| 1360 | "head:%p data:%p tail:%p end:%p sum:%d dev:%s", | ||
| 1361 | skb->len, skb->data_len, | ||
| 1362 | skb->head, skb->data, skb_tail_pointer(skb), | ||
| 1363 | skb_end_pointer(skb), skb->csum, | ||
| 1364 | skb->dev ? skb->dev->name : "<NULL>"); | ||
| 1365 | |||
| 1366 | /* | ||
| 1367 | * Save source MAC address before discarding header. | ||
| 1368 | */ | ||
| 1369 | port = lport_priv(lp); | ||
| 1370 | if (skb_is_nonlinear(skb)) | ||
| 1371 | skb_linearize(skb); /* not ideal */ | ||
| 1372 | mac = eth_hdr(skb)->h_source; | ||
| 1373 | |||
| 1374 | /* | ||
| 1375 | * Frame length checks and setting up the header pointers | ||
| 1376 | * was done in fcoe_rcv already. | ||
| 1377 | */ | ||
| 1378 | hp = (struct fcoe_hdr *) skb_network_header(skb); | ||
| 1379 | fh = (struct fc_frame_header *) skb_transport_header(skb); | ||
| 1380 | |||
| 1381 | stats = fc_lport_get_stats(lp); | ||
| 1382 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | ||
| 1383 | if (stats->ErrorFrames < 5) | ||
| 1384 | printk(KERN_WARNING "fcoe: FCoE version " | ||
| 1385 | "mismatch: The frame has " | ||
| 1386 | "version %x, but the " | ||
| 1387 | "initiator supports version " | ||
| 1388 | "%x\n", FC_FCOE_DECAPS_VER(hp), | ||
| 1389 | FC_FCOE_VER); | ||
| 1390 | stats->ErrorFrames++; | ||
| 1391 | kfree_skb(skb); | ||
| 1392 | continue; | ||
| 1393 | } | ||
| 1394 | |||
| 1395 | skb_pull(skb, sizeof(struct fcoe_hdr)); | ||
| 1396 | fr_len = skb->len - sizeof(struct fcoe_crc_eof); | ||
| 1397 | |||
| 1398 | stats->RxFrames++; | ||
| 1399 | stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; | ||
| 1400 | |||
| 1401 | fp = (struct fc_frame *)skb; | ||
| 1402 | fc_frame_init(fp); | ||
| 1403 | fr_dev(fp) = lp; | ||
| 1404 | fr_sof(fp) = hp->fcoe_sof; | ||
| 1405 | |||
| 1406 | /* Copy out the CRC and EOF trailer for access */ | ||
| 1407 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { | ||
| 1408 | kfree_skb(skb); | ||
| 1409 | continue; | ||
| 1410 | } | ||
| 1411 | fr_eof(fp) = crc_eof.fcoe_eof; | ||
| 1412 | fr_crc(fp) = crc_eof.fcoe_crc32; | ||
| 1413 | if (pskb_trim(skb, fr_len)) { | ||
| 1414 | kfree_skb(skb); | ||
| 1415 | continue; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | /* | ||
| 1419 | * We only check CRC if no offload is available and if it is | ||
| 1420 | * it's solicited data, in which case, the FCP layer would | ||
| 1421 | * check it during the copy. | ||
| 1422 | */ | ||
| 1423 | if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
| 1424 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | ||
| 1425 | else | ||
| 1426 | fr_flags(fp) |= FCPHF_CRC_UNCHECKED; | ||
| 1427 | |||
| 1428 | fh = fc_frame_header_get(fp); | ||
| 1429 | if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && | ||
| 1430 | fh->fh_type == FC_TYPE_FCP) { | ||
| 1431 | fc_exch_recv(lp, fp); | ||
| 1432 | continue; | ||
| 1433 | } | ||
| 1434 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | ||
| 1435 | if (le32_to_cpu(fr_crc(fp)) != | ||
| 1436 | ~crc32(~0, skb->data, fr_len)) { | ||
| 1437 | if (stats->InvalidCRCCount < 5) | ||
| 1438 | printk(KERN_WARNING "fcoe: dropping " | ||
| 1439 | "frame with CRC error\n"); | ||
| 1440 | stats->InvalidCRCCount++; | ||
| 1441 | stats->ErrorFrames++; | ||
| 1442 | fc_frame_free(fp); | ||
| 1443 | continue; | ||
| 1444 | } | ||
| 1445 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | ||
| 1446 | } | ||
| 1447 | if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) && | ||
| 1448 | fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) { | ||
| 1449 | fc_frame_free(fp); | ||
| 1450 | continue; | ||
| 1451 | } | ||
| 1452 | fc_exch_recv(lp, fp); | ||
| 1453 | } | 1668 | } |
| 1454 | return 0; | 1669 | return 0; |
| 1455 | } | 1670 | } |
| 1456 | 1671 | ||
| 1457 | /** | 1672 | /** |
| 1458 | * fcoe_check_wait_queue() - attempt to clear the transmit backlog | 1673 | * fcoe_check_wait_queue() - Attempt to clear the transmit backlog |
| 1459 | * @lp: the fc_lport | 1674 | * @lport: The local port whose backlog is to be cleared |
| 1460 | * | 1675 | * |
| 1461 | * This empties the wait_queue, dequeue the head of the wait_queue queue | 1676 | * This empties the wait_queue, dequeues the head of the wait_queue queue |
| 1462 | * and calls fcoe_start_io() for each packet, if all skb have been | 1677 | * and calls fcoe_start_io() for each packet. If all skb have been |
| 1463 | * transmitted, return qlen or -1 if a error occurs, then restore | 1678 | * transmitted it returns the qlen. If an error occurs it restores |
| 1464 | * wait_queue and try again later. | 1679 | * wait_queue (to try again later) and returns -1. |
| 1465 | * | 1680 | * |
| 1466 | * The wait_queue is used when the skb transmit fails. skb will go | 1681 | * The wait_queue is used when the skb transmit fails. The failed skb |
| 1467 | * in the wait_queue which will be emptied by the timer function or | 1682 | * will go in the wait_queue which will be emptied by the timer function or |
| 1468 | * by the next skb transmit. | 1683 | * by the next skb transmit. |
| 1469 | */ | 1684 | */ |
| 1470 | static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) | 1685 | static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) |
| 1471 | { | 1686 | { |
| 1472 | struct fcoe_port *port = lport_priv(lp); | 1687 | struct fcoe_port *port = lport_priv(lport); |
| 1473 | int rc; | 1688 | int rc; |
| 1474 | 1689 | ||
| 1475 | spin_lock_bh(&port->fcoe_pending_queue.lock); | 1690 | spin_lock_bh(&port->fcoe_pending_queue.lock); |
| @@ -1501,19 +1716,19 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) | |||
| 1501 | } | 1716 | } |
| 1502 | 1717 | ||
| 1503 | if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) | 1718 | if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) |
| 1504 | lp->qfull = 0; | 1719 | lport->qfull = 0; |
| 1505 | if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) | 1720 | if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) |
| 1506 | mod_timer(&port->timer, jiffies + 2); | 1721 | mod_timer(&port->timer, jiffies + 2); |
| 1507 | port->fcoe_pending_queue_active = 0; | 1722 | port->fcoe_pending_queue_active = 0; |
| 1508 | out: | 1723 | out: |
| 1509 | if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | 1724 | if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) |
| 1510 | lp->qfull = 1; | 1725 | lport->qfull = 1; |
| 1511 | spin_unlock_bh(&port->fcoe_pending_queue.lock); | 1726 | spin_unlock_bh(&port->fcoe_pending_queue.lock); |
| 1512 | return; | 1727 | return; |
| 1513 | } | 1728 | } |
| 1514 | 1729 | ||
| 1515 | /** | 1730 | /** |
| 1516 | * fcoe_dev_setup() - setup link change notification interface | 1731 | * fcoe_dev_setup() - Setup the link change notification interface |
| 1517 | */ | 1732 | */ |
| 1518 | static void fcoe_dev_setup(void) | 1733 | static void fcoe_dev_setup(void) |
| 1519 | { | 1734 | { |
| @@ -1521,7 +1736,7 @@ static void fcoe_dev_setup(void) | |||
| 1521 | } | 1736 | } |
| 1522 | 1737 | ||
| 1523 | /** | 1738 | /** |
| 1524 | * fcoe_dev_cleanup() - cleanup link change notification interface | 1739 | * fcoe_dev_cleanup() - Cleanup the link change notification interface |
| 1525 | */ | 1740 | */ |
| 1526 | static void fcoe_dev_cleanup(void) | 1741 | static void fcoe_dev_cleanup(void) |
| 1527 | { | 1742 | { |
| @@ -1529,19 +1744,19 @@ static void fcoe_dev_cleanup(void) | |||
| 1529 | } | 1744 | } |
| 1530 | 1745 | ||
| 1531 | /** | 1746 | /** |
| 1532 | * fcoe_device_notification() - netdev event notification callback | 1747 | * fcoe_device_notification() - Handler for net device events |
| 1533 | * @notifier: context of the notification | 1748 | * @notifier: The context of the notification |
| 1534 | * @event: type of event | 1749 | * @event: The type of event |
| 1535 | * @ptr: fixed array for output parsed ifname | 1750 | * @ptr: The net device that the event was on |
| 1536 | * | 1751 | * |
| 1537 | * This function is called by the ethernet driver in case of link change event | 1752 | * This function is called by the Ethernet driver in case of link change event. |
| 1538 | * | 1753 | * |
| 1539 | * Returns: 0 for success | 1754 | * Returns: 0 for success |
| 1540 | */ | 1755 | */ |
| 1541 | static int fcoe_device_notification(struct notifier_block *notifier, | 1756 | static int fcoe_device_notification(struct notifier_block *notifier, |
| 1542 | ulong event, void *ptr) | 1757 | ulong event, void *ptr) |
| 1543 | { | 1758 | { |
| 1544 | struct fc_lport *lp = NULL; | 1759 | struct fc_lport *lport = NULL; |
| 1545 | struct net_device *netdev = ptr; | 1760 | struct net_device *netdev = ptr; |
| 1546 | struct fcoe_interface *fcoe; | 1761 | struct fcoe_interface *fcoe; |
| 1547 | struct fcoe_port *port; | 1762 | struct fcoe_port *port; |
| @@ -1552,11 +1767,11 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
| 1552 | 1767 | ||
| 1553 | list_for_each_entry(fcoe, &fcoe_hostlist, list) { | 1768 | list_for_each_entry(fcoe, &fcoe_hostlist, list) { |
| 1554 | if (fcoe->netdev == netdev) { | 1769 | if (fcoe->netdev == netdev) { |
| 1555 | lp = fcoe->ctlr.lp; | 1770 | lport = fcoe->ctlr.lp; |
| 1556 | break; | 1771 | break; |
| 1557 | } | 1772 | } |
| 1558 | } | 1773 | } |
| 1559 | if (lp == NULL) { | 1774 | if (!lport) { |
| 1560 | rc = NOTIFY_DONE; | 1775 | rc = NOTIFY_DONE; |
| 1561 | goto out; | 1776 | goto out; |
| 1562 | } | 1777 | } |
| @@ -1570,10 +1785,12 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
| 1570 | case NETDEV_CHANGE: | 1785 | case NETDEV_CHANGE: |
| 1571 | break; | 1786 | break; |
| 1572 | case NETDEV_CHANGEMTU: | 1787 | case NETDEV_CHANGEMTU: |
| 1788 | if (netdev->features & NETIF_F_FCOE_MTU) | ||
| 1789 | break; | ||
| 1573 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + | 1790 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + |
| 1574 | sizeof(struct fcoe_crc_eof)); | 1791 | sizeof(struct fcoe_crc_eof)); |
| 1575 | if (mfs >= FC_MIN_MAX_FRAME) | 1792 | if (mfs >= FC_MIN_MAX_FRAME) |
| 1576 | fc_set_mfs(lp, mfs); | 1793 | fc_set_mfs(lport, mfs); |
| 1577 | break; | 1794 | break; |
| 1578 | case NETDEV_REGISTER: | 1795 | case NETDEV_REGISTER: |
| 1579 | break; | 1796 | break; |
| @@ -1588,22 +1805,22 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
| 1588 | FCOE_NETDEV_DBG(netdev, "Unknown event %ld " | 1805 | FCOE_NETDEV_DBG(netdev, "Unknown event %ld " |
| 1589 | "from netdev netlink\n", event); | 1806 | "from netdev netlink\n", event); |
| 1590 | } | 1807 | } |
| 1591 | if (link_possible && !fcoe_link_ok(lp)) | 1808 | if (link_possible && !fcoe_link_ok(lport)) |
| 1592 | fcoe_ctlr_link_up(&fcoe->ctlr); | 1809 | fcoe_ctlr_link_up(&fcoe->ctlr); |
| 1593 | else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { | 1810 | else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { |
| 1594 | stats = fc_lport_get_stats(lp); | 1811 | stats = fc_lport_get_stats(lport); |
| 1595 | stats->LinkFailureCount++; | 1812 | stats->LinkFailureCount++; |
| 1596 | fcoe_clean_pending_queue(lp); | 1813 | fcoe_clean_pending_queue(lport); |
| 1597 | } | 1814 | } |
| 1598 | out: | 1815 | out: |
| 1599 | return rc; | 1816 | return rc; |
| 1600 | } | 1817 | } |
| 1601 | 1818 | ||
| 1602 | /** | 1819 | /** |
| 1603 | * fcoe_if_to_netdev() - parse a name buffer to get netdev | 1820 | * fcoe_if_to_netdev() - Parse a name buffer to get a net device |
| 1604 | * @buffer: incoming buffer to be copied | 1821 | * @buffer: The name of the net device |
| 1605 | * | 1822 | * |
| 1606 | * Returns: NULL or ptr to net_device | 1823 | * Returns: NULL or a ptr to net_device |
| 1607 | */ | 1824 | */ |
| 1608 | static struct net_device *fcoe_if_to_netdev(const char *buffer) | 1825 | static struct net_device *fcoe_if_to_netdev(const char *buffer) |
| 1609 | { | 1826 | { |
| @@ -1621,9 +1838,11 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) | |||
| 1621 | } | 1838 | } |
| 1622 | 1839 | ||
| 1623 | /** | 1840 | /** |
| 1624 | * fcoe_destroy() - handles the destroy from sysfs | 1841 | * fcoe_destroy() - Destroy a FCoE interface |
| 1625 | * @buffer: expected to be an eth if name | 1842 | * @buffer: The name of the Ethernet interface to be destroyed |
| 1626 | * @kp: associated kernel param | 1843 | * @kp: The associated kernel parameter |
| 1844 | * | ||
| 1845 | * Called from sysfs. | ||
| 1627 | * | 1846 | * |
| 1628 | * Returns: 0 for success | 1847 | * Returns: 0 for success |
| 1629 | */ | 1848 | */ |
| @@ -1631,7 +1850,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | |||
| 1631 | { | 1850 | { |
| 1632 | struct fcoe_interface *fcoe; | 1851 | struct fcoe_interface *fcoe; |
| 1633 | struct net_device *netdev; | 1852 | struct net_device *netdev; |
| 1634 | int rc; | 1853 | int rc = 0; |
| 1635 | 1854 | ||
| 1636 | mutex_lock(&fcoe_config_mutex); | 1855 | mutex_lock(&fcoe_config_mutex); |
| 1637 | #ifdef CONFIG_FCOE_MODULE | 1856 | #ifdef CONFIG_FCOE_MODULE |
| @@ -1670,6 +1889,10 @@ out_nodev: | |||
| 1670 | return rc; | 1889 | return rc; |
| 1671 | } | 1890 | } |
| 1672 | 1891 | ||
| 1892 | /** | ||
| 1893 | * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context | ||
| 1894 | * @work: Handle to the FCoE port to be destroyed | ||
| 1895 | */ | ||
| 1673 | static void fcoe_destroy_work(struct work_struct *work) | 1896 | static void fcoe_destroy_work(struct work_struct *work) |
| 1674 | { | 1897 | { |
| 1675 | struct fcoe_port *port; | 1898 | struct fcoe_port *port; |
| @@ -1681,9 +1904,11 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
| 1681 | } | 1904 | } |
| 1682 | 1905 | ||
| 1683 | /** | 1906 | /** |
| 1684 | * fcoe_create() - Handles the create call from sysfs | 1907 | * fcoe_create() - Create a fcoe interface |
| 1685 | * @buffer: expected to be an eth if name | 1908 | * @buffer: The name of the Ethernet interface to create on |
| 1686 | * @kp: associated kernel param | 1909 | * @kp: The associated kernel param |
| 1910 | * | ||
| 1911 | * Called from sysfs. | ||
| 1687 | * | 1912 | * |
| 1688 | * Returns: 0 for success | 1913 | * Returns: 0 for success |
| 1689 | */ | 1914 | */ |
| @@ -1726,7 +1951,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
| 1726 | goto out_putdev; | 1951 | goto out_putdev; |
| 1727 | } | 1952 | } |
| 1728 | 1953 | ||
| 1729 | lport = fcoe_if_create(fcoe, &netdev->dev); | 1954 | lport = fcoe_if_create(fcoe, &netdev->dev, 0); |
| 1730 | if (IS_ERR(lport)) { | 1955 | if (IS_ERR(lport)) { |
| 1731 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", | 1956 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", |
| 1732 | netdev->name); | 1957 | netdev->name); |
| @@ -1762,16 +1987,9 @@ out_nodev: | |||
| 1762 | return rc; | 1987 | return rc; |
| 1763 | } | 1988 | } |
| 1764 | 1989 | ||
| 1765 | module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); | ||
| 1766 | __MODULE_PARM_TYPE(create, "string"); | ||
| 1767 | MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); | ||
| 1768 | module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); | ||
| 1769 | __MODULE_PARM_TYPE(destroy, "string"); | ||
| 1770 | MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); | ||
| 1771 | |||
| 1772 | /** | 1990 | /** |
| 1773 | * fcoe_link_ok() - Check if link is ok for the fc_lport | 1991 | * fcoe_link_ok() - Check if the link is OK for a local port |
| 1774 | * @lp: ptr to the fc_lport | 1992 | * @lport: The local port to check link on |
| 1775 | * | 1993 | * |
| 1776 | * Any permanently-disqualifying conditions have been previously checked. | 1994 | * Any permanently-disqualifying conditions have been previously checked. |
| 1777 | * This also updates the speed setting, which may change with link for 100/1000. | 1995 | * This also updates the speed setting, which may change with link for 100/1000. |
| @@ -1783,26 +2001,26 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); | |||
| 1783 | * Returns: 0 if link is OK for use by FCoE. | 2001 | * Returns: 0 if link is OK for use by FCoE. |
| 1784 | * | 2002 | * |
| 1785 | */ | 2003 | */ |
| 1786 | int fcoe_link_ok(struct fc_lport *lp) | 2004 | int fcoe_link_ok(struct fc_lport *lport) |
| 1787 | { | 2005 | { |
| 1788 | struct fcoe_port *port = lport_priv(lp); | 2006 | struct fcoe_port *port = lport_priv(lport); |
| 1789 | struct net_device *dev = port->fcoe->netdev; | 2007 | struct net_device *netdev = port->fcoe->netdev; |
| 1790 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; | 2008 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; |
| 1791 | 2009 | ||
| 1792 | if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && | 2010 | if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) && |
| 1793 | (!dev_ethtool_get_settings(dev, &ecmd))) { | 2011 | (!dev_ethtool_get_settings(netdev, &ecmd))) { |
| 1794 | lp->link_supported_speeds &= | 2012 | lport->link_supported_speeds &= |
| 1795 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | 2013 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); |
| 1796 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | 2014 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | |
| 1797 | SUPPORTED_1000baseT_Full)) | 2015 | SUPPORTED_1000baseT_Full)) |
| 1798 | lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; | 2016 | lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; |
| 1799 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | 2017 | if (ecmd.supported & SUPPORTED_10000baseT_Full) |
| 1800 | lp->link_supported_speeds |= | 2018 | lport->link_supported_speeds |= |
| 1801 | FC_PORTSPEED_10GBIT; | 2019 | FC_PORTSPEED_10GBIT; |
| 1802 | if (ecmd.speed == SPEED_1000) | 2020 | if (ecmd.speed == SPEED_1000) |
| 1803 | lp->link_speed = FC_PORTSPEED_1GBIT; | 2021 | lport->link_speed = FC_PORTSPEED_1GBIT; |
| 1804 | if (ecmd.speed == SPEED_10000) | 2022 | if (ecmd.speed == SPEED_10000) |
| 1805 | lp->link_speed = FC_PORTSPEED_10GBIT; | 2023 | lport->link_speed = FC_PORTSPEED_10GBIT; |
| 1806 | 2024 | ||
| 1807 | return 0; | 2025 | return 0; |
| 1808 | } | 2026 | } |
| @@ -1810,8 +2028,8 @@ int fcoe_link_ok(struct fc_lport *lp) | |||
| 1810 | } | 2028 | } |
| 1811 | 2029 | ||
| 1812 | /** | 2030 | /** |
| 1813 | * fcoe_percpu_clean() - Clear the pending skbs for an lport | 2031 | * fcoe_percpu_clean() - Clear all pending skbs for an local port |
| 1814 | * @lp: the fc_lport | 2032 | * @lport: The local port whose skbs are to be cleared |
| 1815 | * | 2033 | * |
| 1816 | * Must be called with fcoe_create_mutex held to single-thread completion. | 2034 | * Must be called with fcoe_create_mutex held to single-thread completion. |
| 1817 | * | 2035 | * |
| @@ -1820,7 +2038,7 @@ int fcoe_link_ok(struct fc_lport *lp) | |||
| 1820 | * there no packets that will be handled by the lport, but also that any | 2038 | * there no packets that will be handled by the lport, but also that any |
| 1821 | * threads already handling packet have returned. | 2039 | * threads already handling packet have returned. |
| 1822 | */ | 2040 | */ |
| 1823 | void fcoe_percpu_clean(struct fc_lport *lp) | 2041 | void fcoe_percpu_clean(struct fc_lport *lport) |
| 1824 | { | 2042 | { |
| 1825 | struct fcoe_percpu_s *pp; | 2043 | struct fcoe_percpu_s *pp; |
| 1826 | struct fcoe_rcv_info *fr; | 2044 | struct fcoe_rcv_info *fr; |
| @@ -1838,7 +2056,7 @@ void fcoe_percpu_clean(struct fc_lport *lp) | |||
| 1838 | skb = next) { | 2056 | skb = next) { |
| 1839 | next = skb->next; | 2057 | next = skb->next; |
| 1840 | fr = fcoe_dev_from_skb(skb); | 2058 | fr = fcoe_dev_from_skb(skb); |
| 1841 | if (fr->fr_dev == lp) { | 2059 | if (fr->fr_dev == lport) { |
| 1842 | __skb_unlink(skb, list); | 2060 | __skb_unlink(skb, list); |
| 1843 | kfree_skb(skb); | 2061 | kfree_skb(skb); |
| 1844 | } | 2062 | } |
| @@ -1867,13 +2085,11 @@ void fcoe_percpu_clean(struct fc_lport *lp) | |||
| 1867 | 2085 | ||
| 1868 | /** | 2086 | /** |
| 1869 | * fcoe_clean_pending_queue() - Dequeue a skb and free it | 2087 | * fcoe_clean_pending_queue() - Dequeue a skb and free it |
| 1870 | * @lp: the corresponding fc_lport | 2088 | * @lport: The local port to dequeue a skb on |
| 1871 | * | ||
| 1872 | * Returns: none | ||
| 1873 | */ | 2089 | */ |
| 1874 | void fcoe_clean_pending_queue(struct fc_lport *lp) | 2090 | void fcoe_clean_pending_queue(struct fc_lport *lport) |
| 1875 | { | 2091 | { |
| 1876 | struct fcoe_port *port = lport_priv(lp); | 2092 | struct fcoe_port *port = lport_priv(lport); |
| 1877 | struct sk_buff *skb; | 2093 | struct sk_buff *skb; |
| 1878 | 2094 | ||
| 1879 | spin_lock_bh(&port->fcoe_pending_queue.lock); | 2095 | spin_lock_bh(&port->fcoe_pending_queue.lock); |
| @@ -1886,10 +2102,10 @@ void fcoe_clean_pending_queue(struct fc_lport *lp) | |||
| 1886 | } | 2102 | } |
| 1887 | 2103 | ||
| 1888 | /** | 2104 | /** |
| 1889 | * fcoe_reset() - Resets the fcoe | 2105 | * fcoe_reset() - Reset a local port |
| 1890 | * @shost: shost the reset is from | 2106 | * @shost: The SCSI host associated with the local port to be reset |
| 1891 | * | 2107 | * |
| 1892 | * Returns: always 0 | 2108 | * Returns: Always 0 (return value required by FC transport template) |
| 1893 | */ | 2109 | */ |
| 1894 | int fcoe_reset(struct Scsi_Host *shost) | 2110 | int fcoe_reset(struct Scsi_Host *shost) |
| 1895 | { | 2111 | { |
| @@ -1899,30 +2115,33 @@ int fcoe_reset(struct Scsi_Host *shost) | |||
| 1899 | } | 2115 | } |
| 1900 | 2116 | ||
| 1901 | /** | 2117 | /** |
| 1902 | * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device | 2118 | * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device |
| 1903 | * @dev: this is currently ptr to net_device | 2119 | * @netdev: The net device used as a key |
| 2120 | * | ||
| 2121 | * Locking: Must be called with the RNL mutex held. | ||
| 1904 | * | 2122 | * |
| 1905 | * Returns: NULL or the located fcoe_port | 2123 | * Returns: NULL or the FCoE interface |
| 1906 | * Locking: must be called with the RNL mutex held | ||
| 1907 | */ | 2124 | */ |
| 1908 | static struct fcoe_interface * | 2125 | static struct fcoe_interface * |
| 1909 | fcoe_hostlist_lookup_port(const struct net_device *dev) | 2126 | fcoe_hostlist_lookup_port(const struct net_device *netdev) |
| 1910 | { | 2127 | { |
| 1911 | struct fcoe_interface *fcoe; | 2128 | struct fcoe_interface *fcoe; |
| 1912 | 2129 | ||
| 1913 | list_for_each_entry(fcoe, &fcoe_hostlist, list) { | 2130 | list_for_each_entry(fcoe, &fcoe_hostlist, list) { |
| 1914 | if (fcoe->netdev == dev) | 2131 | if (fcoe->netdev == netdev) |
| 1915 | return fcoe; | 2132 | return fcoe; |
| 1916 | } | 2133 | } |
| 1917 | return NULL; | 2134 | return NULL; |
| 1918 | } | 2135 | } |
| 1919 | 2136 | ||
| 1920 | /** | 2137 | /** |
| 1921 | * fcoe_hostlist_lookup() - Find the corresponding lport by netdev | 2138 | * fcoe_hostlist_lookup() - Find the local port associated with a |
| 1922 | * @netdev: ptr to net_device | 2139 | * given net device |
| 2140 | * @netdev: The netdevice used as a key | ||
| 1923 | * | 2141 | * |
| 1924 | * Returns: 0 for success | 2142 | * Locking: Must be called with the RTNL mutex held |
| 1925 | * Locking: must be called with the RTNL mutex held | 2143 | * |
| 2144 | * Returns: NULL or the local port | ||
| 1926 | */ | 2145 | */ |
| 1927 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) | 2146 | static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) |
| 1928 | { | 2147 | { |
| @@ -1933,11 +2152,13 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) | |||
| 1933 | } | 2152 | } |
| 1934 | 2153 | ||
| 1935 | /** | 2154 | /** |
| 1936 | * fcoe_hostlist_add() - Add a lport to lports list | 2155 | * fcoe_hostlist_add() - Add the FCoE interface identified by a local |
| 1937 | * @lp: ptr to the fc_lport to be added | 2156 | * port to the hostlist |
| 2157 | * @lport: The local port that identifies the FCoE interface to be added | ||
| 1938 | * | 2158 | * |
| 1939 | * Returns: 0 for success | ||
| 1940 | * Locking: must be called with the RTNL mutex held | 2159 | * Locking: must be called with the RTNL mutex held |
| 2160 | * | ||
| 2161 | * Returns: 0 for success | ||
| 1941 | */ | 2162 | */ |
| 1942 | static int fcoe_hostlist_add(const struct fc_lport *lport) | 2163 | static int fcoe_hostlist_add(const struct fc_lport *lport) |
| 1943 | { | 2164 | { |
| @@ -1954,15 +2175,15 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) | |||
| 1954 | } | 2175 | } |
| 1955 | 2176 | ||
| 1956 | /** | 2177 | /** |
| 1957 | * fcoe_init() - fcoe module loading initialization | 2178 | * fcoe_init() - Initialize fcoe.ko |
| 1958 | * | 2179 | * |
| 1959 | * Returns 0 on success, negative on failure | 2180 | * Returns: 0 on success, or a negative value on failure |
| 1960 | */ | 2181 | */ |
| 1961 | static int __init fcoe_init(void) | 2182 | static int __init fcoe_init(void) |
| 1962 | { | 2183 | { |
| 2184 | struct fcoe_percpu_s *p; | ||
| 1963 | unsigned int cpu; | 2185 | unsigned int cpu; |
| 1964 | int rc = 0; | 2186 | int rc = 0; |
| 1965 | struct fcoe_percpu_s *p; | ||
| 1966 | 2187 | ||
| 1967 | mutex_lock(&fcoe_config_mutex); | 2188 | mutex_lock(&fcoe_config_mutex); |
| 1968 | 2189 | ||
| @@ -1999,15 +2220,15 @@ out_free: | |||
| 1999 | module_init(fcoe_init); | 2220 | module_init(fcoe_init); |
| 2000 | 2221 | ||
| 2001 | /** | 2222 | /** |
| 2002 | * fcoe_exit() - fcoe module unloading cleanup | 2223 | * fcoe_exit() - Clean up fcoe.ko |
| 2003 | * | 2224 | * |
| 2004 | * Returns 0 on success, negative on failure | 2225 | * Returns: 0 on success or a negative value on failure |
| 2005 | */ | 2226 | */ |
| 2006 | static void __exit fcoe_exit(void) | 2227 | static void __exit fcoe_exit(void) |
| 2007 | { | 2228 | { |
| 2008 | unsigned int cpu; | ||
| 2009 | struct fcoe_interface *fcoe, *tmp; | 2229 | struct fcoe_interface *fcoe, *tmp; |
| 2010 | struct fcoe_port *port; | 2230 | struct fcoe_port *port; |
| 2231 | unsigned int cpu; | ||
| 2011 | 2232 | ||
| 2012 | mutex_lock(&fcoe_config_mutex); | 2233 | mutex_lock(&fcoe_config_mutex); |
| 2013 | 2234 | ||
| @@ -2033,9 +2254,238 @@ static void __exit fcoe_exit(void) | |||
| 2033 | /* flush any asyncronous interface destroys, | 2254 | /* flush any asyncronous interface destroys, |
| 2034 | * this should happen after the netdev notifier is unregistered */ | 2255 | * this should happen after the netdev notifier is unregistered */ |
| 2035 | flush_scheduled_work(); | 2256 | flush_scheduled_work(); |
| 2257 | /* That will flush out all the N_Ports on the hostlist, but now we | ||
| 2258 | * may have NPIV VN_Ports scheduled for destruction */ | ||
| 2259 | flush_scheduled_work(); | ||
| 2036 | 2260 | ||
| 2037 | /* detach from scsi transport | 2261 | /* detach from scsi transport |
| 2038 | * must happen after all destroys are done, therefor after the flush */ | 2262 | * must happen after all destroys are done, therefor after the flush */ |
| 2039 | fcoe_if_exit(); | 2263 | fcoe_if_exit(); |
| 2040 | } | 2264 | } |
| 2041 | module_exit(fcoe_exit); | 2265 | module_exit(fcoe_exit); |
| 2266 | |||
| 2267 | /** | ||
| 2268 | * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler | ||
| 2269 | * @seq: active sequence in the FLOGI or FDISC exchange | ||
| 2270 | * @fp: response frame, or error encoded in a pointer (timeout) | ||
| 2271 | * @arg: pointer the the fcoe_ctlr structure | ||
| 2272 | * | ||
| 2273 | * This handles MAC address managment for FCoE, then passes control on to | ||
| 2274 | * the libfc FLOGI response handler. | ||
| 2275 | */ | ||
| 2276 | static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | ||
| 2277 | { | ||
| 2278 | struct fcoe_ctlr *fip = arg; | ||
| 2279 | struct fc_exch *exch = fc_seq_exch(seq); | ||
| 2280 | struct fc_lport *lport = exch->lp; | ||
| 2281 | u8 *mac; | ||
| 2282 | |||
| 2283 | if (IS_ERR(fp)) | ||
| 2284 | goto done; | ||
| 2285 | |||
| 2286 | mac = fr_cb(fp)->granted_mac; | ||
| 2287 | if (is_zero_ether_addr(mac)) { | ||
| 2288 | /* pre-FIP */ | ||
| 2289 | if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { | ||
| 2290 | fc_frame_free(fp); | ||
| 2291 | return; | ||
| 2292 | } | ||
| 2293 | } | ||
| 2294 | fcoe_update_src_mac(lport, mac); | ||
| 2295 | done: | ||
| 2296 | fc_lport_flogi_resp(seq, fp, lport); | ||
| 2297 | } | ||
| 2298 | |||
| 2299 | /** | ||
| 2300 | * fcoe_logo_resp() - FCoE specific LOGO response handler | ||
| 2301 | * @seq: active sequence in the LOGO exchange | ||
| 2302 | * @fp: response frame, or error encoded in a pointer (timeout) | ||
| 2303 | * @arg: pointer the the fcoe_ctlr structure | ||
| 2304 | * | ||
| 2305 | * This handles MAC address managment for FCoE, then passes control on to | ||
| 2306 | * the libfc LOGO response handler. | ||
| 2307 | */ | ||
| 2308 | static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | ||
| 2309 | { | ||
| 2310 | struct fc_lport *lport = arg; | ||
| 2311 | static u8 zero_mac[ETH_ALEN] = { 0 }; | ||
| 2312 | |||
| 2313 | if (!IS_ERR(fp)) | ||
| 2314 | fcoe_update_src_mac(lport, zero_mac); | ||
| 2315 | fc_lport_logo_resp(seq, fp, lport); | ||
| 2316 | } | ||
| 2317 | |||
| 2318 | /** | ||
| 2319 | * fcoe_elsct_send - FCoE specific ELS handler | ||
| 2320 | * | ||
| 2321 | * This does special case handling of FIP encapsualted ELS exchanges for FCoE, | ||
| 2322 | * using FCoE specific response handlers and passing the FIP controller as | ||
| 2323 | * the argument (the lport is still available from the exchange). | ||
| 2324 | * | ||
| 2325 | * Most of the work here is just handed off to the libfc routine. | ||
| 2326 | */ | ||
| 2327 | static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, | ||
| 2328 | struct fc_frame *fp, unsigned int op, | ||
| 2329 | void (*resp)(struct fc_seq *, | ||
| 2330 | struct fc_frame *, | ||
| 2331 | void *), | ||
| 2332 | void *arg, u32 timeout) | ||
| 2333 | { | ||
| 2334 | struct fcoe_port *port = lport_priv(lport); | ||
| 2335 | struct fcoe_interface *fcoe = port->fcoe; | ||
| 2336 | struct fcoe_ctlr *fip = &fcoe->ctlr; | ||
| 2337 | struct fc_frame_header *fh = fc_frame_header_get(fp); | ||
| 2338 | |||
| 2339 | switch (op) { | ||
| 2340 | case ELS_FLOGI: | ||
| 2341 | case ELS_FDISC: | ||
| 2342 | return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, | ||
| 2343 | fip, timeout); | ||
| 2344 | case ELS_LOGO: | ||
| 2345 | /* only hook onto fabric logouts, not port logouts */ | ||
| 2346 | if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) | ||
| 2347 | break; | ||
| 2348 | return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, | ||
| 2349 | lport, timeout); | ||
| 2350 | } | ||
| 2351 | return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); | ||
| 2352 | } | ||
| 2353 | |||
| 2354 | /** | ||
| 2355 | * fcoe_vport_create() - create an fc_host/scsi_host for a vport | ||
| 2356 | * @vport: fc_vport object to create a new fc_host for | ||
| 2357 | * @disabled: start the new fc_host in a disabled state by default? | ||
| 2358 | * | ||
| 2359 | * Returns: 0 for success | ||
| 2360 | */ | ||
| 2361 | static int fcoe_vport_create(struct fc_vport *vport, bool disabled) | ||
| 2362 | { | ||
| 2363 | struct Scsi_Host *shost = vport_to_shost(vport); | ||
| 2364 | struct fc_lport *n_port = shost_priv(shost); | ||
| 2365 | struct fcoe_port *port = lport_priv(n_port); | ||
| 2366 | struct fcoe_interface *fcoe = port->fcoe; | ||
| 2367 | struct net_device *netdev = fcoe->netdev; | ||
| 2368 | struct fc_lport *vn_port; | ||
| 2369 | |||
| 2370 | mutex_lock(&fcoe_config_mutex); | ||
| 2371 | vn_port = fcoe_if_create(fcoe, &vport->dev, 1); | ||
| 2372 | mutex_unlock(&fcoe_config_mutex); | ||
| 2373 | |||
| 2374 | if (IS_ERR(vn_port)) { | ||
| 2375 | printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n", | ||
| 2376 | netdev->name); | ||
| 2377 | return -EIO; | ||
| 2378 | } | ||
| 2379 | |||
| 2380 | if (disabled) { | ||
| 2381 | fc_vport_set_state(vport, FC_VPORT_DISABLED); | ||
| 2382 | } else { | ||
| 2383 | vn_port->boot_time = jiffies; | ||
| 2384 | fc_fabric_login(vn_port); | ||
| 2385 | fc_vport_setlink(vn_port); | ||
| 2386 | } | ||
| 2387 | return 0; | ||
| 2388 | } | ||
| 2389 | |||
| 2390 | /** | ||
| 2391 | * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport | ||
| 2392 | * @vport: fc_vport object that is being destroyed | ||
| 2393 | * | ||
| 2394 | * Returns: 0 for success | ||
| 2395 | */ | ||
| 2396 | static int fcoe_vport_destroy(struct fc_vport *vport) | ||
| 2397 | { | ||
| 2398 | struct Scsi_Host *shost = vport_to_shost(vport); | ||
| 2399 | struct fc_lport *n_port = shost_priv(shost); | ||
| 2400 | struct fc_lport *vn_port = vport->dd_data; | ||
| 2401 | struct fcoe_port *port = lport_priv(vn_port); | ||
| 2402 | |||
| 2403 | mutex_lock(&n_port->lp_mutex); | ||
| 2404 | list_del(&vn_port->list); | ||
| 2405 | mutex_unlock(&n_port->lp_mutex); | ||
| 2406 | schedule_work(&port->destroy_work); | ||
| 2407 | return 0; | ||
| 2408 | } | ||
| 2409 | |||
| 2410 | /** | ||
| 2411 | * fcoe_vport_disable() - change vport state | ||
| 2412 | * @vport: vport to bring online/offline | ||
| 2413 | * @disable: should the vport be disabled? | ||
| 2414 | */ | ||
| 2415 | static int fcoe_vport_disable(struct fc_vport *vport, bool disable) | ||
| 2416 | { | ||
| 2417 | struct fc_lport *lport = vport->dd_data; | ||
| 2418 | |||
| 2419 | if (disable) { | ||
| 2420 | fc_vport_set_state(vport, FC_VPORT_DISABLED); | ||
| 2421 | fc_fabric_logoff(lport); | ||
| 2422 | } else { | ||
| 2423 | lport->boot_time = jiffies; | ||
| 2424 | fc_fabric_login(lport); | ||
| 2425 | fc_vport_setlink(lport); | ||
| 2426 | } | ||
| 2427 | |||
| 2428 | return 0; | ||
| 2429 | } | ||
| 2430 | |||
| 2431 | /** | ||
| 2432 | * fcoe_vport_set_symbolic_name() - append vport string to symbolic name | ||
| 2433 | * @vport: fc_vport with a new symbolic name string | ||
| 2434 | * | ||
| 2435 | * After generating a new symbolic name string, a new RSPN_ID request is | ||
| 2436 | * sent to the name server. There is no response handler, so if it fails | ||
| 2437 | * for some reason it will not be retried. | ||
| 2438 | */ | ||
| 2439 | static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) | ||
| 2440 | { | ||
| 2441 | struct fc_lport *lport = vport->dd_data; | ||
| 2442 | struct fc_frame *fp; | ||
| 2443 | size_t len; | ||
| 2444 | |||
| 2445 | snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, | ||
| 2446 | "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION, | ||
| 2447 | fcoe_netdev(lport)->name, vport->symbolic_name); | ||
| 2448 | |||
| 2449 | if (lport->state != LPORT_ST_READY) | ||
| 2450 | return; | ||
| 2451 | |||
| 2452 | len = strnlen(fc_host_symbolic_name(lport->host), 255); | ||
| 2453 | fp = fc_frame_alloc(lport, | ||
| 2454 | sizeof(struct fc_ct_hdr) + | ||
| 2455 | sizeof(struct fc_ns_rspn) + len); | ||
| 2456 | if (!fp) | ||
| 2457 | return; | ||
| 2458 | lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, | ||
| 2459 | NULL, NULL, 3 * lport->r_a_tov); | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | /** | ||
| 2463 | * fcoe_get_lesb() - Fill the FCoE Link Error Status Block | ||
| 2464 | * @lport: the local port | ||
| 2465 | * @fc_lesb: the link error status block | ||
| 2466 | */ | ||
| 2467 | static void fcoe_get_lesb(struct fc_lport *lport, | ||
| 2468 | struct fc_els_lesb *fc_lesb) | ||
| 2469 | { | ||
| 2470 | unsigned int cpu; | ||
| 2471 | u32 lfc, vlfc, mdac; | ||
| 2472 | struct fcoe_dev_stats *devst; | ||
| 2473 | struct fcoe_fc_els_lesb *lesb; | ||
| 2474 | struct net_device *netdev = fcoe_netdev(lport); | ||
| 2475 | |||
| 2476 | lfc = 0; | ||
| 2477 | vlfc = 0; | ||
| 2478 | mdac = 0; | ||
| 2479 | lesb = (struct fcoe_fc_els_lesb *)fc_lesb; | ||
| 2480 | memset(lesb, 0, sizeof(*lesb)); | ||
| 2481 | for_each_possible_cpu(cpu) { | ||
| 2482 | devst = per_cpu_ptr(lport->dev_stats, cpu); | ||
| 2483 | lfc += devst->LinkFailureCount; | ||
| 2484 | vlfc += devst->VLinkFailureCount; | ||
| 2485 | mdac += devst->MissDiscAdvCount; | ||
| 2486 | } | ||
| 2487 | lesb->lesb_link_fail = htonl(lfc); | ||
| 2488 | lesb->lesb_vlink_fail = htonl(vlfc); | ||
| 2489 | lesb->lesb_miss_fka = htonl(mdac); | ||
| 2490 | lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors); | ||
| 2491 | } | ||
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index ce7f60fb1bc0..c69b2c56c2d1 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define FCOE_NAME "fcoe" | 32 | #define FCOE_NAME "fcoe" |
| 33 | #define FCOE_VENDOR "Open-FCoE.org" | 33 | #define FCOE_VENDOR "Open-FCoE.org" |
| 34 | 34 | ||
| 35 | #define FCOE_MAX_LUN 255 | 35 | #define FCOE_MAX_LUN 0xFFFF |
| 36 | #define FCOE_MAX_FCP_TARGET 256 | 36 | #define FCOE_MAX_FCP_TARGET 256 |
| 37 | 37 | ||
| 38 | #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 | 38 | #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 |
| @@ -40,11 +40,17 @@ | |||
| 40 | #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ | 40 | #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ |
| 41 | #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ | 41 | #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ |
| 42 | 42 | ||
| 43 | /* | ||
| 44 | * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload) | ||
| 45 | * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes | ||
| 46 | */ | ||
| 47 | #define FCOE_MTU 2158 | ||
| 48 | |||
| 43 | unsigned int fcoe_debug_logging; | 49 | unsigned int fcoe_debug_logging; |
| 44 | module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); | 50 | module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); |
| 45 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | 51 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); |
| 46 | 52 | ||
| 47 | #define FCOE_LOGGING 0x01 /* General logging, not categorized */ | 53 | #define FCOE_LOGGING 0x01 /* General logging, not categorized */ |
| 48 | #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ | 54 | #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ |
| 49 | 55 | ||
| 50 | #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ | 56 | #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ |
| @@ -64,8 +70,13 @@ do { \ | |||
| 64 | printk(KERN_INFO "fcoe: %s: " fmt, \ | 70 | printk(KERN_INFO "fcoe: %s: " fmt, \ |
| 65 | netdev->name, ##args);) | 71 | netdev->name, ##args);) |
| 66 | 72 | ||
| 67 | /* | 73 | /** |
| 68 | * this percpu struct for fcoe | 74 | * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads |
| 75 | * @thread: The thread context | ||
| 76 | * @fcoe_rx_list: The queue of pending packets to process | ||
| 77 | * @page: The memory page for calculating frame trailer CRCs | ||
| 78 | * @crc_eof_offset: The offset into the CRC page pointing to available | ||
| 79 | * memory for a new trailer | ||
| 69 | */ | 80 | */ |
| 70 | struct fcoe_percpu_s { | 81 | struct fcoe_percpu_s { |
| 71 | struct task_struct *thread; | 82 | struct task_struct *thread; |
| @@ -74,37 +85,62 @@ struct fcoe_percpu_s { | |||
| 74 | int crc_eof_offset; | 85 | int crc_eof_offset; |
| 75 | }; | 86 | }; |
| 76 | 87 | ||
| 77 | /* | 88 | /** |
| 78 | * an FCoE interface, 1:1 with netdev | 89 | * struct fcoe_interface - A FCoE interface |
| 90 | * @list: Handle for a list of FCoE interfaces | ||
| 91 | * @netdev: The associated net device | ||
| 92 | * @fcoe_packet_type: FCoE packet type | ||
| 93 | * @fip_packet_type: FIP packet type | ||
| 94 | * @ctlr: The FCoE controller (for FIP) | ||
| 95 | * @oem: The offload exchange manager for all local port | ||
| 96 | * instances associated with this port | ||
| 97 | * @kref: The kernel reference | ||
| 98 | * | ||
| 99 | * This structure is 1:1 with a net devive. | ||
| 79 | */ | 100 | */ |
| 80 | struct fcoe_interface { | 101 | struct fcoe_interface { |
| 81 | struct list_head list; | 102 | struct list_head list; |
| 82 | struct net_device *netdev; | 103 | struct net_device *netdev; |
| 83 | struct packet_type fcoe_packet_type; | 104 | struct packet_type fcoe_packet_type; |
| 84 | struct packet_type fip_packet_type; | 105 | struct packet_type fip_packet_type; |
| 85 | struct fcoe_ctlr ctlr; | 106 | struct fcoe_ctlr ctlr; |
| 86 | struct fc_exch_mgr *oem; /* offload exchange manager */ | 107 | struct fc_exch_mgr *oem; |
| 87 | struct kref kref; | 108 | struct kref kref; |
| 88 | }; | 109 | }; |
| 89 | 110 | ||
| 90 | /* | 111 | /** |
| 91 | * the FCoE private structure that's allocated along with the | 112 | * struct fcoe_port - The FCoE private structure |
| 92 | * Scsi_Host and libfc fc_lport structures | 113 | * @fcoe: The associated fcoe interface |
| 114 | * @lport: The associated local port | ||
| 115 | * @fcoe_pending_queue: The pending Rx queue of skbs | ||
| 116 | * @fcoe_pending_queue_active: Indicates if the pending queue is active | ||
| 117 | * @timer: The queue timer | ||
| 118 | * @destroy_work: Handle for work context | ||
| 119 | * (to prevent RTNL deadlocks) | ||
| 120 | * @data_srt_addr: Source address for data | ||
| 121 | * | ||
| 122 | * An instance of this structure is to be allocated along with the | ||
| 123 | * Scsi_Host and libfc fc_lport structures. | ||
| 93 | */ | 124 | */ |
| 94 | struct fcoe_port { | 125 | struct fcoe_port { |
| 95 | struct fcoe_interface *fcoe; | 126 | struct fcoe_interface *fcoe; |
| 96 | struct fc_lport *lport; | 127 | struct fc_lport *lport; |
| 97 | struct sk_buff_head fcoe_pending_queue; | 128 | struct sk_buff_head fcoe_pending_queue; |
| 98 | u8 fcoe_pending_queue_active; | 129 | u8 fcoe_pending_queue_active; |
| 99 | struct timer_list timer; /* queue timer */ | 130 | struct timer_list timer; |
| 100 | struct work_struct destroy_work; /* to prevent rtnl deadlocks */ | 131 | struct work_struct destroy_work; |
| 132 | u8 data_src_addr[ETH_ALEN]; | ||
| 101 | }; | 133 | }; |
| 102 | 134 | ||
| 103 | #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) | 135 | #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) |
| 104 | 136 | ||
| 105 | static inline struct net_device *fcoe_netdev(const struct fc_lport *lp) | 137 | /** |
| 138 | * fcoe_netdev() - Return the net device associated with a local port | ||
| 139 | * @lport: The local port to get the net device from | ||
| 140 | */ | ||
| 141 | static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) | ||
| 106 | { | 142 | { |
| 107 | return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev; | 143 | return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev; |
| 108 | } | 144 | } |
| 109 | 145 | ||
| 110 | #endif /* _FCOE_H_ */ | 146 | #endif /* _FCOE_H_ */ |
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 11ae5c94608b..9823291395ad 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
| @@ -59,26 +59,30 @@ unsigned int libfcoe_debug_logging; | |||
| 59 | module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); | 59 | module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); |
| 60 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | 60 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); |
| 61 | 61 | ||
| 62 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ | 62 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ |
| 63 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ | 63 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ |
| 64 | 64 | ||
| 65 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ | 65 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ |
| 66 | do { \ | 66 | do { \ |
| 67 | if (unlikely(libfcoe_debug_logging & LEVEL)) \ | 67 | if (unlikely(libfcoe_debug_logging & LEVEL)) \ |
| 68 | do { \ | 68 | do { \ |
| 69 | CMD; \ | 69 | CMD; \ |
| 70 | } while (0); \ | 70 | } while (0); \ |
| 71 | } while (0) | 71 | } while (0) |
| 72 | 72 | ||
| 73 | #define LIBFCOE_DBG(fmt, args...) \ | 73 | #define LIBFCOE_DBG(fmt, args...) \ |
| 74 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ | 74 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ |
| 75 | printk(KERN_INFO "libfcoe: " fmt, ##args);) | 75 | printk(KERN_INFO "libfcoe: " fmt, ##args);) |
| 76 | 76 | ||
| 77 | #define LIBFCOE_FIP_DBG(fmt, args...) \ | 77 | #define LIBFCOE_FIP_DBG(fip, fmt, args...) \ |
| 78 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ | 78 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ |
| 79 | printk(KERN_INFO "fip: " fmt, ##args);) | 79 | printk(KERN_INFO "host%d: fip: " fmt, \ |
| 80 | (fip)->lp->host->host_no, ##args);) | ||
| 80 | 81 | ||
| 81 | /* | 82 | /** |
| 83 | * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid | ||
| 84 | * @fcf: The FCF to check | ||
| 85 | * | ||
| 82 | * Return non-zero if FCF fcoe_size has been validated. | 86 | * Return non-zero if FCF fcoe_size has been validated. |
| 83 | */ | 87 | */ |
| 84 | static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) | 88 | static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) |
| @@ -86,7 +90,10 @@ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) | |||
| 86 | return (fcf->flags & FIP_FL_SOL) != 0; | 90 | return (fcf->flags & FIP_FL_SOL) != 0; |
| 87 | } | 91 | } |
| 88 | 92 | ||
| 89 | /* | 93 | /** |
| 94 | * fcoe_ctlr_fcf_usable() - Check if a FCF is usable | ||
| 95 | * @fcf: The FCF to check | ||
| 96 | * | ||
| 90 | * Return non-zero if the FCF is usable. | 97 | * Return non-zero if the FCF is usable. |
| 91 | */ | 98 | */ |
| 92 | static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) | 99 | static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) |
| @@ -97,12 +104,13 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) | |||
| 97 | } | 104 | } |
| 98 | 105 | ||
| 99 | /** | 106 | /** |
| 100 | * fcoe_ctlr_init() - Initialize the FCoE Controller instance. | 107 | * fcoe_ctlr_init() - Initialize the FCoE Controller instance |
| 101 | * @fip: FCoE controller. | 108 | * @fip: The FCoE controller to initialize |
| 102 | */ | 109 | */ |
| 103 | void fcoe_ctlr_init(struct fcoe_ctlr *fip) | 110 | void fcoe_ctlr_init(struct fcoe_ctlr *fip) |
| 104 | { | 111 | { |
| 105 | fip->state = FIP_ST_LINK_WAIT; | 112 | fip->state = FIP_ST_LINK_WAIT; |
| 113 | fip->mode = FIP_ST_AUTO; | ||
| 106 | INIT_LIST_HEAD(&fip->fcfs); | 114 | INIT_LIST_HEAD(&fip->fcfs); |
| 107 | spin_lock_init(&fip->lock); | 115 | spin_lock_init(&fip->lock); |
| 108 | fip->flogi_oxid = FC_XID_UNKNOWN; | 116 | fip->flogi_oxid = FC_XID_UNKNOWN; |
| @@ -114,8 +122,8 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip) | |||
| 114 | EXPORT_SYMBOL(fcoe_ctlr_init); | 122 | EXPORT_SYMBOL(fcoe_ctlr_init); |
| 115 | 123 | ||
| 116 | /** | 124 | /** |
| 117 | * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller. | 125 | * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller |
| 118 | * @fip: FCoE controller. | 126 | * @fip: The FCoE controller whose FCFs are to be reset |
| 119 | * | 127 | * |
| 120 | * Called with &fcoe_ctlr lock held. | 128 | * Called with &fcoe_ctlr lock held. |
| 121 | */ | 129 | */ |
| @@ -134,8 +142,8 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) | |||
| 134 | } | 142 | } |
| 135 | 143 | ||
| 136 | /** | 144 | /** |
| 137 | * fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller. | 145 | * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller |
| 138 | * @fip: FCoE controller. | 146 | * @fip: The FCoE controller to tear down |
| 139 | * | 147 | * |
| 140 | * This is called by FCoE drivers before freeing the &fcoe_ctlr. | 148 | * This is called by FCoE drivers before freeing the &fcoe_ctlr. |
| 141 | * | 149 | * |
| @@ -148,9 +156,7 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) | |||
| 148 | void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) | 156 | void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) |
| 149 | { | 157 | { |
| 150 | cancel_work_sync(&fip->recv_work); | 158 | cancel_work_sync(&fip->recv_work); |
| 151 | spin_lock_bh(&fip->fip_recv_list.lock); | 159 | skb_queue_purge(&fip->fip_recv_list); |
| 152 | __skb_queue_purge(&fip->fip_recv_list); | ||
| 153 | spin_unlock_bh(&fip->fip_recv_list.lock); | ||
| 154 | 160 | ||
| 155 | spin_lock_bh(&fip->lock); | 161 | spin_lock_bh(&fip->lock); |
| 156 | fip->state = FIP_ST_DISABLED; | 162 | fip->state = FIP_ST_DISABLED; |
| @@ -162,8 +168,8 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) | |||
| 162 | EXPORT_SYMBOL(fcoe_ctlr_destroy); | 168 | EXPORT_SYMBOL(fcoe_ctlr_destroy); |
| 163 | 169 | ||
| 164 | /** | 170 | /** |
| 165 | * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port. | 171 | * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port |
| 166 | * @fip: FCoE controller. | 172 | * @fip: The FCoE controller to get the maximum FCoE size from |
| 167 | * | 173 | * |
| 168 | * Returns the maximum packet size including the FCoE header and trailer, | 174 | * Returns the maximum packet size including the FCoE header and trailer, |
| 169 | * but not including any Ethernet or VLAN headers. | 175 | * but not including any Ethernet or VLAN headers. |
| @@ -180,9 +186,9 @@ static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) | |||
| 180 | } | 186 | } |
| 181 | 187 | ||
| 182 | /** | 188 | /** |
| 183 | * fcoe_ctlr_solicit() - Send a solicitation. | 189 | * fcoe_ctlr_solicit() - Send a FIP solicitation |
| 184 | * @fip: FCoE controller. | 190 | * @fip: The FCoE controller to send the solicitation on |
| 185 | * @fcf: Destination FCF. If NULL, a multicast solicitation is sent. | 191 | * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) |
| 186 | */ | 192 | */ |
| 187 | static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) | 193 | static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) |
| 188 | { | 194 | { |
| @@ -241,8 +247,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) | |||
| 241 | } | 247 | } |
| 242 | 248 | ||
| 243 | /** | 249 | /** |
| 244 | * fcoe_ctlr_link_up() - Start FCoE controller. | 250 | * fcoe_ctlr_link_up() - Start FCoE controller |
| 245 | * @fip: FCoE controller. | 251 | * @fip: The FCoE controller to start |
| 246 | * | 252 | * |
| 247 | * Called from the LLD when the network link is ready. | 253 | * Called from the LLD when the network link is ready. |
| 248 | */ | 254 | */ |
| @@ -255,11 +261,12 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) | |||
| 255 | spin_unlock_bh(&fip->lock); | 261 | spin_unlock_bh(&fip->lock); |
| 256 | fc_linkup(fip->lp); | 262 | fc_linkup(fip->lp); |
| 257 | } else if (fip->state == FIP_ST_LINK_WAIT) { | 263 | } else if (fip->state == FIP_ST_LINK_WAIT) { |
| 258 | fip->state = FIP_ST_AUTO; | 264 | fip->state = fip->mode; |
| 259 | fip->last_link = 1; | 265 | fip->last_link = 1; |
| 260 | fip->link = 1; | 266 | fip->link = 1; |
| 261 | spin_unlock_bh(&fip->lock); | 267 | spin_unlock_bh(&fip->lock); |
| 262 | LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); | 268 | if (fip->state == FIP_ST_AUTO) |
| 269 | LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); | ||
| 263 | fc_linkup(fip->lp); | 270 | fc_linkup(fip->lp); |
| 264 | fcoe_ctlr_solicit(fip, NULL); | 271 | fcoe_ctlr_solicit(fip, NULL); |
| 265 | } else | 272 | } else |
| @@ -268,45 +275,23 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) | |||
| 268 | EXPORT_SYMBOL(fcoe_ctlr_link_up); | 275 | EXPORT_SYMBOL(fcoe_ctlr_link_up); |
| 269 | 276 | ||
| 270 | /** | 277 | /** |
| 271 | * fcoe_ctlr_reset() - Reset FIP. | 278 | * fcoe_ctlr_reset() - Reset a FCoE controller |
| 272 | * @fip: FCoE controller. | 279 | * @fip: The FCoE controller to reset |
| 273 | * @new_state: FIP state to be entered. | ||
| 274 | * | ||
| 275 | * Returns non-zero if the link was up and now isn't. | ||
| 276 | */ | 280 | */ |
| 277 | static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) | 281 | static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) |
| 278 | { | 282 | { |
| 279 | struct fc_lport *lp = fip->lp; | ||
| 280 | int link_dropped; | ||
| 281 | |||
| 282 | spin_lock_bh(&fip->lock); | ||
| 283 | fcoe_ctlr_reset_fcfs(fip); | 283 | fcoe_ctlr_reset_fcfs(fip); |
| 284 | del_timer(&fip->timer); | 284 | del_timer(&fip->timer); |
| 285 | fip->state = new_state; | ||
| 286 | fip->ctlr_ka_time = 0; | 285 | fip->ctlr_ka_time = 0; |
| 287 | fip->port_ka_time = 0; | 286 | fip->port_ka_time = 0; |
| 288 | fip->sol_time = 0; | 287 | fip->sol_time = 0; |
| 289 | fip->flogi_oxid = FC_XID_UNKNOWN; | 288 | fip->flogi_oxid = FC_XID_UNKNOWN; |
| 290 | fip->map_dest = 0; | 289 | fip->map_dest = 0; |
| 291 | fip->last_link = 0; | ||
| 292 | link_dropped = fip->link; | ||
| 293 | fip->link = 0; | ||
| 294 | spin_unlock_bh(&fip->lock); | ||
| 295 | |||
| 296 | if (link_dropped) | ||
| 297 | fc_linkdown(lp); | ||
| 298 | |||
| 299 | if (new_state == FIP_ST_ENABLED) { | ||
| 300 | fcoe_ctlr_solicit(fip, NULL); | ||
| 301 | fc_linkup(lp); | ||
| 302 | link_dropped = 0; | ||
| 303 | } | ||
| 304 | return link_dropped; | ||
| 305 | } | 290 | } |
| 306 | 291 | ||
| 307 | /** | 292 | /** |
| 308 | * fcoe_ctlr_link_down() - Stop FCoE controller. | 293 | * fcoe_ctlr_link_down() - Stop a FCoE controller |
| 309 | * @fip: FCoE controller. | 294 | * @fip: The FCoE controller to be stopped |
| 310 | * | 295 | * |
| 311 | * Returns non-zero if the link was up and now isn't. | 296 | * Returns non-zero if the link was up and now isn't. |
| 312 | * | 297 | * |
| @@ -315,15 +300,29 @@ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) | |||
| 315 | */ | 300 | */ |
| 316 | int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) | 301 | int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) |
| 317 | { | 302 | { |
| 318 | return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); | 303 | int link_dropped; |
| 304 | |||
| 305 | LIBFCOE_FIP_DBG(fip, "link down.\n"); | ||
| 306 | spin_lock_bh(&fip->lock); | ||
| 307 | fcoe_ctlr_reset(fip); | ||
| 308 | link_dropped = fip->link; | ||
| 309 | fip->link = 0; | ||
| 310 | fip->last_link = 0; | ||
| 311 | fip->state = FIP_ST_LINK_WAIT; | ||
| 312 | spin_unlock_bh(&fip->lock); | ||
| 313 | |||
| 314 | if (link_dropped) | ||
| 315 | fc_linkdown(fip->lp); | ||
| 316 | return link_dropped; | ||
| 319 | } | 317 | } |
| 320 | EXPORT_SYMBOL(fcoe_ctlr_link_down); | 318 | EXPORT_SYMBOL(fcoe_ctlr_link_down); |
| 321 | 319 | ||
| 322 | /** | 320 | /** |
| 323 | * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF. | 321 | * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF |
| 324 | * @fip: FCoE controller. | 322 | * @fip: The FCoE controller to send the FKA on |
| 325 | * @ports: 0 for controller keep-alive, 1 for port keep-alive. | 323 | * @lport: libfc fc_lport to send from |
| 326 | * @sa: source MAC address. | 324 | * @ports: 0 for controller keep-alive, 1 for port keep-alive |
| 325 | * @sa: The source MAC address | ||
| 327 | * | 326 | * |
| 328 | * A controller keep-alive is sent every fka_period (typically 8 seconds). | 327 | * A controller keep-alive is sent every fka_period (typically 8 seconds). |
| 329 | * The source MAC is the native MAC address. | 328 | * The source MAC is the native MAC address. |
| @@ -332,7 +331,9 @@ EXPORT_SYMBOL(fcoe_ctlr_link_down); | |||
| 332 | * The source MAC is the assigned mapped source address. | 331 | * The source MAC is the assigned mapped source address. |
| 333 | * The destination is the FCF's F-port. | 332 | * The destination is the FCF's F-port. |
| 334 | */ | 333 | */ |
| 335 | static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | 334 | static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, |
| 335 | struct fc_lport *lport, | ||
| 336 | int ports, u8 *sa) | ||
| 336 | { | 337 | { |
| 337 | struct sk_buff *skb; | 338 | struct sk_buff *skb; |
| 338 | struct fip_kal { | 339 | struct fip_kal { |
| @@ -350,8 +351,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | |||
| 350 | if (!fcf || !fc_host_port_id(lp->host)) | 351 | if (!fcf || !fc_host_port_id(lp->host)) |
| 351 | return; | 352 | return; |
| 352 | 353 | ||
| 353 | len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr); | 354 | len = sizeof(*kal) + ports * sizeof(*vn); |
| 354 | BUG_ON(len < sizeof(*kal) + sizeof(*vn)); | ||
| 355 | skb = dev_alloc_skb(len); | 355 | skb = dev_alloc_skb(len); |
| 356 | if (!skb) | 356 | if (!skb) |
| 357 | return; | 357 | return; |
| @@ -366,7 +366,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | |||
| 366 | kal->fip.fip_op = htons(FIP_OP_CTRL); | 366 | kal->fip.fip_op = htons(FIP_OP_CTRL); |
| 367 | kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; | 367 | kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; |
| 368 | kal->fip.fip_dl_len = htons((sizeof(kal->mac) + | 368 | kal->fip.fip_dl_len = htons((sizeof(kal->mac) + |
| 369 | ports * sizeof(*vn)) / FIP_BPW); | 369 | ports * sizeof(*vn)) / FIP_BPW); |
| 370 | kal->fip.fip_flags = htons(FIP_FL_FPMA); | 370 | kal->fip.fip_flags = htons(FIP_FL_FPMA); |
| 371 | if (fip->spma) | 371 | if (fip->spma) |
| 372 | kal->fip.fip_flags |= htons(FIP_FL_SPMA); | 372 | kal->fip.fip_flags |= htons(FIP_FL_SPMA); |
| @@ -374,16 +374,14 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | |||
| 374 | kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; | 374 | kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; |
| 375 | kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; | 375 | kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; |
| 376 | memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); | 376 | memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); |
| 377 | |||
| 378 | if (ports) { | 377 | if (ports) { |
| 379 | vn = (struct fip_vn_desc *)(kal + 1); | 378 | vn = (struct fip_vn_desc *)(kal + 1); |
| 380 | vn->fd_desc.fip_dtype = FIP_DT_VN_ID; | 379 | vn->fd_desc.fip_dtype = FIP_DT_VN_ID; |
| 381 | vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; | 380 | vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; |
| 382 | memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN); | 381 | memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); |
| 383 | hton24(vn->fd_fc_id, fc_host_port_id(lp->host)); | 382 | hton24(vn->fd_fc_id, fc_host_port_id(lp->host)); |
| 384 | put_unaligned_be64(lp->wwpn, &vn->fd_wwpn); | 383 | put_unaligned_be64(lp->wwpn, &vn->fd_wwpn); |
| 385 | } | 384 | } |
| 386 | |||
| 387 | skb_put(skb, len); | 385 | skb_put(skb, len); |
| 388 | skb->protocol = htons(ETH_P_FIP); | 386 | skb->protocol = htons(ETH_P_FIP); |
| 389 | skb_reset_mac_header(skb); | 387 | skb_reset_mac_header(skb); |
| @@ -392,10 +390,10 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | |||
| 392 | } | 390 | } |
| 393 | 391 | ||
| 394 | /** | 392 | /** |
| 395 | * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it. | 393 | * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it |
| 396 | * @fip: FCoE controller. | 394 | * @fip: The FCoE controller for the ELS frame |
| 397 | * @dtype: FIP descriptor type for the frame. | 395 | * @dtype: The FIP descriptor type for the frame |
| 398 | * @skb: FCoE ELS frame including FC header but no FCoE headers. | 396 | * @skb: The FCoE ELS frame including FC header but no FCoE headers |
| 399 | * | 397 | * |
| 400 | * Returns non-zero error code on failure. | 398 | * Returns non-zero error code on failure. |
| 401 | * | 399 | * |
| @@ -405,7 +403,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) | |||
| 405 | * Headroom includes the FIP encapsulation description, FIP header, and | 403 | * Headroom includes the FIP encapsulation description, FIP header, and |
| 406 | * Ethernet header. The tailroom is for the FIP MAC descriptor. | 404 | * Ethernet header. The tailroom is for the FIP MAC descriptor. |
| 407 | */ | 405 | */ |
| 408 | static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, | 406 | static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, |
| 409 | u8 dtype, struct sk_buff *skb) | 407 | u8 dtype, struct sk_buff *skb) |
| 410 | { | 408 | { |
| 411 | struct fip_encaps_head { | 409 | struct fip_encaps_head { |
| @@ -449,8 +447,8 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, | |||
| 449 | memset(mac, 0, sizeof(mac)); | 447 | memset(mac, 0, sizeof(mac)); |
| 450 | mac->fd_desc.fip_dtype = FIP_DT_MAC; | 448 | mac->fd_desc.fip_dtype = FIP_DT_MAC; |
| 451 | mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; | 449 | mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; |
| 452 | if (dtype != FIP_DT_FLOGI) | 450 | if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) |
| 453 | memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); | 451 | memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); |
| 454 | else if (fip->spma) | 452 | else if (fip->spma) |
| 455 | memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); | 453 | memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); |
| 456 | 454 | ||
| @@ -463,6 +461,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, | |||
| 463 | /** | 461 | /** |
| 464 | * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. | 462 | * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. |
| 465 | * @fip: FCoE controller. | 463 | * @fip: FCoE controller. |
| 464 | * @lport: libfc fc_lport to send from | ||
| 466 | * @skb: FCoE ELS frame including FC header but no FCoE headers. | 465 | * @skb: FCoE ELS frame including FC header but no FCoE headers. |
| 467 | * | 466 | * |
| 468 | * Returns a non-zero error code if the frame should not be sent. | 467 | * Returns a non-zero error code if the frame should not be sent. |
| @@ -471,11 +470,13 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, | |||
| 471 | * The caller must check that the length is a multiple of 4. | 470 | * The caller must check that the length is a multiple of 4. |
| 472 | * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). | 471 | * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). |
| 473 | */ | 472 | */ |
| 474 | int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | 473 | int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, |
| 474 | struct sk_buff *skb) | ||
| 475 | { | 475 | { |
| 476 | struct fc_frame_header *fh; | 476 | struct fc_frame_header *fh; |
| 477 | u16 old_xid; | 477 | u16 old_xid; |
| 478 | u8 op; | 478 | u8 op; |
| 479 | u8 mac[ETH_ALEN]; | ||
| 479 | 480 | ||
| 480 | fh = (struct fc_frame_header *)skb->data; | 481 | fh = (struct fc_frame_header *)skb->data; |
| 481 | op = *(u8 *)(fh + 1); | 482 | op = *(u8 *)(fh + 1); |
| @@ -498,6 +499,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 498 | 499 | ||
| 499 | if (fip->state == FIP_ST_NON_FIP) | 500 | if (fip->state == FIP_ST_NON_FIP) |
| 500 | return 0; | 501 | return 0; |
| 502 | if (!fip->sel_fcf) | ||
| 503 | goto drop; | ||
| 501 | 504 | ||
| 502 | switch (op) { | 505 | switch (op) { |
| 503 | case ELS_FLOGI: | 506 | case ELS_FLOGI: |
| @@ -530,14 +533,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 530 | * FLOGI. | 533 | * FLOGI. |
| 531 | */ | 534 | */ |
| 532 | fip->flogi_oxid = FC_XID_UNKNOWN; | 535 | fip->flogi_oxid = FC_XID_UNKNOWN; |
| 533 | fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id); | 536 | fc_fcoe_set_mac(mac, fh->fh_d_id); |
| 537 | fip->update_mac(lport, mac); | ||
| 534 | return 0; | 538 | return 0; |
| 535 | default: | 539 | default: |
| 536 | if (fip->state != FIP_ST_ENABLED) | 540 | if (fip->state != FIP_ST_ENABLED) |
| 537 | goto drop; | 541 | goto drop; |
| 538 | return 0; | 542 | return 0; |
| 539 | } | 543 | } |
| 540 | if (fcoe_ctlr_encaps(fip, op, skb)) | 544 | if (fcoe_ctlr_encaps(fip, lport, op, skb)) |
| 541 | goto drop; | 545 | goto drop; |
| 542 | fip->send(fip, skb); | 546 | fip->send(fip, skb); |
| 543 | return -EINPROGRESS; | 547 | return -EINPROGRESS; |
| @@ -547,9 +551,9 @@ drop: | |||
| 547 | } | 551 | } |
| 548 | EXPORT_SYMBOL(fcoe_ctlr_els_send); | 552 | EXPORT_SYMBOL(fcoe_ctlr_els_send); |
| 549 | 553 | ||
| 550 | /* | 554 | /** |
| 551 | * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller. | 555 | * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller |
| 552 | * @fip: FCoE controller. | 556 | * @fip: The FCoE controller to free FCFs on |
| 553 | * | 557 | * |
| 554 | * Called with lock held. | 558 | * Called with lock held. |
| 555 | * | 559 | * |
| @@ -558,14 +562,28 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send); | |||
| 558 | * times its keep-alive period including fuzz. | 562 | * times its keep-alive period including fuzz. |
| 559 | * | 563 | * |
| 560 | * In addition, determine the time when an FCF selection can occur. | 564 | * In addition, determine the time when an FCF selection can occur. |
| 565 | * | ||
| 566 | * Also, increment the MissDiscAdvCount when no advertisement is received | ||
| 567 | * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). | ||
| 561 | */ | 568 | */ |
| 562 | static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) | 569 | static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) |
| 563 | { | 570 | { |
| 564 | struct fcoe_fcf *fcf; | 571 | struct fcoe_fcf *fcf; |
| 565 | struct fcoe_fcf *next; | 572 | struct fcoe_fcf *next; |
| 566 | unsigned long sel_time = 0; | 573 | unsigned long sel_time = 0; |
| 574 | unsigned long mda_time = 0; | ||
| 567 | 575 | ||
| 568 | list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { | 576 | list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { |
| 577 | mda_time = fcf->fka_period + (fcf->fka_period >> 1); | ||
| 578 | if ((fip->sel_fcf == fcf) && | ||
| 579 | (time_after(jiffies, fcf->time + mda_time))) { | ||
| 580 | mod_timer(&fip->timer, jiffies + mda_time); | ||
| 581 | fc_lport_get_stats(fip->lp)->MissDiscAdvCount++; | ||
| 582 | printk(KERN_INFO "libfcoe: host%d: Missing Discovery " | ||
| 583 | "Advertisement for fab %llx count %lld\n", | ||
| 584 | fip->lp->host->host_no, fcf->fabric_name, | ||
| 585 | fc_lport_get_stats(fip->lp)->MissDiscAdvCount); | ||
| 586 | } | ||
| 569 | if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + | 587 | if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + |
| 570 | msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { | 588 | msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { |
| 571 | if (fip->sel_fcf == fcf) | 589 | if (fip->sel_fcf == fcf) |
| @@ -574,6 +592,7 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) | |||
| 574 | WARN_ON(!fip->fcf_count); | 592 | WARN_ON(!fip->fcf_count); |
| 575 | fip->fcf_count--; | 593 | fip->fcf_count--; |
| 576 | kfree(fcf); | 594 | kfree(fcf); |
| 595 | fc_lport_get_stats(fip->lp)->VLinkFailureCount++; | ||
| 577 | } else if (fcoe_ctlr_mtu_valid(fcf) && | 596 | } else if (fcoe_ctlr_mtu_valid(fcf) && |
| 578 | (!sel_time || time_before(sel_time, fcf->time))) { | 597 | (!sel_time || time_before(sel_time, fcf->time))) { |
| 579 | sel_time = fcf->time; | 598 | sel_time = fcf->time; |
| @@ -590,14 +609,16 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) | |||
| 590 | } | 609 | } |
| 591 | 610 | ||
| 592 | /** | 611 | /** |
| 593 | * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry. | 612 | * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry |
| 594 | * @skb: received FIP advertisement frame | 613 | * @fip: The FCoE controller receiving the advertisement |
| 595 | * @fcf: resulting FCF entry. | 614 | * @skb: The received FIP advertisement frame |
| 615 | * @fcf: The resulting FCF entry | ||
| 596 | * | 616 | * |
| 597 | * Returns zero on a valid parsed advertisement, | 617 | * Returns zero on a valid parsed advertisement, |
| 598 | * otherwise returns non zero value. | 618 | * otherwise returns non zero value. |
| 599 | */ | 619 | */ |
| 600 | static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | 620 | static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, |
| 621 | struct sk_buff *skb, struct fcoe_fcf *fcf) | ||
| 601 | { | 622 | { |
| 602 | struct fip_header *fiph; | 623 | struct fip_header *fiph; |
| 603 | struct fip_desc *desc = NULL; | 624 | struct fip_desc *desc = NULL; |
| @@ -636,7 +657,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
| 636 | ((struct fip_mac_desc *)desc)->fd_mac, | 657 | ((struct fip_mac_desc *)desc)->fd_mac, |
| 637 | ETH_ALEN); | 658 | ETH_ALEN); |
| 638 | if (!is_valid_ether_addr(fcf->fcf_mac)) { | 659 | if (!is_valid_ether_addr(fcf->fcf_mac)) { |
| 639 | LIBFCOE_FIP_DBG("Invalid MAC address " | 660 | LIBFCOE_FIP_DBG(fip, "Invalid MAC address " |
| 640 | "in FIP adv\n"); | 661 | "in FIP adv\n"); |
| 641 | return -EINVAL; | 662 | return -EINVAL; |
| 642 | } | 663 | } |
| @@ -659,6 +680,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
| 659 | if (dlen != sizeof(struct fip_fka_desc)) | 680 | if (dlen != sizeof(struct fip_fka_desc)) |
| 660 | goto len_err; | 681 | goto len_err; |
| 661 | fka = (struct fip_fka_desc *)desc; | 682 | fka = (struct fip_fka_desc *)desc; |
| 683 | if (fka->fd_flags & FIP_FKA_ADV_D) | ||
| 684 | fcf->fd_flags = 1; | ||
| 662 | t = ntohl(fka->fd_fka_period); | 685 | t = ntohl(fka->fd_fka_period); |
| 663 | if (t >= FCOE_CTLR_MIN_FKA) | 686 | if (t >= FCOE_CTLR_MIN_FKA) |
| 664 | fcf->fka_period = msecs_to_jiffies(t); | 687 | fcf->fka_period = msecs_to_jiffies(t); |
| @@ -670,7 +693,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
| 670 | case FIP_DT_LOGO: | 693 | case FIP_DT_LOGO: |
| 671 | case FIP_DT_ELP: | 694 | case FIP_DT_ELP: |
| 672 | default: | 695 | default: |
| 673 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " | 696 | LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " |
| 674 | "in FIP adv\n", desc->fip_dtype); | 697 | "in FIP adv\n", desc->fip_dtype); |
| 675 | /* standard says ignore unknown descriptors >= 128 */ | 698 | /* standard says ignore unknown descriptors >= 128 */ |
| 676 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 699 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
| @@ -687,15 +710,15 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
| 687 | return 0; | 710 | return 0; |
| 688 | 711 | ||
| 689 | len_err: | 712 | len_err: |
| 690 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 713 | LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", |
| 691 | desc->fip_dtype, dlen); | 714 | desc->fip_dtype, dlen); |
| 692 | return -EINVAL; | 715 | return -EINVAL; |
| 693 | } | 716 | } |
| 694 | 717 | ||
| 695 | /** | 718 | /** |
| 696 | * fcoe_ctlr_recv_adv() - Handle an incoming advertisement. | 719 | * fcoe_ctlr_recv_adv() - Handle an incoming advertisement |
| 697 | * @fip: FCoE controller. | 720 | * @fip: The FCoE controller receiving the advertisement |
| 698 | * @skb: Received FIP packet. | 721 | * @skb: The received FIP packet |
| 699 | */ | 722 | */ |
| 700 | static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | 723 | static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) |
| 701 | { | 724 | { |
| @@ -706,7 +729,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 706 | int first = 0; | 729 | int first = 0; |
| 707 | int mtu_valid; | 730 | int mtu_valid; |
| 708 | 731 | ||
| 709 | if (fcoe_ctlr_parse_adv(skb, &new)) | 732 | if (fcoe_ctlr_parse_adv(fip, skb, &new)) |
| 710 | return; | 733 | return; |
| 711 | 734 | ||
| 712 | spin_lock_bh(&fip->lock); | 735 | spin_lock_bh(&fip->lock); |
| @@ -752,7 +775,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 752 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); | 775 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); |
| 753 | fcf->time = jiffies; | 776 | fcf->time = jiffies; |
| 754 | if (!found) { | 777 | if (!found) { |
| 755 | LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", | 778 | LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n", |
| 756 | fcf->fabric_name, fcf->fc_map, mtu_valid); | 779 | fcf->fabric_name, fcf->fc_map, mtu_valid); |
| 757 | } | 780 | } |
| 758 | 781 | ||
| @@ -778,7 +801,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 778 | */ | 801 | */ |
| 779 | if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) { | 802 | if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) { |
| 780 | fip->sel_time = jiffies + | 803 | fip->sel_time = jiffies + |
| 781 | msecs_to_jiffies(FCOE_CTLR_START_DELAY); | 804 | msecs_to_jiffies(FCOE_CTLR_START_DELAY); |
| 782 | if (!timer_pending(&fip->timer) || | 805 | if (!timer_pending(&fip->timer) || |
| 783 | time_before(fip->sel_time, fip->timer.expires)) | 806 | time_before(fip->sel_time, fip->timer.expires)) |
| 784 | mod_timer(&fip->timer, fip->sel_time); | 807 | mod_timer(&fip->timer, fip->sel_time); |
| @@ -788,15 +811,15 @@ out: | |||
| 788 | } | 811 | } |
| 789 | 812 | ||
| 790 | /** | 813 | /** |
| 791 | * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame. | 814 | * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame |
| 792 | * @fip: FCoE controller. | 815 | * @fip: The FCoE controller which received the packet |
| 793 | * @skb: Received FIP packet. | 816 | * @skb: The received FIP packet |
| 794 | */ | 817 | */ |
| 795 | static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | 818 | static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) |
| 796 | { | 819 | { |
| 797 | struct fc_lport *lp = fip->lp; | 820 | struct fc_lport *lport = fip->lp; |
| 798 | struct fip_header *fiph; | 821 | struct fip_header *fiph; |
| 799 | struct fc_frame *fp; | 822 | struct fc_frame *fp = (struct fc_frame *)skb; |
| 800 | struct fc_frame_header *fh = NULL; | 823 | struct fc_frame_header *fh = NULL; |
| 801 | struct fip_desc *desc; | 824 | struct fip_desc *desc; |
| 802 | struct fip_encaps *els; | 825 | struct fip_encaps *els; |
| @@ -831,10 +854,11 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 831 | ((struct fip_mac_desc *)desc)->fd_mac, | 854 | ((struct fip_mac_desc *)desc)->fd_mac, |
| 832 | ETH_ALEN); | 855 | ETH_ALEN); |
| 833 | if (!is_valid_ether_addr(granted_mac)) { | 856 | if (!is_valid_ether_addr(granted_mac)) { |
| 834 | LIBFCOE_FIP_DBG("Invalid MAC address " | 857 | LIBFCOE_FIP_DBG(fip, "Invalid MAC address " |
| 835 | "in FIP ELS\n"); | 858 | "in FIP ELS\n"); |
| 836 | goto drop; | 859 | goto drop; |
| 837 | } | 860 | } |
| 861 | memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); | ||
| 838 | break; | 862 | break; |
| 839 | case FIP_DT_FLOGI: | 863 | case FIP_DT_FLOGI: |
| 840 | case FIP_DT_FDISC: | 864 | case FIP_DT_FDISC: |
| @@ -850,7 +874,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 850 | els_dtype = desc->fip_dtype; | 874 | els_dtype = desc->fip_dtype; |
| 851 | break; | 875 | break; |
| 852 | default: | 876 | default: |
| 853 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " | 877 | LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " |
| 854 | "in FIP adv\n", desc->fip_dtype); | 878 | "in FIP adv\n", desc->fip_dtype); |
| 855 | /* standard says ignore unknown descriptors >= 128 */ | 879 | /* standard says ignore unknown descriptors >= 128 */ |
| 856 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 880 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
| @@ -867,11 +891,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 867 | 891 | ||
| 868 | if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && | 892 | if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && |
| 869 | fip->flogi_oxid == ntohs(fh->fh_ox_id) && | 893 | fip->flogi_oxid == ntohs(fh->fh_ox_id) && |
| 870 | els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) { | 894 | els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) |
| 871 | fip->flogi_oxid = FC_XID_UNKNOWN; | 895 | fip->flogi_oxid = FC_XID_UNKNOWN; |
| 872 | fip->update_mac(fip, fip->data_src_addr, granted_mac); | ||
| 873 | memcpy(fip->data_src_addr, granted_mac, ETH_ALEN); | ||
| 874 | } | ||
| 875 | 896 | ||
| 876 | /* | 897 | /* |
| 877 | * Convert skb into an fc_frame containing only the ELS. | 898 | * Convert skb into an fc_frame containing only the ELS. |
| @@ -882,32 +903,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 882 | fc_frame_init(fp); | 903 | fc_frame_init(fp); |
| 883 | fr_sof(fp) = FC_SOF_I3; | 904 | fr_sof(fp) = FC_SOF_I3; |
| 884 | fr_eof(fp) = FC_EOF_T; | 905 | fr_eof(fp) = FC_EOF_T; |
| 885 | fr_dev(fp) = lp; | 906 | fr_dev(fp) = lport; |
| 886 | 907 | ||
| 887 | stats = fc_lport_get_stats(lp); | 908 | stats = fc_lport_get_stats(lport); |
| 888 | stats->RxFrames++; | 909 | stats->RxFrames++; |
| 889 | stats->RxWords += skb->len / FIP_BPW; | 910 | stats->RxWords += skb->len / FIP_BPW; |
| 890 | 911 | ||
| 891 | fc_exch_recv(lp, fp); | 912 | fc_exch_recv(lport, fp); |
| 892 | return; | 913 | return; |
| 893 | 914 | ||
| 894 | len_err: | 915 | len_err: |
| 895 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 916 | LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", |
| 896 | desc->fip_dtype, dlen); | 917 | desc->fip_dtype, dlen); |
| 897 | drop: | 918 | drop: |
| 898 | kfree_skb(skb); | 919 | kfree_skb(skb); |
| 899 | } | 920 | } |
| 900 | 921 | ||
| 901 | /** | 922 | /** |
| 902 | * fcoe_ctlr_recv_els() - Handle an incoming link reset frame. | 923 | * fcoe_ctlr_recv_els() - Handle an incoming link reset frame |
| 903 | * @fip: FCoE controller. | 924 | * @fip: The FCoE controller that received the frame |
| 904 | * @fh: Received FIP header. | 925 | * @fh: The received FIP header |
| 905 | * | 926 | * |
| 906 | * There may be multiple VN_Port descriptors. | 927 | * There may be multiple VN_Port descriptors. |
| 907 | * The overall length has already been checked. | 928 | * The overall length has already been checked. |
| 908 | */ | 929 | */ |
| 909 | static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | 930 | static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, |
| 910 | struct fip_header *fh) | 931 | struct fip_header *fh) |
| 911 | { | 932 | { |
| 912 | struct fip_desc *desc; | 933 | struct fip_desc *desc; |
| 913 | struct fip_mac_desc *mp; | 934 | struct fip_mac_desc *mp; |
| @@ -916,13 +937,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
| 916 | size_t rlen; | 937 | size_t rlen; |
| 917 | size_t dlen; | 938 | size_t dlen; |
| 918 | struct fcoe_fcf *fcf = fip->sel_fcf; | 939 | struct fcoe_fcf *fcf = fip->sel_fcf; |
| 919 | struct fc_lport *lp = fip->lp; | 940 | struct fc_lport *lport = fip->lp; |
| 920 | u32 desc_mask; | 941 | u32 desc_mask; |
| 921 | 942 | ||
| 922 | LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); | 943 | LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); |
| 923 | if (!fcf) | 944 | if (!fcf) |
| 924 | return; | 945 | return; |
| 925 | if (!fcf || !fc_host_port_id(lp->host)) | 946 | if (!fcf || !fc_host_port_id(lport->host)) |
| 926 | return; | 947 | return; |
| 927 | 948 | ||
| 928 | /* | 949 | /* |
| @@ -958,9 +979,10 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
| 958 | if (dlen < sizeof(*vp)) | 979 | if (dlen < sizeof(*vp)) |
| 959 | return; | 980 | return; |
| 960 | if (compare_ether_addr(vp->fd_mac, | 981 | if (compare_ether_addr(vp->fd_mac, |
| 961 | fip->data_src_addr) == 0 && | 982 | fip->get_src_addr(lport)) == 0 && |
| 962 | get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn && | 983 | get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && |
| 963 | ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host)) | 984 | ntoh24(vp->fd_fc_id) == |
| 985 | fc_host_port_id(lport->host)) | ||
| 964 | desc_mask &= ~BIT(FIP_DT_VN_ID); | 986 | desc_mask &= ~BIT(FIP_DT_VN_ID); |
| 965 | break; | 987 | break; |
| 966 | default: | 988 | default: |
| @@ -977,33 +999,39 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
| 977 | * reset only if all required descriptors were present and valid. | 999 | * reset only if all required descriptors were present and valid. |
| 978 | */ | 1000 | */ |
| 979 | if (desc_mask) { | 1001 | if (desc_mask) { |
| 980 | LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); | 1002 | LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", |
| 1003 | desc_mask); | ||
| 981 | } else { | 1004 | } else { |
| 982 | LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); | 1005 | LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); |
| 983 | fcoe_ctlr_reset(fip, FIP_ST_ENABLED); | 1006 | |
| 1007 | spin_lock_bh(&fip->lock); | ||
| 1008 | fc_lport_get_stats(lport)->VLinkFailureCount++; | ||
| 1009 | fcoe_ctlr_reset(fip); | ||
| 1010 | spin_unlock_bh(&fip->lock); | ||
| 1011 | |||
| 1012 | fc_lport_reset(fip->lp); | ||
| 1013 | fcoe_ctlr_solicit(fip, NULL); | ||
| 984 | } | 1014 | } |
| 985 | } | 1015 | } |
| 986 | 1016 | ||
| 987 | /** | 1017 | /** |
| 988 | * fcoe_ctlr_recv() - Receive a FIP frame. | 1018 | * fcoe_ctlr_recv() - Receive a FIP packet |
| 989 | * @fip: FCoE controller. | 1019 | * @fip: The FCoE controller that received the packet |
| 990 | * @skb: Received FIP packet. | 1020 | * @skb: The received FIP packet |
| 991 | * | 1021 | * |
| 992 | * This is called from NET_RX_SOFTIRQ. | 1022 | * This may be called from either NET_RX_SOFTIRQ or IRQ. |
| 993 | */ | 1023 | */ |
| 994 | void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | 1024 | void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) |
| 995 | { | 1025 | { |
| 996 | spin_lock_bh(&fip->fip_recv_list.lock); | 1026 | skb_queue_tail(&fip->fip_recv_list, skb); |
| 997 | __skb_queue_tail(&fip->fip_recv_list, skb); | ||
| 998 | spin_unlock_bh(&fip->fip_recv_list.lock); | ||
| 999 | schedule_work(&fip->recv_work); | 1027 | schedule_work(&fip->recv_work); |
| 1000 | } | 1028 | } |
| 1001 | EXPORT_SYMBOL(fcoe_ctlr_recv); | 1029 | EXPORT_SYMBOL(fcoe_ctlr_recv); |
| 1002 | 1030 | ||
| 1003 | /** | 1031 | /** |
| 1004 | * fcoe_ctlr_recv_handler() - Receive a FIP frame. | 1032 | * fcoe_ctlr_recv_handler() - Receive a FIP frame |
| 1005 | * @fip: FCoE controller. | 1033 | * @fip: The FCoE controller that received the frame |
| 1006 | * @skb: Received FIP packet. | 1034 | * @skb: The received FIP frame |
| 1007 | * | 1035 | * |
| 1008 | * Returns non-zero if the frame is dropped. | 1036 | * Returns non-zero if the frame is dropped. |
| 1009 | */ | 1037 | */ |
| @@ -1038,7 +1066,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 1038 | fip->map_dest = 0; | 1066 | fip->map_dest = 0; |
| 1039 | fip->state = FIP_ST_ENABLED; | 1067 | fip->state = FIP_ST_ENABLED; |
| 1040 | state = FIP_ST_ENABLED; | 1068 | state = FIP_ST_ENABLED; |
| 1041 | LIBFCOE_FIP_DBG("Using FIP mode\n"); | 1069 | LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); |
| 1042 | } | 1070 | } |
| 1043 | spin_unlock_bh(&fip->lock); | 1071 | spin_unlock_bh(&fip->lock); |
| 1044 | if (state != FIP_ST_ENABLED) | 1072 | if (state != FIP_ST_ENABLED) |
| @@ -1060,8 +1088,8 @@ drop: | |||
| 1060 | } | 1088 | } |
| 1061 | 1089 | ||
| 1062 | /** | 1090 | /** |
| 1063 | * fcoe_ctlr_select() - Select the best FCF, if possible. | 1091 | * fcoe_ctlr_select() - Select the best FCF (if possible) |
| 1064 | * @fip: FCoE controller. | 1092 | * @fip: The FCoE controller |
| 1065 | * | 1093 | * |
| 1066 | * If there are conflicting advertisements, no FCF can be chosen. | 1094 | * If there are conflicting advertisements, no FCF can be chosen. |
| 1067 | * | 1095 | * |
| @@ -1073,11 +1101,11 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
| 1073 | struct fcoe_fcf *best = NULL; | 1101 | struct fcoe_fcf *best = NULL; |
| 1074 | 1102 | ||
| 1075 | list_for_each_entry(fcf, &fip->fcfs, list) { | 1103 | list_for_each_entry(fcf, &fip->fcfs, list) { |
| 1076 | LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " | 1104 | LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x " |
| 1077 | "val %d\n", fcf->fabric_name, fcf->vfid, | 1105 | "val %d\n", fcf->fabric_name, fcf->vfid, |
| 1078 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); | 1106 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); |
| 1079 | if (!fcoe_ctlr_fcf_usable(fcf)) { | 1107 | if (!fcoe_ctlr_fcf_usable(fcf)) { |
| 1080 | LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " | 1108 | LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid " |
| 1081 | "%savailable\n", fcf->fabric_name, | 1109 | "%savailable\n", fcf->fabric_name, |
| 1082 | fcf->fc_map, (fcf->flags & FIP_FL_SOL) | 1110 | fcf->fc_map, (fcf->flags & FIP_FL_SOL) |
| 1083 | ? "" : "in", (fcf->flags & FIP_FL_AVAIL) | 1111 | ? "" : "in", (fcf->flags & FIP_FL_AVAIL) |
| @@ -1091,7 +1119,7 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
| 1091 | if (fcf->fabric_name != best->fabric_name || | 1119 | if (fcf->fabric_name != best->fabric_name || |
| 1092 | fcf->vfid != best->vfid || | 1120 | fcf->vfid != best->vfid || |
| 1093 | fcf->fc_map != best->fc_map) { | 1121 | fcf->fc_map != best->fc_map) { |
| 1094 | LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " | 1122 | LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " |
| 1095 | "or FC-MAP\n"); | 1123 | "or FC-MAP\n"); |
| 1096 | return; | 1124 | return; |
| 1097 | } | 1125 | } |
| @@ -1102,8 +1130,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
| 1102 | } | 1130 | } |
| 1103 | 1131 | ||
| 1104 | /** | 1132 | /** |
| 1105 | * fcoe_ctlr_timeout() - FIP timer function. | 1133 | * fcoe_ctlr_timeout() - FIP timeout handler |
| 1106 | * @arg: &fcoe_ctlr pointer. | 1134 | * @arg: The FCoE controller that timed out |
| 1107 | * | 1135 | * |
| 1108 | * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives. | 1136 | * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives. |
| 1109 | */ | 1137 | */ |
| @@ -1113,8 +1141,6 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
| 1113 | struct fcoe_fcf *sel; | 1141 | struct fcoe_fcf *sel; |
| 1114 | struct fcoe_fcf *fcf; | 1142 | struct fcoe_fcf *fcf; |
| 1115 | unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); | 1143 | unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); |
| 1116 | u8 send_ctlr_ka; | ||
| 1117 | u8 send_port_ka; | ||
| 1118 | 1144 | ||
| 1119 | spin_lock_bh(&fip->lock); | 1145 | spin_lock_bh(&fip->lock); |
| 1120 | if (fip->state == FIP_ST_DISABLED) { | 1146 | if (fip->state == FIP_ST_DISABLED) { |
| @@ -1140,53 +1166,47 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
| 1140 | fip->lp->host->host_no, sel->fcf_mac); | 1166 | fip->lp->host->host_no, sel->fcf_mac); |
| 1141 | memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); | 1167 | memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); |
| 1142 | fip->port_ka_time = jiffies + | 1168 | fip->port_ka_time = jiffies + |
| 1143 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | 1169 | msecs_to_jiffies(FIP_VN_KA_PERIOD); |
| 1144 | fip->ctlr_ka_time = jiffies + sel->fka_period; | 1170 | fip->ctlr_ka_time = jiffies + sel->fka_period; |
| 1145 | fip->link = 1; | ||
| 1146 | } else { | 1171 | } else { |
| 1147 | printk(KERN_NOTICE "libfcoe: host%d: " | 1172 | printk(KERN_NOTICE "libfcoe: host%d: " |
| 1148 | "FIP Fibre-Channel Forwarder timed out. " | 1173 | "FIP Fibre-Channel Forwarder timed out. " |
| 1149 | "Starting FCF discovery.\n", | 1174 | "Starting FCF discovery.\n", |
| 1150 | fip->lp->host->host_no); | 1175 | fip->lp->host->host_no); |
| 1151 | fip->link = 0; | 1176 | fip->reset_req = 1; |
| 1177 | schedule_work(&fip->link_work); | ||
| 1152 | } | 1178 | } |
| 1153 | schedule_work(&fip->link_work); | ||
| 1154 | } | 1179 | } |
| 1155 | 1180 | ||
| 1156 | send_ctlr_ka = 0; | 1181 | if (sel && !sel->fd_flags) { |
| 1157 | send_port_ka = 0; | ||
| 1158 | if (sel) { | ||
| 1159 | if (time_after_eq(jiffies, fip->ctlr_ka_time)) { | 1182 | if (time_after_eq(jiffies, fip->ctlr_ka_time)) { |
| 1160 | fip->ctlr_ka_time = jiffies + sel->fka_period; | 1183 | fip->ctlr_ka_time = jiffies + sel->fka_period; |
| 1161 | send_ctlr_ka = 1; | 1184 | fip->send_ctlr_ka = 1; |
| 1162 | } | 1185 | } |
| 1163 | if (time_after(next_timer, fip->ctlr_ka_time)) | 1186 | if (time_after(next_timer, fip->ctlr_ka_time)) |
| 1164 | next_timer = fip->ctlr_ka_time; | 1187 | next_timer = fip->ctlr_ka_time; |
| 1165 | 1188 | ||
| 1166 | if (time_after_eq(jiffies, fip->port_ka_time)) { | 1189 | if (time_after_eq(jiffies, fip->port_ka_time)) { |
| 1167 | fip->port_ka_time += jiffies + | 1190 | fip->port_ka_time += jiffies + |
| 1168 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | 1191 | msecs_to_jiffies(FIP_VN_KA_PERIOD); |
| 1169 | send_port_ka = 1; | 1192 | fip->send_port_ka = 1; |
| 1170 | } | 1193 | } |
| 1171 | if (time_after(next_timer, fip->port_ka_time)) | 1194 | if (time_after(next_timer, fip->port_ka_time)) |
| 1172 | next_timer = fip->port_ka_time; | 1195 | next_timer = fip->port_ka_time; |
| 1173 | mod_timer(&fip->timer, next_timer); | 1196 | mod_timer(&fip->timer, next_timer); |
| 1174 | } else if (fip->sel_time) { | 1197 | } else if (fip->sel_time) { |
| 1175 | next_timer = fip->sel_time + | 1198 | next_timer = fip->sel_time + |
| 1176 | msecs_to_jiffies(FCOE_CTLR_START_DELAY); | 1199 | msecs_to_jiffies(FCOE_CTLR_START_DELAY); |
| 1177 | mod_timer(&fip->timer, next_timer); | 1200 | mod_timer(&fip->timer, next_timer); |
| 1178 | } | 1201 | } |
| 1202 | if (fip->send_ctlr_ka || fip->send_port_ka) | ||
| 1203 | schedule_work(&fip->link_work); | ||
| 1179 | spin_unlock_bh(&fip->lock); | 1204 | spin_unlock_bh(&fip->lock); |
| 1180 | |||
| 1181 | if (send_ctlr_ka) | ||
| 1182 | fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr); | ||
| 1183 | if (send_port_ka) | ||
| 1184 | fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr); | ||
| 1185 | } | 1205 | } |
| 1186 | 1206 | ||
| 1187 | /** | 1207 | /** |
| 1188 | * fcoe_ctlr_link_work() - worker thread function for link changes. | 1208 | * fcoe_ctlr_link_work() - Worker thread function for link changes |
| 1189 | * @work: pointer to link_work member inside &fcoe_ctlr. | 1209 | * @work: Handle to a FCoE controller |
| 1190 | * | 1210 | * |
| 1191 | * See if the link status has changed and if so, report it. | 1211 | * See if the link status has changed and if so, report it. |
| 1192 | * | 1212 | * |
| @@ -1196,27 +1216,49 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
| 1196 | static void fcoe_ctlr_link_work(struct work_struct *work) | 1216 | static void fcoe_ctlr_link_work(struct work_struct *work) |
| 1197 | { | 1217 | { |
| 1198 | struct fcoe_ctlr *fip; | 1218 | struct fcoe_ctlr *fip; |
| 1219 | struct fc_lport *vport; | ||
| 1220 | u8 *mac; | ||
| 1199 | int link; | 1221 | int link; |
| 1200 | int last_link; | 1222 | int last_link; |
| 1223 | int reset; | ||
| 1201 | 1224 | ||
| 1202 | fip = container_of(work, struct fcoe_ctlr, link_work); | 1225 | fip = container_of(work, struct fcoe_ctlr, link_work); |
| 1203 | spin_lock_bh(&fip->lock); | 1226 | spin_lock_bh(&fip->lock); |
| 1204 | last_link = fip->last_link; | 1227 | last_link = fip->last_link; |
| 1205 | link = fip->link; | 1228 | link = fip->link; |
| 1206 | fip->last_link = link; | 1229 | fip->last_link = link; |
| 1230 | reset = fip->reset_req; | ||
| 1231 | fip->reset_req = 0; | ||
| 1207 | spin_unlock_bh(&fip->lock); | 1232 | spin_unlock_bh(&fip->lock); |
| 1208 | 1233 | ||
| 1209 | if (last_link != link) { | 1234 | if (last_link != link) { |
| 1210 | if (link) | 1235 | if (link) |
| 1211 | fc_linkup(fip->lp); | 1236 | fc_linkup(fip->lp); |
| 1212 | else | 1237 | else |
| 1213 | fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); | 1238 | fc_linkdown(fip->lp); |
| 1239 | } else if (reset && link) | ||
| 1240 | fc_lport_reset(fip->lp); | ||
| 1241 | |||
| 1242 | if (fip->send_ctlr_ka) { | ||
| 1243 | fip->send_ctlr_ka = 0; | ||
| 1244 | fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); | ||
| 1245 | } | ||
| 1246 | if (fip->send_port_ka) { | ||
| 1247 | fip->send_port_ka = 0; | ||
| 1248 | mutex_lock(&fip->lp->lp_mutex); | ||
| 1249 | mac = fip->get_src_addr(fip->lp); | ||
| 1250 | fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); | ||
| 1251 | list_for_each_entry(vport, &fip->lp->vports, list) { | ||
| 1252 | mac = fip->get_src_addr(vport); | ||
| 1253 | fcoe_ctlr_send_keep_alive(fip, vport, 1, mac); | ||
| 1254 | } | ||
| 1255 | mutex_unlock(&fip->lp->lp_mutex); | ||
| 1214 | } | 1256 | } |
| 1215 | } | 1257 | } |
| 1216 | 1258 | ||
| 1217 | /** | 1259 | /** |
| 1218 | * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames. | 1260 | * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames |
| 1219 | * @recv_work: pointer to recv_work member inside &fcoe_ctlr. | 1261 | * @recv_work: Handle to a FCoE controller |
| 1220 | */ | 1262 | */ |
| 1221 | static void fcoe_ctlr_recv_work(struct work_struct *recv_work) | 1263 | static void fcoe_ctlr_recv_work(struct work_struct *recv_work) |
| 1222 | { | 1264 | { |
| @@ -1224,20 +1266,14 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) | |||
| 1224 | struct sk_buff *skb; | 1266 | struct sk_buff *skb; |
| 1225 | 1267 | ||
| 1226 | fip = container_of(recv_work, struct fcoe_ctlr, recv_work); | 1268 | fip = container_of(recv_work, struct fcoe_ctlr, recv_work); |
| 1227 | spin_lock_bh(&fip->fip_recv_list.lock); | 1269 | while ((skb = skb_dequeue(&fip->fip_recv_list))) |
| 1228 | while ((skb = __skb_dequeue(&fip->fip_recv_list))) { | ||
| 1229 | spin_unlock_bh(&fip->fip_recv_list.lock); | ||
| 1230 | fcoe_ctlr_recv_handler(fip, skb); | 1270 | fcoe_ctlr_recv_handler(fip, skb); |
| 1231 | spin_lock_bh(&fip->fip_recv_list.lock); | ||
| 1232 | } | ||
| 1233 | spin_unlock_bh(&fip->fip_recv_list.lock); | ||
| 1234 | } | 1271 | } |
| 1235 | 1272 | ||
| 1236 | /** | 1273 | /** |
| 1237 | * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request. | 1274 | * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response |
| 1238 | * @fip: FCoE controller. | 1275 | * @fip: The FCoE controller |
| 1239 | * @fp: FC frame. | 1276 | * @fp: The FC frame to snoop |
| 1240 | * @sa: Ethernet source MAC address from received FCoE frame. | ||
| 1241 | * | 1277 | * |
| 1242 | * Snoop potential response to FLOGI or even incoming FLOGI. | 1278 | * Snoop potential response to FLOGI or even incoming FLOGI. |
| 1243 | * | 1279 | * |
| @@ -1245,15 +1281,18 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) | |||
| 1245 | * by fip->flogi_oxid != FC_XID_UNKNOWN. | 1281 | * by fip->flogi_oxid != FC_XID_UNKNOWN. |
| 1246 | * | 1282 | * |
| 1247 | * The caller is responsible for freeing the frame. | 1283 | * The caller is responsible for freeing the frame. |
| 1284 | * Fill in the granted_mac address. | ||
| 1248 | * | 1285 | * |
| 1249 | * Return non-zero if the frame should not be delivered to libfc. | 1286 | * Return non-zero if the frame should not be delivered to libfc. |
| 1250 | */ | 1287 | */ |
| 1251 | int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | 1288 | int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, |
| 1289 | struct fc_frame *fp) | ||
| 1252 | { | 1290 | { |
| 1253 | struct fc_frame_header *fh; | 1291 | struct fc_frame_header *fh; |
| 1254 | u8 op; | 1292 | u8 op; |
| 1255 | u8 mac[ETH_ALEN]; | 1293 | u8 *sa; |
| 1256 | 1294 | ||
| 1295 | sa = eth_hdr(&fp->skb)->h_source; | ||
| 1257 | fh = fc_frame_header_get(fp); | 1296 | fh = fc_frame_header_get(fp); |
| 1258 | if (fh->fh_type != FC_TYPE_ELS) | 1297 | if (fh->fh_type != FC_TYPE_ELS) |
| 1259 | return 0; | 1298 | return 0; |
| @@ -1268,7 +1307,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
| 1268 | return -EINVAL; | 1307 | return -EINVAL; |
| 1269 | } | 1308 | } |
| 1270 | fip->state = FIP_ST_NON_FIP; | 1309 | fip->state = FIP_ST_NON_FIP; |
| 1271 | LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); | 1310 | LIBFCOE_FIP_DBG(fip, |
| 1311 | "received FLOGI LS_ACC using non-FIP mode\n"); | ||
| 1272 | 1312 | ||
| 1273 | /* | 1313 | /* |
| 1274 | * FLOGI accepted. | 1314 | * FLOGI accepted. |
| @@ -1283,11 +1323,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
| 1283 | fip->map_dest = 0; | 1323 | fip->map_dest = 0; |
| 1284 | } | 1324 | } |
| 1285 | fip->flogi_oxid = FC_XID_UNKNOWN; | 1325 | fip->flogi_oxid = FC_XID_UNKNOWN; |
| 1286 | memcpy(mac, fip->data_src_addr, ETH_ALEN); | ||
| 1287 | fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id); | ||
| 1288 | spin_unlock_bh(&fip->lock); | 1326 | spin_unlock_bh(&fip->lock); |
| 1289 | 1327 | fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); | |
| 1290 | fip->update_mac(fip, mac, fip->data_src_addr); | ||
| 1291 | } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { | 1328 | } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { |
| 1292 | /* | 1329 | /* |
| 1293 | * Save source MAC for point-to-point responses. | 1330 | * Save source MAC for point-to-point responses. |
| @@ -1297,7 +1334,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
| 1297 | memcpy(fip->dest_addr, sa, ETH_ALEN); | 1334 | memcpy(fip->dest_addr, sa, ETH_ALEN); |
| 1298 | fip->map_dest = 0; | 1335 | fip->map_dest = 0; |
| 1299 | if (fip->state == FIP_ST_NON_FIP) | 1336 | if (fip->state == FIP_ST_NON_FIP) |
| 1300 | LIBFCOE_FIP_DBG("received FLOGI REQ, " | 1337 | LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, " |
| 1301 | "using non-FIP mode\n"); | 1338 | "using non-FIP mode\n"); |
| 1302 | fip->state = FIP_ST_NON_FIP; | 1339 | fip->state = FIP_ST_NON_FIP; |
| 1303 | } | 1340 | } |
| @@ -1308,10 +1345,10 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
| 1308 | EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); | 1345 | EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); |
| 1309 | 1346 | ||
| 1310 | /** | 1347 | /** |
| 1311 | * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. | 1348 | * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN |
| 1312 | * @mac: mac address | 1349 | * @mac: The MAC address to convert |
| 1313 | * @scheme: check port | 1350 | * @scheme: The scheme to use when converting |
| 1314 | * @port: port indicator for converting | 1351 | * @port: The port indicator for converting |
| 1315 | * | 1352 | * |
| 1316 | * Returns: u64 fc world wide name | 1353 | * Returns: u64 fc world wide name |
| 1317 | */ | 1354 | */ |
| @@ -1349,24 +1386,26 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], | |||
| 1349 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); | 1386 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); |
| 1350 | 1387 | ||
| 1351 | /** | 1388 | /** |
| 1352 | * fcoe_libfc_config() - sets up libfc related properties for lport | 1389 | * fcoe_libfc_config() - Sets up libfc related properties for local port |
| 1353 | * @lp: ptr to the fc_lport | 1390 | * @lp: The local port to configure libfc for |
| 1354 | * @tt: libfc function template | 1391 | * @tt: The libfc function template |
| 1355 | * | 1392 | * |
| 1356 | * Returns : 0 for success | 1393 | * Returns : 0 for success |
| 1357 | */ | 1394 | */ |
| 1358 | int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) | 1395 | int fcoe_libfc_config(struct fc_lport *lport, |
| 1396 | struct libfc_function_template *tt) | ||
| 1359 | { | 1397 | { |
| 1360 | /* Set the function pointers set by the LLDD */ | 1398 | /* Set the function pointers set by the LLDD */ |
| 1361 | memcpy(&lp->tt, tt, sizeof(*tt)); | 1399 | memcpy(&lport->tt, tt, sizeof(*tt)); |
| 1362 | if (fc_fcp_init(lp)) | 1400 | if (fc_fcp_init(lport)) |
| 1363 | return -ENOMEM; | 1401 | return -ENOMEM; |
| 1364 | fc_exch_init(lp); | 1402 | fc_exch_init(lport); |
| 1365 | fc_elsct_init(lp); | 1403 | fc_elsct_init(lport); |
| 1366 | fc_lport_init(lp); | 1404 | fc_lport_init(lport); |
| 1367 | fc_rport_init(lp); | 1405 | fc_rport_init(lport); |
| 1368 | fc_disc_init(lp); | 1406 | fc_disc_init(lport); |
| 1369 | 1407 | ||
| 1370 | return 0; | 1408 | return 0; |
| 1371 | } | 1409 | } |
| 1372 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); | 1410 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); |
| 1411 | |||
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index e4c0a3d7d87b..bb208a6091e7 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
| 23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
| 24 | #include <scsi/libfc.h> | 24 | #include <scsi/libfc.h> |
| 25 | #include <scsi/libfcoe.h> | ||
| 25 | #include "fnic_io.h" | 26 | #include "fnic_io.h" |
| 26 | #include "fnic_res.h" | 27 | #include "fnic_res.h" |
| 27 | #include "vnic_dev.h" | 28 | #include "vnic_dev.h" |
| @@ -44,7 +45,7 @@ | |||
| 44 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ | 45 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ |
| 45 | #define FNIC_DFLT_QUEUE_DEPTH 32 | 46 | #define FNIC_DFLT_QUEUE_DEPTH 32 |
| 46 | #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ | 47 | #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ |
| 47 | 48 | #define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */ | |
| 48 | /* | 49 | /* |
| 49 | * Tag bits used for special requests. | 50 | * Tag bits used for special requests. |
| 50 | */ | 51 | */ |
| @@ -145,6 +146,7 @@ struct mempool; | |||
| 145 | /* Per-instance private data structure */ | 146 | /* Per-instance private data structure */ |
| 146 | struct fnic { | 147 | struct fnic { |
| 147 | struct fc_lport *lport; | 148 | struct fc_lport *lport; |
| 149 | struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ | ||
| 148 | struct vnic_dev_bar bar0; | 150 | struct vnic_dev_bar bar0; |
| 149 | 151 | ||
| 150 | struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; | 152 | struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; |
| @@ -162,23 +164,16 @@ struct fnic { | |||
| 162 | unsigned int wq_count; | 164 | unsigned int wq_count; |
| 163 | unsigned int cq_count; | 165 | unsigned int cq_count; |
| 164 | 166 | ||
| 165 | u32 fcoui_mode:1; /* use fcoui address*/ | ||
| 166 | u32 vlan_hw_insert:1; /* let hw insert the tag */ | 167 | u32 vlan_hw_insert:1; /* let hw insert the tag */ |
| 167 | u32 in_remove:1; /* fnic device in removal */ | 168 | u32 in_remove:1; /* fnic device in removal */ |
| 168 | u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ | 169 | u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ |
| 169 | 170 | ||
| 170 | struct completion *remove_wait; /* device remove thread blocks */ | 171 | struct completion *remove_wait; /* device remove thread blocks */ |
| 171 | 172 | ||
| 172 | struct fc_frame *flogi; | ||
| 173 | struct fc_frame *flogi_resp; | ||
| 174 | u16 flogi_oxid; | ||
| 175 | unsigned long s_id; | ||
| 176 | enum fnic_state state; | 173 | enum fnic_state state; |
| 177 | spinlock_t fnic_lock; | 174 | spinlock_t fnic_lock; |
| 178 | 175 | ||
| 179 | u16 vlan_id; /* VLAN tag including priority */ | 176 | u16 vlan_id; /* VLAN tag including priority */ |
| 180 | u8 mac_addr[ETH_ALEN]; | ||
| 181 | u8 dest_addr[ETH_ALEN]; | ||
| 182 | u8 data_src_addr[ETH_ALEN]; | 177 | u8 data_src_addr[ETH_ALEN]; |
| 183 | u64 fcp_input_bytes; /* internal statistic */ | 178 | u64 fcp_input_bytes; /* internal statistic */ |
| 184 | u64 fcp_output_bytes; /* internal statistic */ | 179 | u64 fcp_output_bytes; /* internal statistic */ |
| @@ -205,6 +200,7 @@ struct fnic { | |||
| 205 | struct work_struct link_work; | 200 | struct work_struct link_work; |
| 206 | struct work_struct frame_work; | 201 | struct work_struct frame_work; |
| 207 | struct sk_buff_head frame_queue; | 202 | struct sk_buff_head frame_queue; |
| 203 | struct sk_buff_head tx_queue; | ||
| 208 | 204 | ||
| 209 | /* copy work queue cache line section */ | 205 | /* copy work queue cache line section */ |
| 210 | ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; | 206 | ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; |
| @@ -224,6 +220,11 @@ struct fnic { | |||
| 224 | ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; | 220 | ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; |
| 225 | }; | 221 | }; |
| 226 | 222 | ||
| 223 | static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) | ||
| 224 | { | ||
| 225 | return container_of(fip, struct fnic, ctlr); | ||
| 226 | } | ||
| 227 | |||
| 227 | extern struct workqueue_struct *fnic_event_queue; | 228 | extern struct workqueue_struct *fnic_event_queue; |
| 228 | extern struct device_attribute *fnic_attrs[]; | 229 | extern struct device_attribute *fnic_attrs[]; |
| 229 | 230 | ||
| @@ -239,7 +240,11 @@ void fnic_handle_link(struct work_struct *work); | |||
| 239 | int fnic_rq_cmpl_handler(struct fnic *fnic, int); | 240 | int fnic_rq_cmpl_handler(struct fnic *fnic, int); |
| 240 | int fnic_alloc_rq_frame(struct vnic_rq *rq); | 241 | int fnic_alloc_rq_frame(struct vnic_rq *rq); |
| 241 | void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); | 242 | void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); |
| 242 | int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); | 243 | void fnic_flush_tx(struct fnic *); |
| 244 | void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); | ||
| 245 | void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); | ||
| 246 | void fnic_update_mac(struct fc_lport *, u8 *new); | ||
| 247 | void fnic_update_mac_locked(struct fnic *, u8 *new); | ||
| 243 | 248 | ||
| 244 | int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 249 | int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); |
| 245 | int fnic_abort_cmd(struct scsi_cmnd *); | 250 | int fnic_abort_cmd(struct scsi_cmnd *); |
| @@ -252,7 +257,7 @@ void fnic_empty_scsi_cleanup(struct fc_lport *); | |||
| 252 | void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); | 257 | void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); |
| 253 | int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); | 258 | int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); |
| 254 | int fnic_wq_cmpl_handler(struct fnic *fnic, int); | 259 | int fnic_wq_cmpl_handler(struct fnic *fnic, int); |
| 255 | int fnic_flogi_reg_handler(struct fnic *fnic); | 260 | int fnic_flogi_reg_handler(struct fnic *fnic, u32); |
| 256 | void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | 261 | void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, |
| 257 | struct fcpio_host_req *desc); | 262 | struct fcpio_host_req *desc); |
| 258 | int fnic_fw_reset_handler(struct fnic *fnic); | 263 | int fnic_fw_reset_handler(struct fnic *fnic); |
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 50db3e36a619..54f8d0e5407f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/if_ether.h> | 23 | #include <linux/if_ether.h> |
| 24 | #include <linux/if_vlan.h> | 24 | #include <linux/if_vlan.h> |
| 25 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
| 26 | #include <scsi/fc/fc_fip.h> | ||
| 26 | #include <scsi/fc/fc_els.h> | 27 | #include <scsi/fc/fc_els.h> |
| 27 | #include <scsi/fc/fc_fcoe.h> | 28 | #include <scsi/fc/fc_fcoe.h> |
| 28 | #include <scsi/fc_frame.h> | 29 | #include <scsi/fc_frame.h> |
| @@ -34,6 +35,8 @@ | |||
| 34 | 35 | ||
| 35 | struct workqueue_struct *fnic_event_queue; | 36 | struct workqueue_struct *fnic_event_queue; |
| 36 | 37 | ||
| 38 | static void fnic_set_eth_mode(struct fnic *); | ||
| 39 | |||
| 37 | void fnic_handle_link(struct work_struct *work) | 40 | void fnic_handle_link(struct work_struct *work) |
| 38 | { | 41 | { |
| 39 | struct fnic *fnic = container_of(work, struct fnic, link_work); | 42 | struct fnic *fnic = container_of(work, struct fnic, link_work); |
| @@ -64,10 +67,10 @@ void fnic_handle_link(struct work_struct *work) | |||
| 64 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 67 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 65 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 68 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
| 66 | "link down\n"); | 69 | "link down\n"); |
| 67 | fc_linkdown(fnic->lport); | 70 | fcoe_ctlr_link_down(&fnic->ctlr); |
| 68 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 71 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
| 69 | "link up\n"); | 72 | "link up\n"); |
| 70 | fc_linkup(fnic->lport); | 73 | fcoe_ctlr_link_up(&fnic->ctlr); |
| 71 | } else | 74 | } else |
| 72 | /* UP -> UP */ | 75 | /* UP -> UP */ |
| 73 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 76 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| @@ -76,13 +79,13 @@ void fnic_handle_link(struct work_struct *work) | |||
| 76 | /* DOWN -> UP */ | 79 | /* DOWN -> UP */ |
| 77 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 80 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 78 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); | 81 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); |
| 79 | fc_linkup(fnic->lport); | 82 | fcoe_ctlr_link_up(&fnic->ctlr); |
| 80 | } else { | 83 | } else { |
| 81 | /* UP -> DOWN */ | 84 | /* UP -> DOWN */ |
| 82 | fnic->lport->host_stats.link_failure_count++; | 85 | fnic->lport->host_stats.link_failure_count++; |
| 83 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 86 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 84 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); | 87 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); |
| 85 | fc_linkdown(fnic->lport); | 88 | fcoe_ctlr_link_down(&fnic->ctlr); |
| 86 | } | 89 | } |
| 87 | 90 | ||
| 88 | } | 91 | } |
| @@ -107,197 +110,179 @@ void fnic_handle_frame(struct work_struct *work) | |||
| 107 | return; | 110 | return; |
| 108 | } | 111 | } |
| 109 | fp = (struct fc_frame *)skb; | 112 | fp = (struct fc_frame *)skb; |
| 110 | /* if Flogi resp frame, register the address */ | 113 | |
| 111 | if (fr_flags(fp)) { | 114 | /* |
| 112 | vnic_dev_add_addr(fnic->vdev, | 115 | * If we're in a transitional state, just re-queue and return. |
| 113 | fnic->data_src_addr); | 116 | * The queue will be serviced when we get to a stable state. |
| 114 | fr_flags(fp) = 0; | 117 | */ |
| 118 | if (fnic->state != FNIC_IN_FC_MODE && | ||
| 119 | fnic->state != FNIC_IN_ETH_MODE) { | ||
| 120 | skb_queue_head(&fnic->frame_queue, skb); | ||
| 121 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 122 | return; | ||
| 115 | } | 123 | } |
| 116 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 124 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 117 | 125 | ||
| 118 | fc_exch_recv(lp, fp); | 126 | fc_exch_recv(lp, fp); |
| 119 | } | 127 | } |
| 120 | |||
| 121 | } | ||
| 122 | |||
| 123 | static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, | ||
| 124 | u32 len, u8 sof, u8 eof) | ||
| 125 | { | ||
| 126 | struct fc_frame *fp = (struct fc_frame *)skb; | ||
| 127 | |||
| 128 | skb_trim(skb, len); | ||
| 129 | fr_eof(fp) = eof; | ||
| 130 | fr_sof(fp) = sof; | ||
| 131 | } | 128 | } |
| 132 | 129 | ||
| 133 | 130 | /** | |
| 134 | static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) | 131 | * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. |
| 132 | * @fnic: fnic instance. | ||
| 133 | * @skb: Ethernet Frame. | ||
| 134 | */ | ||
| 135 | static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) | ||
| 135 | { | 136 | { |
| 136 | struct fc_frame *fp; | 137 | struct fc_frame *fp; |
| 137 | struct ethhdr *eh; | 138 | struct ethhdr *eh; |
| 138 | struct vlan_ethhdr *vh; | ||
| 139 | struct fcoe_hdr *fcoe_hdr; | 139 | struct fcoe_hdr *fcoe_hdr; |
| 140 | struct fcoe_crc_eof *ft; | 140 | struct fcoe_crc_eof *ft; |
| 141 | u32 transport_len = 0; | ||
| 142 | 141 | ||
| 142 | /* | ||
| 143 | * Undo VLAN encapsulation if present. | ||
| 144 | */ | ||
| 143 | eh = (struct ethhdr *)skb->data; | 145 | eh = (struct ethhdr *)skb->data; |
| 144 | vh = (struct vlan_ethhdr *)skb->data; | 146 | if (eh->h_proto == htons(ETH_P_8021Q)) { |
| 145 | if (vh->h_vlan_proto == htons(ETH_P_8021Q) && | 147 | memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); |
| 146 | vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { | 148 | eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); |
| 147 | skb_pull(skb, sizeof(struct vlan_ethhdr)); | 149 | skb_reset_mac_header(skb); |
| 148 | transport_len += sizeof(struct vlan_ethhdr); | 150 | } |
| 149 | } else if (eh->h_proto == htons(ETH_P_FCOE)) { | 151 | if (eh->h_proto == htons(ETH_P_FIP)) { |
| 150 | transport_len += sizeof(struct ethhdr); | 152 | skb_pull(skb, sizeof(*eh)); |
| 151 | skb_pull(skb, sizeof(struct ethhdr)); | 153 | fcoe_ctlr_recv(&fnic->ctlr, skb); |
| 152 | } else | 154 | return 1; /* let caller know packet was used */ |
| 153 | return -1; | 155 | } |
| 156 | if (eh->h_proto != htons(ETH_P_FCOE)) | ||
| 157 | goto drop; | ||
| 158 | skb_set_network_header(skb, sizeof(*eh)); | ||
| 159 | skb_pull(skb, sizeof(*eh)); | ||
| 154 | 160 | ||
| 155 | fcoe_hdr = (struct fcoe_hdr *)skb->data; | 161 | fcoe_hdr = (struct fcoe_hdr *)skb->data; |
| 156 | if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) | 162 | if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) |
| 157 | return -1; | 163 | goto drop; |
| 158 | 164 | ||
| 159 | fp = (struct fc_frame *)skb; | 165 | fp = (struct fc_frame *)skb; |
| 160 | fc_frame_init(fp); | 166 | fc_frame_init(fp); |
| 161 | fr_sof(fp) = fcoe_hdr->fcoe_sof; | 167 | fr_sof(fp) = fcoe_hdr->fcoe_sof; |
| 162 | skb_pull(skb, sizeof(struct fcoe_hdr)); | 168 | skb_pull(skb, sizeof(struct fcoe_hdr)); |
| 163 | transport_len += sizeof(struct fcoe_hdr); | 169 | skb_reset_transport_header(skb); |
| 164 | 170 | ||
| 165 | ft = (struct fcoe_crc_eof *)(skb->data + len - | 171 | ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); |
| 166 | transport_len - sizeof(*ft)); | ||
| 167 | fr_eof(fp) = ft->fcoe_eof; | 172 | fr_eof(fp) = ft->fcoe_eof; |
| 168 | skb_trim(skb, len - transport_len - sizeof(*ft)); | 173 | skb_trim(skb, skb->len - sizeof(*ft)); |
| 169 | return 0; | 174 | return 0; |
| 175 | drop: | ||
| 176 | dev_kfree_skb_irq(skb); | ||
| 177 | return -1; | ||
| 170 | } | 178 | } |
| 171 | 179 | ||
| 172 | static inline int fnic_handle_flogi_resp(struct fnic *fnic, | 180 | /** |
| 173 | struct fc_frame *fp) | 181 | * fnic_update_mac_locked() - set data MAC address and filters. |
| 182 | * @fnic: fnic instance. | ||
| 183 | * @new: newly-assigned FCoE MAC address. | ||
| 184 | * | ||
| 185 | * Called with the fnic lock held. | ||
| 186 | */ | ||
| 187 | void fnic_update_mac_locked(struct fnic *fnic, u8 *new) | ||
| 174 | { | 188 | { |
| 175 | u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; | 189 | u8 *ctl = fnic->ctlr.ctl_src_addr; |
| 176 | struct ethhdr *eth_hdr; | 190 | u8 *data = fnic->data_src_addr; |
| 177 | struct fc_frame_header *fh; | ||
| 178 | int ret = 0; | ||
| 179 | unsigned long flags; | ||
| 180 | struct fc_frame *old_flogi_resp = NULL; | ||
| 181 | 191 | ||
| 182 | fh = (struct fc_frame_header *)fr_hdr(fp); | 192 | if (is_zero_ether_addr(new)) |
| 193 | new = ctl; | ||
| 194 | if (!compare_ether_addr(data, new)) | ||
| 195 | return; | ||
| 196 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); | ||
| 197 | if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) | ||
| 198 | vnic_dev_del_addr(fnic->vdev, data); | ||
| 199 | memcpy(data, new, ETH_ALEN); | ||
| 200 | if (compare_ether_addr(new, ctl)) | ||
| 201 | vnic_dev_add_addr(fnic->vdev, new); | ||
| 202 | } | ||
| 183 | 203 | ||
| 184 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 204 | /** |
| 205 | * fnic_update_mac() - set data MAC address and filters. | ||
| 206 | * @lport: local port. | ||
| 207 | * @new: newly-assigned FCoE MAC address. | ||
| 208 | */ | ||
| 209 | void fnic_update_mac(struct fc_lport *lport, u8 *new) | ||
| 210 | { | ||
| 211 | struct fnic *fnic = lport_priv(lport); | ||
| 185 | 212 | ||
| 186 | if (fnic->state == FNIC_IN_ETH_MODE) { | 213 | spin_lock_irq(&fnic->fnic_lock); |
| 214 | fnic_update_mac_locked(fnic, new); | ||
| 215 | spin_unlock_irq(&fnic->fnic_lock); | ||
| 216 | } | ||
| 187 | 217 | ||
| 188 | /* | 218 | /** |
| 189 | * Check if oxid matches on taking the lock. A new Flogi | 219 | * fnic_set_port_id() - set the port_ID after successful FLOGI. |
| 190 | * issued by libFC might have changed the fnic cached oxid | 220 | * @lport: local port. |
| 191 | */ | 221 | * @port_id: assigned FC_ID. |
| 192 | if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { | 222 | * @fp: received frame containing the FLOGI accept or NULL. |
| 193 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 223 | * |
| 194 | "Flogi response oxid not" | 224 | * This is called from libfc when a new FC_ID has been assigned. |
| 195 | " matching cached oxid, dropping frame" | 225 | * This causes us to reset the firmware to FC_MODE and setup the new MAC |
| 196 | "\n"); | 226 | * address and FC_ID. |
| 197 | ret = -1; | 227 | * |
| 198 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 228 | * It is also called with FC_ID 0 when we're logged off. |
| 199 | dev_kfree_skb_irq(fp_skb(fp)); | 229 | * |
| 200 | goto handle_flogi_resp_end; | 230 | * If the FC_ID is due to point-to-point, fp may be NULL. |
| 201 | } | 231 | */ |
| 232 | void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) | ||
| 233 | { | ||
| 234 | struct fnic *fnic = lport_priv(lport); | ||
| 235 | u8 *mac; | ||
| 236 | int ret; | ||
| 202 | 237 | ||
| 203 | /* Drop older cached flogi response frame, cache this frame */ | 238 | FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", |
| 204 | old_flogi_resp = fnic->flogi_resp; | 239 | port_id, fp); |
| 205 | fnic->flogi_resp = fp; | ||
| 206 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
| 207 | 240 | ||
| 208 | /* | 241 | /* |
| 209 | * this frame is part of flogi get the src mac addr from this | 242 | * If we're clearing the FC_ID, change to use the ctl_src_addr. |
| 210 | * frame if the src mac is fcoui based then we mark the | 243 | * Set ethernet mode to send FLOGI. |
| 211 | * address mode flag to use fcoui base for dst mac addr | 244 | */ |
| 212 | * otherwise we have to store the fcoe gateway addr | 245 | if (!port_id) { |
| 213 | */ | 246 | fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); |
| 214 | eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); | 247 | fnic_set_eth_mode(fnic); |
| 215 | memcpy(mac, eth_hdr->h_source, ETH_ALEN); | 248 | return; |
| 249 | } | ||
| 216 | 250 | ||
| 217 | if (ntoh24(mac) == FC_FCOE_OUI) | 251 | if (fp) { |
| 218 | fnic->fcoui_mode = 1; | 252 | mac = fr_cb(fp)->granted_mac; |
| 219 | else { | 253 | if (is_zero_ether_addr(mac)) { |
| 220 | fnic->fcoui_mode = 0; | 254 | /* non-FIP - FLOGI already accepted - ignore return */ |
| 221 | memcpy(fnic->dest_addr, mac, ETH_ALEN); | 255 | fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); |
| 222 | } | 256 | } |
| 257 | fnic_update_mac(lport, mac); | ||
| 258 | } | ||
| 223 | 259 | ||
| 224 | /* | 260 | /* Change state to reflect transition to FC mode */ |
| 225 | * Except for Flogi frame, all outbound frames from us have the | 261 | spin_lock_irq(&fnic->fnic_lock); |
| 226 | * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses | 262 | if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) |
| 227 | * the vnic MAC address as the Eth Src address | ||
| 228 | */ | ||
| 229 | fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); | ||
| 230 | |||
| 231 | /* We get our s_id from the d_id of the flogi resp frame */ | ||
| 232 | fnic->s_id = ntoh24(fh->fh_d_id); | ||
| 233 | |||
| 234 | /* Change state to reflect transition from Eth to FC mode */ | ||
| 235 | fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; | 263 | fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; |
| 236 | 264 | else { | |
| 237 | } else { | ||
| 238 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 265 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
| 239 | "Unexpected fnic state %s while" | 266 | "Unexpected fnic state %s while" |
| 240 | " processing flogi resp\n", | 267 | " processing flogi resp\n", |
| 241 | fnic_state_to_str(fnic->state)); | 268 | fnic_state_to_str(fnic->state)); |
| 242 | ret = -1; | 269 | spin_unlock_irq(&fnic->fnic_lock); |
| 243 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 270 | return; |
| 244 | dev_kfree_skb_irq(fp_skb(fp)); | ||
| 245 | goto handle_flogi_resp_end; | ||
| 246 | } | 271 | } |
| 247 | 272 | spin_unlock_irq(&fnic->fnic_lock); | |
| 248 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 249 | |||
| 250 | /* Drop older cached frame */ | ||
| 251 | if (old_flogi_resp) | ||
| 252 | dev_kfree_skb_irq(fp_skb(old_flogi_resp)); | ||
| 253 | 273 | ||
| 254 | /* | 274 | /* |
| 255 | * send flogi reg request to firmware, this will put the fnic in | 275 | * Send FLOGI registration to firmware to set up FC mode. |
| 256 | * in FC mode | 276 | * The new address will be set up when registration completes. |
| 257 | */ | 277 | */ |
| 258 | ret = fnic_flogi_reg_handler(fnic); | 278 | ret = fnic_flogi_reg_handler(fnic, port_id); |
| 259 | 279 | ||
| 260 | if (ret < 0) { | 280 | if (ret < 0) { |
| 261 | int free_fp = 1; | 281 | spin_lock_irq(&fnic->fnic_lock); |
| 262 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
| 263 | /* | ||
| 264 | * free the frame is some other thread is not | ||
| 265 | * pointing to it | ||
| 266 | */ | ||
| 267 | if (fnic->flogi_resp != fp) | ||
| 268 | free_fp = 0; | ||
| 269 | else | ||
| 270 | fnic->flogi_resp = NULL; | ||
| 271 | |||
| 272 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) | 282 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) |
| 273 | fnic->state = FNIC_IN_ETH_MODE; | 283 | fnic->state = FNIC_IN_ETH_MODE; |
| 274 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 284 | spin_unlock_irq(&fnic->fnic_lock); |
| 275 | if (free_fp) | ||
| 276 | dev_kfree_skb_irq(fp_skb(fp)); | ||
| 277 | } | 285 | } |
| 278 | |||
| 279 | handle_flogi_resp_end: | ||
| 280 | return ret; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* Returns 1 for a response that matches cached flogi oxid */ | ||
| 284 | static inline int is_matching_flogi_resp_frame(struct fnic *fnic, | ||
| 285 | struct fc_frame *fp) | ||
| 286 | { | ||
| 287 | struct fc_frame_header *fh; | ||
| 288 | int ret = 0; | ||
| 289 | u32 f_ctl; | ||
| 290 | |||
| 291 | fh = fc_frame_header_get(fp); | ||
| 292 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
| 293 | |||
| 294 | if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && | ||
| 295 | fh->fh_r_ctl == FC_RCTL_ELS_REP && | ||
| 296 | (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && | ||
| 297 | fh->fh_type == FC_TYPE_ELS) | ||
| 298 | ret = 1; | ||
| 299 | |||
| 300 | return ret; | ||
| 301 | } | 286 | } |
| 302 | 287 | ||
| 303 | static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | 288 | static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc |
| @@ -326,6 +311,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | |||
| 326 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, | 311 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, |
| 327 | PCI_DMA_FROMDEVICE); | 312 | PCI_DMA_FROMDEVICE); |
| 328 | skb = buf->os_buf; | 313 | skb = buf->os_buf; |
| 314 | fp = (struct fc_frame *)skb; | ||
| 329 | buf->os_buf = NULL; | 315 | buf->os_buf = NULL; |
| 330 | 316 | ||
| 331 | cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); | 317 | cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); |
| @@ -338,6 +324,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | |||
| 338 | &fcoe_enc_error, &fcs_ok, &vlan_stripped, | 324 | &fcoe_enc_error, &fcs_ok, &vlan_stripped, |
| 339 | &vlan); | 325 | &vlan); |
| 340 | eth_hdrs_stripped = 1; | 326 | eth_hdrs_stripped = 1; |
| 327 | skb_trim(skb, fcp_bytes_written); | ||
| 328 | fr_sof(fp) = sof; | ||
| 329 | fr_eof(fp) = eof; | ||
| 341 | 330 | ||
| 342 | } else if (type == CQ_DESC_TYPE_RQ_ENET) { | 331 | } else if (type == CQ_DESC_TYPE_RQ_ENET) { |
| 343 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | 332 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, |
| @@ -352,6 +341,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | |||
| 352 | &ipv4_csum_ok, &ipv6, &ipv4, | 341 | &ipv4_csum_ok, &ipv6, &ipv4, |
| 353 | &ipv4_fragment, &fcs_ok); | 342 | &ipv4_fragment, &fcs_ok); |
| 354 | eth_hdrs_stripped = 0; | 343 | eth_hdrs_stripped = 0; |
| 344 | skb_trim(skb, bytes_written); | ||
| 345 | if (!fcs_ok) { | ||
| 346 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
| 347 | "fcs error. dropping packet.\n"); | ||
| 348 | goto drop; | ||
| 349 | } | ||
| 350 | if (fnic_import_rq_eth_pkt(fnic, skb)) | ||
| 351 | return; | ||
| 355 | 352 | ||
| 356 | } else { | 353 | } else { |
| 357 | /* wrong CQ type*/ | 354 | /* wrong CQ type*/ |
| @@ -370,43 +367,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | |||
| 370 | goto drop; | 367 | goto drop; |
| 371 | } | 368 | } |
| 372 | 369 | ||
| 373 | if (eth_hdrs_stripped) | ||
| 374 | fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); | ||
| 375 | else if (fnic_import_rq_eth_pkt(skb, bytes_written)) | ||
| 376 | goto drop; | ||
| 377 | |||
| 378 | fp = (struct fc_frame *)skb; | ||
| 379 | |||
| 380 | /* | ||
| 381 | * If frame is an ELS response that matches the cached FLOGI OX_ID, | ||
| 382 | * and is accept, issue flogi_reg_request copy wq request to firmware | ||
| 383 | * to register the S_ID and determine whether FC_OUI mode or GW mode. | ||
| 384 | */ | ||
| 385 | if (is_matching_flogi_resp_frame(fnic, fp)) { | ||
| 386 | if (!eth_hdrs_stripped) { | ||
| 387 | if (fc_frame_payload_op(fp) == ELS_LS_ACC) { | ||
| 388 | fnic_handle_flogi_resp(fnic, fp); | ||
| 389 | return; | ||
| 390 | } | ||
| 391 | /* | ||
| 392 | * Recd. Flogi reject. No point registering | ||
| 393 | * with fw, but forward to libFC | ||
| 394 | */ | ||
| 395 | goto forward; | ||
| 396 | } | ||
| 397 | goto drop; | ||
| 398 | } | ||
| 399 | if (!eth_hdrs_stripped) | ||
| 400 | goto drop; | ||
| 401 | |||
| 402 | forward: | ||
| 403 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 370 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
| 404 | if (fnic->stop_rx_link_events) { | 371 | if (fnic->stop_rx_link_events) { |
| 405 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 372 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 406 | goto drop; | 373 | goto drop; |
| 407 | } | 374 | } |
| 408 | /* Use fr_flags to indicate whether succ. flogi resp or not */ | ||
| 409 | fr_flags(fp) = 0; | ||
| 410 | fr_dev(fp) = fnic->lport; | 375 | fr_dev(fp) = fnic->lport; |
| 411 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 376 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 412 | 377 | ||
| @@ -494,12 +459,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) | |||
| 494 | buf->os_buf = NULL; | 459 | buf->os_buf = NULL; |
| 495 | } | 460 | } |
| 496 | 461 | ||
| 497 | static inline int is_flogi_frame(struct fc_frame_header *fh) | 462 | /** |
| 463 | * fnic_eth_send() - Send Ethernet frame. | ||
| 464 | * @fip: fcoe_ctlr instance. | ||
| 465 | * @skb: Ethernet Frame, FIP, without VLAN encapsulation. | ||
| 466 | */ | ||
| 467 | void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | ||
| 498 | { | 468 | { |
| 499 | return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; | 469 | struct fnic *fnic = fnic_from_ctlr(fip); |
| 470 | struct vnic_wq *wq = &fnic->wq[0]; | ||
| 471 | dma_addr_t pa; | ||
| 472 | struct ethhdr *eth_hdr; | ||
| 473 | struct vlan_ethhdr *vlan_hdr; | ||
| 474 | unsigned long flags; | ||
| 475 | |||
| 476 | if (!fnic->vlan_hw_insert) { | ||
| 477 | eth_hdr = (struct ethhdr *)skb_mac_header(skb); | ||
| 478 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, | ||
| 479 | sizeof(*vlan_hdr) - sizeof(*eth_hdr)); | ||
| 480 | memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); | ||
| 481 | vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); | ||
| 482 | vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; | ||
| 483 | vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); | ||
| 484 | } | ||
| 485 | |||
| 486 | pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | ||
| 487 | |||
| 488 | spin_lock_irqsave(&fnic->wq_lock[0], flags); | ||
| 489 | if (!vnic_wq_desc_avail(wq)) { | ||
| 490 | pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); | ||
| 491 | spin_unlock_irqrestore(&fnic->wq_lock[0], flags); | ||
| 492 | kfree_skb(skb); | ||
| 493 | return; | ||
| 494 | } | ||
| 495 | |||
| 496 | fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, | ||
| 497 | fnic->vlan_hw_insert, fnic->vlan_id, 1); | ||
| 498 | spin_unlock_irqrestore(&fnic->wq_lock[0], flags); | ||
| 500 | } | 499 | } |
| 501 | 500 | ||
| 502 | int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | 501 | /* |
| 502 | * Send FC frame. | ||
| 503 | */ | ||
| 504 | static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | ||
| 503 | { | 505 | { |
| 504 | struct vnic_wq *wq = &fnic->wq[0]; | 506 | struct vnic_wq *wq = &fnic->wq[0]; |
| 505 | struct sk_buff *skb; | 507 | struct sk_buff *skb; |
| @@ -515,6 +517,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | |||
| 515 | fh = fc_frame_header_get(fp); | 517 | fh = fc_frame_header_get(fp); |
| 516 | skb = fp_skb(fp); | 518 | skb = fp_skb(fp); |
| 517 | 519 | ||
| 520 | if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && | ||
| 521 | fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) | ||
| 522 | return 0; | ||
| 523 | |||
| 518 | if (!fnic->vlan_hw_insert) { | 524 | if (!fnic->vlan_hw_insert) { |
| 519 | eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); | 525 | eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); |
| 520 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); | 526 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); |
| @@ -530,16 +536,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | |||
| 530 | fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); | 536 | fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); |
| 531 | } | 537 | } |
| 532 | 538 | ||
| 533 | if (is_flogi_frame(fh)) { | 539 | if (fnic->ctlr.map_dest) |
| 534 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | 540 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); |
| 535 | memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); | 541 | else |
| 536 | } else { | 542 | memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); |
| 537 | if (fnic->fcoui_mode) | 543 | memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); |
| 538 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | ||
| 539 | else | ||
| 540 | memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); | ||
| 541 | memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); | ||
| 542 | } | ||
| 543 | 544 | ||
| 544 | tot_len = skb->len; | 545 | tot_len = skb->len; |
| 545 | BUG_ON(tot_len % 4); | 546 | BUG_ON(tot_len % 4); |
| @@ -578,109 +579,85 @@ fnic_send_frame_end: | |||
| 578 | int fnic_send(struct fc_lport *lp, struct fc_frame *fp) | 579 | int fnic_send(struct fc_lport *lp, struct fc_frame *fp) |
| 579 | { | 580 | { |
| 580 | struct fnic *fnic = lport_priv(lp); | 581 | struct fnic *fnic = lport_priv(lp); |
| 581 | struct fc_frame_header *fh; | ||
| 582 | int ret = 0; | ||
| 583 | enum fnic_state old_state; | ||
| 584 | unsigned long flags; | 582 | unsigned long flags; |
| 585 | struct fc_frame *old_flogi = NULL; | ||
| 586 | struct fc_frame *old_flogi_resp = NULL; | ||
| 587 | 583 | ||
| 588 | if (fnic->in_remove) { | 584 | if (fnic->in_remove) { |
| 589 | dev_kfree_skb(fp_skb(fp)); | 585 | dev_kfree_skb(fp_skb(fp)); |
| 590 | ret = -1; | 586 | return -1; |
| 591 | goto fnic_send_end; | ||
| 592 | } | 587 | } |
| 593 | 588 | ||
| 594 | fh = fc_frame_header_get(fp); | 589 | /* |
| 595 | /* if not an Flogi frame, send it out, this is the common case */ | 590 | * Queue frame if in a transitional state. |
| 596 | if (!is_flogi_frame(fh)) | 591 | * This occurs while registering the Port_ID / MAC address after FLOGI. |
| 597 | return fnic_send_frame(fnic, fp); | 592 | */ |
| 593 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
| 594 | if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { | ||
| 595 | skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); | ||
| 596 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 597 | return 0; | ||
| 598 | } | ||
| 599 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 598 | 600 | ||
| 599 | /* Flogi frame, now enter the state machine */ | 601 | return fnic_send_frame(fnic, fp); |
| 602 | } | ||
| 600 | 603 | ||
| 601 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 604 | /** |
| 602 | again: | 605 | * fnic_flush_tx() - send queued frames. |
| 603 | /* Get any old cached frames, free them after dropping lock */ | 606 | * @fnic: fnic device |
| 604 | old_flogi = fnic->flogi; | 607 | * |
| 605 | fnic->flogi = NULL; | 608 | * Send frames that were waiting to go out in FC or Ethernet mode. |
| 606 | old_flogi_resp = fnic->flogi_resp; | 609 | * Whenever changing modes we purge queued frames, so these frames should |
| 607 | fnic->flogi_resp = NULL; | 610 | * be queued for the stable mode that we're in, either FC or Ethernet. |
| 611 | * | ||
| 612 | * Called without fnic_lock held. | ||
| 613 | */ | ||
| 614 | void fnic_flush_tx(struct fnic *fnic) | ||
| 615 | { | ||
| 616 | struct sk_buff *skb; | ||
| 617 | struct fc_frame *fp; | ||
| 608 | 618 | ||
| 609 | fnic->flogi_oxid = FC_XID_UNKNOWN; | 619 | while ((skb = skb_dequeue(&fnic->frame_queue))) { |
| 620 | fp = (struct fc_frame *)skb; | ||
| 621 | fnic_send_frame(fnic, fp); | ||
| 622 | } | ||
| 623 | } | ||
| 610 | 624 | ||
| 625 | /** | ||
| 626 | * fnic_set_eth_mode() - put fnic into ethernet mode. | ||
| 627 | * @fnic: fnic device | ||
| 628 | * | ||
| 629 | * Called without fnic lock held. | ||
| 630 | */ | ||
| 631 | static void fnic_set_eth_mode(struct fnic *fnic) | ||
| 632 | { | ||
| 633 | unsigned long flags; | ||
| 634 | enum fnic_state old_state; | ||
| 635 | int ret; | ||
| 636 | |||
| 637 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
| 638 | again: | ||
| 611 | old_state = fnic->state; | 639 | old_state = fnic->state; |
| 612 | switch (old_state) { | 640 | switch (old_state) { |
| 613 | case FNIC_IN_FC_MODE: | 641 | case FNIC_IN_FC_MODE: |
| 614 | case FNIC_IN_ETH_TRANS_FC_MODE: | 642 | case FNIC_IN_ETH_TRANS_FC_MODE: |
| 615 | default: | 643 | default: |
| 616 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 644 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
| 617 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | ||
| 618 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 645 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 619 | 646 | ||
| 620 | if (old_flogi) { | ||
| 621 | dev_kfree_skb(fp_skb(old_flogi)); | ||
| 622 | old_flogi = NULL; | ||
| 623 | } | ||
| 624 | if (old_flogi_resp) { | ||
| 625 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
| 626 | old_flogi_resp = NULL; | ||
| 627 | } | ||
| 628 | |||
| 629 | ret = fnic_fw_reset_handler(fnic); | 647 | ret = fnic_fw_reset_handler(fnic); |
| 630 | 648 | ||
| 631 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 649 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
| 632 | if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) | 650 | if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) |
| 633 | goto again; | 651 | goto again; |
| 634 | if (ret) { | 652 | if (ret) |
| 635 | fnic->state = old_state; | 653 | fnic->state = old_state; |
| 636 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 637 | dev_kfree_skb(fp_skb(fp)); | ||
| 638 | goto fnic_send_end; | ||
| 639 | } | ||
| 640 | old_flogi = fnic->flogi; | ||
| 641 | fnic->flogi = fp; | ||
| 642 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
| 643 | old_flogi_resp = fnic->flogi_resp; | ||
| 644 | fnic->flogi_resp = NULL; | ||
| 645 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 646 | break; | 654 | break; |
| 647 | 655 | ||
| 648 | case FNIC_IN_FC_TRANS_ETH_MODE: | 656 | case FNIC_IN_FC_TRANS_ETH_MODE: |
| 649 | /* | ||
| 650 | * A reset is pending with the firmware. Store the flogi | ||
| 651 | * and its oxid. The transition out of this state happens | ||
| 652 | * only when Firmware completes the reset, either with | ||
| 653 | * success or failed. If success, transition to | ||
| 654 | * FNIC_IN_ETH_MODE, if fail, then transition to | ||
| 655 | * FNIC_IN_FC_MODE | ||
| 656 | */ | ||
| 657 | fnic->flogi = fp; | ||
| 658 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
| 659 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 660 | break; | ||
| 661 | |||
| 662 | case FNIC_IN_ETH_MODE: | 657 | case FNIC_IN_ETH_MODE: |
| 663 | /* | ||
| 664 | * The fw/hw is already in eth mode. Store the oxid, | ||
| 665 | * and send the flogi frame out. The transition out of this | ||
| 666 | * state happens only we receive flogi response from the | ||
| 667 | * network, and the oxid matches the cached oxid when the | ||
| 668 | * flogi frame was sent out. If they match, then we issue | ||
| 669 | * a flogi_reg request and transition to state | ||
| 670 | * FNIC_IN_ETH_TRANS_FC_MODE | ||
| 671 | */ | ||
| 672 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
| 673 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 674 | ret = fnic_send_frame(fnic, fp); | ||
| 675 | break; | 658 | break; |
| 676 | } | 659 | } |
| 677 | 660 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | |
| 678 | fnic_send_end: | ||
| 679 | if (old_flogi) | ||
| 680 | dev_kfree_skb(fp_skb(old_flogi)); | ||
| 681 | if (old_flogi_resp) | ||
| 682 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
| 683 | return ret; | ||
| 684 | } | 661 | } |
| 685 | 662 | ||
| 686 | static void fnic_wq_complete_frame_send(struct vnic_wq *wq, | 663 | static void fnic_wq_complete_frame_send(struct vnic_wq *wq, |
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 2b3064828aea..5c1f223cabce 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c | |||
| @@ -48,9 +48,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { | 50 | if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { |
| 51 | work_done += fnic_wq_copy_cmpl_handler(fnic, 8); | 51 | work_done += fnic_wq_copy_cmpl_handler(fnic, -1); |
| 52 | work_done += fnic_wq_cmpl_handler(fnic, 4); | 52 | work_done += fnic_wq_cmpl_handler(fnic, -1); |
| 53 | work_done += fnic_rq_cmpl_handler(fnic, 4); | 53 | work_done += fnic_rq_cmpl_handler(fnic, -1); |
| 54 | 54 | ||
| 55 | vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], | 55 | vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], |
| 56 | work_done, | 56 | work_done, |
| @@ -66,9 +66,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) | |||
| 66 | struct fnic *fnic = data; | 66 | struct fnic *fnic = data; |
| 67 | unsigned long work_done = 0; | 67 | unsigned long work_done = 0; |
| 68 | 68 | ||
| 69 | work_done += fnic_wq_copy_cmpl_handler(fnic, 8); | 69 | work_done += fnic_wq_copy_cmpl_handler(fnic, -1); |
| 70 | work_done += fnic_wq_cmpl_handler(fnic, 4); | 70 | work_done += fnic_wq_cmpl_handler(fnic, -1); |
| 71 | work_done += fnic_rq_cmpl_handler(fnic, 4); | 71 | work_done += fnic_rq_cmpl_handler(fnic, -1); |
| 72 | 72 | ||
| 73 | vnic_intr_return_credits(&fnic->intr[0], | 73 | vnic_intr_return_credits(&fnic->intr[0], |
| 74 | work_done, | 74 | work_done, |
| @@ -83,7 +83,7 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data) | |||
| 83 | struct fnic *fnic = data; | 83 | struct fnic *fnic = data; |
| 84 | unsigned long rq_work_done = 0; | 84 | unsigned long rq_work_done = 0; |
| 85 | 85 | ||
| 86 | rq_work_done = fnic_rq_cmpl_handler(fnic, 4); | 86 | rq_work_done = fnic_rq_cmpl_handler(fnic, -1); |
| 87 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], | 87 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], |
| 88 | rq_work_done, | 88 | rq_work_done, |
| 89 | 1 /* unmask intr */, | 89 | 1 /* unmask intr */, |
| @@ -97,7 +97,7 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data) | |||
| 97 | struct fnic *fnic = data; | 97 | struct fnic *fnic = data; |
| 98 | unsigned long wq_work_done = 0; | 98 | unsigned long wq_work_done = 0; |
| 99 | 99 | ||
| 100 | wq_work_done = fnic_wq_cmpl_handler(fnic, 4); | 100 | wq_work_done = fnic_wq_cmpl_handler(fnic, -1); |
| 101 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], | 101 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], |
| 102 | wq_work_done, | 102 | wq_work_done, |
| 103 | 1 /* unmask intr */, | 103 | 1 /* unmask intr */, |
| @@ -110,7 +110,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) | |||
| 110 | struct fnic *fnic = data; | 110 | struct fnic *fnic = data; |
| 111 | unsigned long wq_copy_work_done = 0; | 111 | unsigned long wq_copy_work_done = 0; |
| 112 | 112 | ||
| 113 | wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8); | 113 | wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); |
| 114 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], | 114 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], |
| 115 | wq_copy_work_done, | 115 | wq_copy_work_done, |
| 116 | 1 /* unmask intr */, | 116 | 1 /* unmask intr */, |
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 71c7bbe26d05..fe1b1031f7ab 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
| 27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
| 28 | #include <linux/if_ether.h> | ||
| 29 | #include <scsi/fc/fc_fip.h> | ||
| 28 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
| 29 | #include <scsi/scsi_transport.h> | 31 | #include <scsi/scsi_transport.h> |
| 30 | #include <scsi/scsi_transport_fc.h> | 32 | #include <scsi/scsi_transport_fc.h> |
| @@ -68,6 +70,7 @@ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); | |||
| 68 | 70 | ||
| 69 | static struct libfc_function_template fnic_transport_template = { | 71 | static struct libfc_function_template fnic_transport_template = { |
| 70 | .frame_send = fnic_send, | 72 | .frame_send = fnic_send, |
| 73 | .lport_set_port_id = fnic_set_port_id, | ||
| 71 | .fcp_abort_io = fnic_empty_scsi_cleanup, | 74 | .fcp_abort_io = fnic_empty_scsi_cleanup, |
| 72 | .fcp_cleanup = fnic_empty_scsi_cleanup, | 75 | .fcp_cleanup = fnic_empty_scsi_cleanup, |
| 73 | .exch_mgr_reset = fnic_exch_mgr_reset | 76 | .exch_mgr_reset = fnic_exch_mgr_reset |
| @@ -140,6 +143,7 @@ static struct fc_function_template fnic_fc_functions = { | |||
| 140 | .get_fc_host_stats = fnic_get_stats, | 143 | .get_fc_host_stats = fnic_get_stats, |
| 141 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), | 144 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), |
| 142 | .terminate_rport_io = fnic_terminate_rport_io, | 145 | .terminate_rport_io = fnic_terminate_rport_io, |
| 146 | .bsg_request = fc_lport_bsg_request, | ||
| 143 | }; | 147 | }; |
| 144 | 148 | ||
| 145 | static void fnic_get_host_speed(struct Scsi_Host *shost) | 149 | static void fnic_get_host_speed(struct Scsi_Host *shost) |
| @@ -324,9 +328,6 @@ static int fnic_cleanup(struct fnic *fnic) | |||
| 324 | { | 328 | { |
| 325 | unsigned int i; | 329 | unsigned int i; |
| 326 | int err; | 330 | int err; |
| 327 | unsigned long flags; | ||
| 328 | struct fc_frame *flogi = NULL; | ||
| 329 | struct fc_frame *flogi_resp = NULL; | ||
| 330 | 331 | ||
| 331 | vnic_dev_disable(fnic->vdev); | 332 | vnic_dev_disable(fnic->vdev); |
| 332 | for (i = 0; i < fnic->intr_count; i++) | 333 | for (i = 0; i < fnic->intr_count; i++) |
| @@ -367,24 +368,6 @@ static int fnic_cleanup(struct fnic *fnic) | |||
| 367 | for (i = 0; i < fnic->intr_count; i++) | 368 | for (i = 0; i < fnic->intr_count; i++) |
| 368 | vnic_intr_clean(&fnic->intr[i]); | 369 | vnic_intr_clean(&fnic->intr[i]); |
| 369 | 370 | ||
| 370 | /* | ||
| 371 | * Remove cached flogi and flogi resp frames if any | ||
| 372 | * These frames are not in any queue, and therefore queue | ||
| 373 | * cleanup does not clean them. So clean them explicitly | ||
| 374 | */ | ||
| 375 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
| 376 | flogi = fnic->flogi; | ||
| 377 | fnic->flogi = NULL; | ||
| 378 | flogi_resp = fnic->flogi_resp; | ||
| 379 | fnic->flogi_resp = NULL; | ||
| 380 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
| 381 | |||
| 382 | if (flogi) | ||
| 383 | dev_kfree_skb(fp_skb(flogi)); | ||
| 384 | |||
| 385 | if (flogi_resp) | ||
| 386 | dev_kfree_skb(fp_skb(flogi_resp)); | ||
| 387 | |||
| 388 | mempool_destroy(fnic->io_req_pool); | 371 | mempool_destroy(fnic->io_req_pool); |
| 389 | for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) | 372 | for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) |
| 390 | mempool_destroy(fnic->io_sgl_pool[i]); | 373 | mempool_destroy(fnic->io_sgl_pool[i]); |
| @@ -409,6 +392,17 @@ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) | |||
| 409 | return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); | 392 | return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); |
| 410 | } | 393 | } |
| 411 | 394 | ||
| 395 | /** | ||
| 396 | * fnic_get_mac() - get assigned data MAC address for FIP code. | ||
| 397 | * @lport: local port. | ||
| 398 | */ | ||
| 399 | static u8 *fnic_get_mac(struct fc_lport *lport) | ||
| 400 | { | ||
| 401 | struct fnic *fnic = lport_priv(lport); | ||
| 402 | |||
| 403 | return fnic->data_src_addr; | ||
| 404 | } | ||
| 405 | |||
| 412 | static int __devinit fnic_probe(struct pci_dev *pdev, | 406 | static int __devinit fnic_probe(struct pci_dev *pdev, |
| 413 | const struct pci_device_id *ent) | 407 | const struct pci_device_id *ent) |
| 414 | { | 408 | { |
| @@ -424,17 +418,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 424 | * Allocate SCSI Host and set up association between host, | 418 | * Allocate SCSI Host and set up association between host, |
| 425 | * local port, and fnic | 419 | * local port, and fnic |
| 426 | */ | 420 | */ |
| 427 | host = scsi_host_alloc(&fnic_host_template, | 421 | lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); |
| 428 | sizeof(struct fc_lport) + sizeof(struct fnic)); | 422 | if (!lp) { |
| 429 | if (!host) { | 423 | printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); |
| 430 | printk(KERN_ERR PFX "Unable to alloc SCSI host\n"); | ||
| 431 | err = -ENOMEM; | 424 | err = -ENOMEM; |
| 432 | goto err_out; | 425 | goto err_out; |
| 433 | } | 426 | } |
| 434 | lp = shost_priv(host); | 427 | host = lp->host; |
| 435 | lp->host = host; | ||
| 436 | fnic = lport_priv(lp); | 428 | fnic = lport_priv(lp); |
| 437 | fnic->lport = lp; | 429 | fnic->lport = lp; |
| 430 | fnic->ctlr.lp = lp; | ||
| 438 | 431 | ||
| 439 | snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, | 432 | snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, |
| 440 | host->host_no); | 433 | host->host_no); |
| @@ -543,12 +536,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 543 | goto err_out_dev_close; | 536 | goto err_out_dev_close; |
| 544 | } | 537 | } |
| 545 | 538 | ||
| 546 | err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); | 539 | err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); |
| 547 | if (err) { | 540 | if (err) { |
| 548 | shost_printk(KERN_ERR, fnic->lport->host, | 541 | shost_printk(KERN_ERR, fnic->lport->host, |
| 549 | "vNIC get MAC addr failed \n"); | 542 | "vNIC get MAC addr failed \n"); |
| 550 | goto err_out_dev_close; | 543 | goto err_out_dev_close; |
| 551 | } | 544 | } |
| 545 | /* set data_src for point-to-point mode and to keep it non-zero */ | ||
| 546 | memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); | ||
| 552 | 547 | ||
| 553 | /* Get vNIC configuration */ | 548 | /* Get vNIC configuration */ |
| 554 | err = fnic_get_vnic_config(fnic); | 549 | err = fnic_get_vnic_config(fnic); |
| @@ -560,6 +555,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 560 | } | 555 | } |
| 561 | host->max_lun = fnic->config.luns_per_tgt; | 556 | host->max_lun = fnic->config.luns_per_tgt; |
| 562 | host->max_id = FNIC_MAX_FCP_TARGET; | 557 | host->max_id = FNIC_MAX_FCP_TARGET; |
| 558 | host->max_cmd_len = FNIC_MAX_CMD_LEN; | ||
| 563 | 559 | ||
| 564 | fnic_get_res_counts(fnic); | 560 | fnic_get_res_counts(fnic); |
| 565 | 561 | ||
| @@ -571,19 +567,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 571 | goto err_out_dev_close; | 567 | goto err_out_dev_close; |
| 572 | } | 568 | } |
| 573 | 569 | ||
| 574 | err = fnic_request_intr(fnic); | ||
| 575 | if (err) { | ||
| 576 | shost_printk(KERN_ERR, fnic->lport->host, | ||
| 577 | "Unable to request irq.\n"); | ||
| 578 | goto err_out_clear_intr; | ||
| 579 | } | ||
| 580 | |||
| 581 | err = fnic_alloc_vnic_resources(fnic); | 570 | err = fnic_alloc_vnic_resources(fnic); |
| 582 | if (err) { | 571 | if (err) { |
| 583 | shost_printk(KERN_ERR, fnic->lport->host, | 572 | shost_printk(KERN_ERR, fnic->lport->host, |
| 584 | "Failed to alloc vNIC resources, " | 573 | "Failed to alloc vNIC resources, " |
| 585 | "aborting.\n"); | 574 | "aborting.\n"); |
| 586 | goto err_out_free_intr; | 575 | goto err_out_clear_intr; |
| 587 | } | 576 | } |
| 588 | 577 | ||
| 589 | 578 | ||
| @@ -623,9 +612,21 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 623 | fnic->vlan_hw_insert = 1; | 612 | fnic->vlan_hw_insert = 1; |
| 624 | fnic->vlan_id = 0; | 613 | fnic->vlan_id = 0; |
| 625 | 614 | ||
| 626 | fnic->flogi_oxid = FC_XID_UNKNOWN; | 615 | /* Initialize the FIP fcoe_ctrl struct */ |
| 627 | fnic->flogi = NULL; | 616 | fnic->ctlr.send = fnic_eth_send; |
| 628 | fnic->flogi_resp = NULL; | 617 | fnic->ctlr.update_mac = fnic_update_mac; |
| 618 | fnic->ctlr.get_src_addr = fnic_get_mac; | ||
| 619 | fcoe_ctlr_init(&fnic->ctlr); | ||
| 620 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { | ||
| 621 | shost_printk(KERN_INFO, fnic->lport->host, | ||
| 622 | "firmware supports FIP\n"); | ||
| 623 | vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); | ||
| 624 | vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); | ||
| 625 | } else { | ||
| 626 | shost_printk(KERN_INFO, fnic->lport->host, | ||
| 627 | "firmware uses non-FIP mode\n"); | ||
| 628 | fnic->ctlr.mode = FIP_ST_NON_FIP; | ||
| 629 | } | ||
| 629 | fnic->state = FNIC_IN_FC_MODE; | 630 | fnic->state = FNIC_IN_FC_MODE; |
| 630 | 631 | ||
| 631 | /* Enable hardware stripping of vlan header on ingress */ | 632 | /* Enable hardware stripping of vlan header on ingress */ |
| @@ -716,6 +717,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 716 | INIT_WORK(&fnic->link_work, fnic_handle_link); | 717 | INIT_WORK(&fnic->link_work, fnic_handle_link); |
| 717 | INIT_WORK(&fnic->frame_work, fnic_handle_frame); | 718 | INIT_WORK(&fnic->frame_work, fnic_handle_frame); |
| 718 | skb_queue_head_init(&fnic->frame_queue); | 719 | skb_queue_head_init(&fnic->frame_queue); |
| 720 | skb_queue_head_init(&fnic->tx_queue); | ||
| 719 | 721 | ||
| 720 | /* Enable all queues */ | 722 | /* Enable all queues */ |
| 721 | for (i = 0; i < fnic->raw_wq_count; i++) | 723 | for (i = 0; i < fnic->raw_wq_count; i++) |
| @@ -728,6 +730,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 728 | fc_fabric_login(lp); | 730 | fc_fabric_login(lp); |
| 729 | 731 | ||
| 730 | vnic_dev_enable(fnic->vdev); | 732 | vnic_dev_enable(fnic->vdev); |
| 733 | |||
| 734 | err = fnic_request_intr(fnic); | ||
| 735 | if (err) { | ||
| 736 | shost_printk(KERN_ERR, fnic->lport->host, | ||
| 737 | "Unable to request irq.\n"); | ||
| 738 | goto err_out_free_exch_mgr; | ||
| 739 | } | ||
| 740 | |||
| 731 | for (i = 0; i < fnic->intr_count; i++) | 741 | for (i = 0; i < fnic->intr_count; i++) |
| 732 | vnic_intr_unmask(&fnic->intr[i]); | 742 | vnic_intr_unmask(&fnic->intr[i]); |
| 733 | 743 | ||
| @@ -738,8 +748,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
| 738 | err_out_free_exch_mgr: | 748 | err_out_free_exch_mgr: |
| 739 | fc_exch_mgr_free(lp); | 749 | fc_exch_mgr_free(lp); |
| 740 | err_out_remove_scsi_host: | 750 | err_out_remove_scsi_host: |
| 741 | fc_remove_host(fnic->lport->host); | 751 | fc_remove_host(lp->host); |
| 742 | scsi_remove_host(fnic->lport->host); | 752 | scsi_remove_host(lp->host); |
| 743 | err_out_free_rq_buf: | 753 | err_out_free_rq_buf: |
| 744 | for (i = 0; i < fnic->rq_count; i++) | 754 | for (i = 0; i < fnic->rq_count; i++) |
| 745 | vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); | 755 | vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); |
| @@ -752,8 +762,6 @@ err_out_free_ioreq_pool: | |||
| 752 | mempool_destroy(fnic->io_req_pool); | 762 | mempool_destroy(fnic->io_req_pool); |
| 753 | err_out_free_resources: | 763 | err_out_free_resources: |
| 754 | fnic_free_vnic_resources(fnic); | 764 | fnic_free_vnic_resources(fnic); |
| 755 | err_out_free_intr: | ||
| 756 | fnic_free_intr(fnic); | ||
| 757 | err_out_clear_intr: | 765 | err_out_clear_intr: |
| 758 | fnic_clear_intr_mode(fnic); | 766 | fnic_clear_intr_mode(fnic); |
| 759 | err_out_dev_close: | 767 | err_out_dev_close: |
| @@ -775,6 +783,7 @@ err_out: | |||
| 775 | static void __devexit fnic_remove(struct pci_dev *pdev) | 783 | static void __devexit fnic_remove(struct pci_dev *pdev) |
| 776 | { | 784 | { |
| 777 | struct fnic *fnic = pci_get_drvdata(pdev); | 785 | struct fnic *fnic = pci_get_drvdata(pdev); |
| 786 | struct fc_lport *lp = fnic->lport; | ||
| 778 | unsigned long flags; | 787 | unsigned long flags; |
| 779 | 788 | ||
| 780 | /* | 789 | /* |
| @@ -796,6 +805,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) | |||
| 796 | */ | 805 | */ |
| 797 | flush_workqueue(fnic_event_queue); | 806 | flush_workqueue(fnic_event_queue); |
| 798 | skb_queue_purge(&fnic->frame_queue); | 807 | skb_queue_purge(&fnic->frame_queue); |
| 808 | skb_queue_purge(&fnic->tx_queue); | ||
| 799 | 809 | ||
| 800 | /* | 810 | /* |
| 801 | * Log off the fabric. This stops all remote ports, dns port, | 811 | * Log off the fabric. This stops all remote ports, dns port, |
| @@ -808,7 +818,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev) | |||
| 808 | fnic->in_remove = 1; | 818 | fnic->in_remove = 1; |
| 809 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 819 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 810 | 820 | ||
| 811 | fc_lport_destroy(fnic->lport); | 821 | fcoe_ctlr_destroy(&fnic->ctlr); |
| 822 | fc_lport_destroy(lp); | ||
| 812 | 823 | ||
| 813 | /* | 824 | /* |
| 814 | * This stops the fnic device, masks all interrupts. Completed | 825 | * This stops the fnic device, masks all interrupts. Completed |
| @@ -818,6 +829,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) | |||
| 818 | fnic_cleanup(fnic); | 829 | fnic_cleanup(fnic); |
| 819 | 830 | ||
| 820 | BUG_ON(!skb_queue_empty(&fnic->frame_queue)); | 831 | BUG_ON(!skb_queue_empty(&fnic->frame_queue)); |
| 832 | BUG_ON(!skb_queue_empty(&fnic->tx_queue)); | ||
| 821 | 833 | ||
| 822 | spin_lock_irqsave(&fnic_list_lock, flags); | 834 | spin_lock_irqsave(&fnic_list_lock, flags); |
| 823 | list_del(&fnic->list); | 835 | list_del(&fnic->list); |
| @@ -827,8 +839,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev) | |||
| 827 | scsi_remove_host(fnic->lport->host); | 839 | scsi_remove_host(fnic->lport->host); |
| 828 | fc_exch_mgr_free(fnic->lport); | 840 | fc_exch_mgr_free(fnic->lport); |
| 829 | vnic_dev_notify_unset(fnic->vdev); | 841 | vnic_dev_notify_unset(fnic->vdev); |
| 830 | fnic_free_vnic_resources(fnic); | ||
| 831 | fnic_free_intr(fnic); | 842 | fnic_free_intr(fnic); |
| 843 | fnic_free_vnic_resources(fnic); | ||
| 832 | fnic_clear_intr_mode(fnic); | 844 | fnic_clear_intr_mode(fnic); |
| 833 | vnic_dev_close(fnic->vdev); | 845 | vnic_dev_close(fnic->vdev); |
| 834 | vnic_dev_unregister(fnic->vdev); | 846 | vnic_dev_unregister(fnic->vdev); |
| @@ -836,7 +848,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) | |||
| 836 | pci_release_regions(pdev); | 848 | pci_release_regions(pdev); |
| 837 | pci_disable_device(pdev); | 849 | pci_disable_device(pdev); |
| 838 | pci_set_drvdata(pdev, NULL); | 850 | pci_set_drvdata(pdev, NULL); |
| 839 | scsi_host_put(fnic->lport->host); | 851 | scsi_host_put(lp->host); |
| 840 | } | 852 | } |
| 841 | 853 | ||
| 842 | static struct pci_driver fnic_driver = { | 854 | static struct pci_driver fnic_driver = { |
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c index 7ba61ec715d2..50488f8e169d 100644 --- a/drivers/scsi/fnic/fnic_res.c +++ b/drivers/scsi/fnic/fnic_res.c | |||
| @@ -144,10 +144,9 @@ int fnic_get_vnic_config(struct fnic *fnic) | |||
| 144 | c->intr_timer_type = c->intr_timer_type; | 144 | c->intr_timer_type = c->intr_timer_type; |
| 145 | 145 | ||
| 146 | shost_printk(KERN_INFO, fnic->lport->host, | 146 | shost_printk(KERN_INFO, fnic->lport->host, |
| 147 | "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " | 147 | "vNIC MAC addr %pM " |
| 148 | "wq/wq_copy/rq %d/%d/%d\n", | 148 | "wq/wq_copy/rq %d/%d/%d\n", |
| 149 | fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], | 149 | fnic->ctlr.ctl_src_addr, |
| 150 | fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5], | ||
| 151 | c->wq_enet_desc_count, c->wq_copy_desc_count, | 150 | c->wq_enet_desc_count, c->wq_copy_desc_count, |
| 152 | c->rq_desc_count); | 151 | c->rq_desc_count); |
| 153 | shost_printk(KERN_INFO, fnic->lport->host, | 152 | shost_printk(KERN_INFO, fnic->lport->host, |
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h index b6f310262534..ef8aaf2156dd 100644 --- a/drivers/scsi/fnic/fnic_res.h +++ b/drivers/scsi/fnic/fnic_res.h | |||
| @@ -51,6 +51,31 @@ static inline void fnic_queue_wq_desc(struct vnic_wq *wq, | |||
| 51 | vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); | 51 | vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, | ||
| 55 | void *os_buf, dma_addr_t dma_addr, | ||
| 56 | unsigned int len, | ||
| 57 | int vlan_tag_insert, | ||
| 58 | unsigned int vlan_tag, | ||
| 59 | int cq_entry) | ||
| 60 | { | ||
| 61 | struct wq_enet_desc *desc = vnic_wq_next_desc(wq); | ||
| 62 | |||
| 63 | wq_enet_desc_enc(desc, | ||
| 64 | (u64)dma_addr | VNIC_PADDR_TARGET, | ||
| 65 | (u16)len, | ||
| 66 | 0, /* mss_or_csum_offset */ | ||
| 67 | 0, /* fc_eof */ | ||
| 68 | 0, /* offload_mode */ | ||
| 69 | 1, /* eop */ | ||
| 70 | (u8)cq_entry, | ||
| 71 | 0, /* fcoe_encap */ | ||
| 72 | (u8)vlan_tag_insert, | ||
| 73 | (u16)vlan_tag, | ||
| 74 | 0 /* loopback */); | ||
| 75 | |||
| 76 | vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); | ||
| 77 | } | ||
| 78 | |||
| 54 | static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, | 79 | static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, |
| 55 | u32 req_id, | 80 | u32 req_id, |
| 56 | u32 lunmap_id, u8 spl_flags, | 81 | u32 lunmap_id, u8 spl_flags, |
| @@ -58,6 +83,7 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, | |||
| 58 | u64 sgl_addr, u64 sns_addr, | 83 | u64 sgl_addr, u64 sns_addr, |
| 59 | u8 crn, u8 pri_ta, | 84 | u8 crn, u8 pri_ta, |
| 60 | u8 flags, u8 *scsi_cdb, | 85 | u8 flags, u8 *scsi_cdb, |
| 86 | u8 cdb_len, | ||
| 61 | u32 data_len, u8 *lun, | 87 | u32 data_len, u8 *lun, |
| 62 | u32 d_id, u16 mss, | 88 | u32 d_id, u16 mss, |
| 63 | u32 ratov, u32 edtov) | 89 | u32 ratov, u32 edtov) |
| @@ -82,7 +108,8 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, | |||
| 82 | desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ | 108 | desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ |
| 83 | desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ | 109 | desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ |
| 84 | desc->u.icmnd_16.flags = flags; /* command flags */ | 110 | desc->u.icmnd_16.flags = flags; /* command flags */ |
| 85 | memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */ | 111 | memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16); |
| 112 | memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */ | ||
| 86 | desc->u.icmnd_16.data_len = data_len; /* length of data expected */ | 113 | desc->u.icmnd_16.data_len = data_len; /* length of data expected */ |
| 87 | memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ | 114 | memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ |
| 88 | desc->u.icmnd_16._resvd2 = 0; /* reserved */ | 115 | desc->u.icmnd_16._resvd2 = 0; /* reserved */ |
| @@ -132,12 +159,37 @@ static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, | |||
| 132 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | 159 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ |
| 133 | 160 | ||
| 134 | desc->u.flogi_reg.format = format; | 161 | desc->u.flogi_reg.format = format; |
| 162 | desc->u.flogi_reg._resvd = 0; | ||
| 135 | hton24(desc->u.flogi_reg.s_id, s_id); | 163 | hton24(desc->u.flogi_reg.s_id, s_id); |
| 136 | memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); | 164 | memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); |
| 137 | 165 | ||
| 138 | vnic_wq_copy_post(wq); | 166 | vnic_wq_copy_post(wq); |
| 139 | } | 167 | } |
| 140 | 168 | ||
| 169 | static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, | ||
| 170 | u32 req_id, u32 s_id, | ||
| 171 | u8 *fcf_mac, u8 *ha_mac, | ||
| 172 | u32 r_a_tov, u32 e_d_tov) | ||
| 173 | { | ||
| 174 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
| 175 | |||
| 176 | desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ | ||
| 177 | desc->hdr.status = 0; /* header status entry */ | ||
| 178 | desc->hdr._resvd = 0; /* reserved */ | ||
| 179 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
| 180 | |||
| 181 | desc->u.flogi_fip_reg._resvd0 = 0; | ||
| 182 | hton24(desc->u.flogi_fip_reg.s_id, s_id); | ||
| 183 | memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); | ||
| 184 | desc->u.flogi_fip_reg._resvd1 = 0; | ||
| 185 | desc->u.flogi_fip_reg.r_a_tov = r_a_tov; | ||
| 186 | desc->u.flogi_fip_reg.e_d_tov = e_d_tov; | ||
| 187 | memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); | ||
| 188 | desc->u.flogi_fip_reg._resvd2 = 0; | ||
| 189 | |||
| 190 | vnic_wq_copy_post(wq); | ||
| 191 | } | ||
| 192 | |||
| 141 | static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, | 193 | static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, |
| 142 | u32 req_id) | 194 | u32 req_id) |
| 143 | { | 195 | { |
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index bfc996971b81..65a39b0f6dc2 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
| @@ -174,6 +174,9 @@ int fnic_fw_reset_handler(struct fnic *fnic) | |||
| 174 | int ret = 0; | 174 | int ret = 0; |
| 175 | unsigned long flags; | 175 | unsigned long flags; |
| 176 | 176 | ||
| 177 | skb_queue_purge(&fnic->frame_queue); | ||
| 178 | skb_queue_purge(&fnic->tx_queue); | ||
| 179 | |||
| 177 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | 180 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
| 178 | 181 | ||
| 179 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | 182 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
| @@ -200,9 +203,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) | |||
| 200 | * fnic_flogi_reg_handler | 203 | * fnic_flogi_reg_handler |
| 201 | * Routine to send flogi register msg to fw | 204 | * Routine to send flogi register msg to fw |
| 202 | */ | 205 | */ |
| 203 | int fnic_flogi_reg_handler(struct fnic *fnic) | 206 | int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) |
| 204 | { | 207 | { |
| 205 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | 208 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
| 209 | enum fcpio_flogi_reg_format_type format; | ||
| 210 | struct fc_lport *lp = fnic->lport; | ||
| 206 | u8 gw_mac[ETH_ALEN]; | 211 | u8 gw_mac[ETH_ALEN]; |
| 207 | int ret = 0; | 212 | int ret = 0; |
| 208 | unsigned long flags; | 213 | unsigned long flags; |
| @@ -217,23 +222,32 @@ int fnic_flogi_reg_handler(struct fnic *fnic) | |||
| 217 | goto flogi_reg_ioreq_end; | 222 | goto flogi_reg_ioreq_end; |
| 218 | } | 223 | } |
| 219 | 224 | ||
| 220 | if (fnic->fcoui_mode) | 225 | if (fnic->ctlr.map_dest) { |
| 221 | memset(gw_mac, 0xff, ETH_ALEN); | 226 | memset(gw_mac, 0xff, ETH_ALEN); |
| 222 | else | 227 | format = FCPIO_FLOGI_REG_DEF_DEST; |
| 223 | memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); | 228 | } else { |
| 229 | memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); | ||
| 230 | format = FCPIO_FLOGI_REG_GW_DEST; | ||
| 231 | } | ||
| 224 | 232 | ||
| 225 | fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, | 233 | if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { |
| 226 | FCPIO_FLOGI_REG_GW_DEST, | 234 | fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, |
| 227 | fnic->s_id, | 235 | fc_id, gw_mac, |
| 228 | gw_mac); | 236 | fnic->data_src_addr, |
| 237 | lp->r_a_tov, lp->e_d_tov); | ||
| 238 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
| 239 | "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", | ||
| 240 | fc_id, fnic->data_src_addr, gw_mac); | ||
| 241 | } else { | ||
| 242 | fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, | ||
| 243 | format, fc_id, gw_mac); | ||
| 244 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
| 245 | "FLOGI reg issued fcid %x map %d dest %pM\n", | ||
| 246 | fc_id, fnic->ctlr.map_dest, gw_mac); | ||
| 247 | } | ||
| 229 | 248 | ||
| 230 | flogi_reg_ioreq_end: | 249 | flogi_reg_ioreq_end: |
| 231 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | 250 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); |
| 232 | |||
| 233 | if (!ret) | ||
| 234 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
| 235 | "flog reg issued\n"); | ||
| 236 | |||
| 237 | return ret; | 251 | return ret; |
| 238 | } | 252 | } |
| 239 | 253 | ||
| @@ -319,7 +333,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
| 319 | 0, /* scsi cmd ref, always 0 */ | 333 | 0, /* scsi cmd ref, always 0 */ |
| 320 | pri_tag, /* scsi pri and tag */ | 334 | pri_tag, /* scsi pri and tag */ |
| 321 | flags, /* command flags */ | 335 | flags, /* command flags */ |
| 322 | sc->cmnd, scsi_bufflen(sc), | 336 | sc->cmnd, sc->cmd_len, |
| 337 | scsi_bufflen(sc), | ||
| 323 | fc_lun.scsi_lun, io_req->port_id, | 338 | fc_lun.scsi_lun, io_req->port_id, |
| 324 | rport->maxframe_size, rp->r_a_tov, | 339 | rport->maxframe_size, rp->r_a_tov, |
| 325 | rp->e_d_tov); | 340 | rp->e_d_tov); |
| @@ -452,7 +467,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, | |||
| 452 | u8 hdr_status; | 467 | u8 hdr_status; |
| 453 | struct fcpio_tag tag; | 468 | struct fcpio_tag tag; |
| 454 | int ret = 0; | 469 | int ret = 0; |
| 455 | struct fc_frame *flogi; | ||
| 456 | unsigned long flags; | 470 | unsigned long flags; |
| 457 | 471 | ||
| 458 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | 472 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); |
| @@ -462,9 +476,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, | |||
| 462 | 476 | ||
| 463 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 477 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
| 464 | 478 | ||
| 465 | flogi = fnic->flogi; | ||
| 466 | fnic->flogi = NULL; | ||
| 467 | |||
| 468 | /* fnic should be in FC_TRANS_ETH_MODE */ | 479 | /* fnic should be in FC_TRANS_ETH_MODE */ |
| 469 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { | 480 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { |
| 470 | /* Check status of reset completion */ | 481 | /* Check status of reset completion */ |
| @@ -505,17 +516,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, | |||
| 505 | * free the flogi frame. Else, send it out | 516 | * free the flogi frame. Else, send it out |
| 506 | */ | 517 | */ |
| 507 | if (fnic->remove_wait || ret) { | 518 | if (fnic->remove_wait || ret) { |
| 508 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
| 509 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 519 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 510 | if (flogi) | 520 | skb_queue_purge(&fnic->tx_queue); |
| 511 | dev_kfree_skb_irq(fp_skb(flogi)); | ||
| 512 | goto reset_cmpl_handler_end; | 521 | goto reset_cmpl_handler_end; |
| 513 | } | 522 | } |
| 514 | 523 | ||
| 515 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 524 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 516 | 525 | ||
| 517 | if (flogi) | 526 | fnic_flush_tx(fnic); |
| 518 | ret = fnic_send_frame(fnic, flogi); | ||
| 519 | 527 | ||
| 520 | reset_cmpl_handler_end: | 528 | reset_cmpl_handler_end: |
| 521 | return ret; | 529 | return ret; |
| @@ -532,18 +540,13 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, | |||
| 532 | u8 hdr_status; | 540 | u8 hdr_status; |
| 533 | struct fcpio_tag tag; | 541 | struct fcpio_tag tag; |
| 534 | int ret = 0; | 542 | int ret = 0; |
| 535 | struct fc_frame *flogi_resp = NULL; | ||
| 536 | unsigned long flags; | 543 | unsigned long flags; |
| 537 | struct sk_buff *skb; | ||
| 538 | 544 | ||
| 539 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | 545 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); |
| 540 | 546 | ||
| 541 | /* Update fnic state based on status of flogi reg completion */ | 547 | /* Update fnic state based on status of flogi reg completion */ |
| 542 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 548 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
| 543 | 549 | ||
| 544 | flogi_resp = fnic->flogi_resp; | ||
| 545 | fnic->flogi_resp = NULL; | ||
| 546 | |||
| 547 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { | 550 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { |
| 548 | 551 | ||
| 549 | /* Check flogi registration completion status */ | 552 | /* Check flogi registration completion status */ |
| @@ -567,25 +570,17 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, | |||
| 567 | ret = -1; | 570 | ret = -1; |
| 568 | } | 571 | } |
| 569 | 572 | ||
| 570 | /* Successful flogi reg cmpl, pass frame to LibFC */ | 573 | if (!ret) { |
| 571 | if (!ret && flogi_resp) { | ||
| 572 | if (fnic->stop_rx_link_events) { | 574 | if (fnic->stop_rx_link_events) { |
| 573 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 575 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 574 | goto reg_cmpl_handler_end; | 576 | goto reg_cmpl_handler_end; |
| 575 | } | 577 | } |
| 576 | skb = (struct sk_buff *)flogi_resp; | ||
| 577 | /* Use fr_flags to indicate whether flogi resp or not */ | ||
| 578 | fr_flags(flogi_resp) = 1; | ||
| 579 | fr_dev(flogi_resp) = fnic->lport; | ||
| 580 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 578 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 581 | 579 | ||
| 582 | skb_queue_tail(&fnic->frame_queue, skb); | 580 | fnic_flush_tx(fnic); |
| 583 | queue_work(fnic_event_queue, &fnic->frame_work); | 581 | queue_work(fnic_event_queue, &fnic->frame_work); |
| 584 | |||
| 585 | } else { | 582 | } else { |
| 586 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 583 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 587 | if (flogi_resp) | ||
| 588 | dev_kfree_skb_irq(fp_skb(flogi_resp)); | ||
| 589 | } | 584 | } |
| 590 | 585 | ||
| 591 | reg_cmpl_handler_end: | 586 | reg_cmpl_handler_end: |
| @@ -907,6 +902,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, | |||
| 907 | break; | 902 | break; |
| 908 | 903 | ||
| 909 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ | 904 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ |
| 905 | case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ | ||
| 910 | ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); | 906 | ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); |
| 911 | break; | 907 | break; |
| 912 | 908 | ||
| @@ -1224,22 +1220,6 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
| 1224 | 1220 | ||
| 1225 | } | 1221 | } |
| 1226 | 1222 | ||
| 1227 | static void fnic_block_error_handler(struct scsi_cmnd *sc) | ||
| 1228 | { | ||
| 1229 | struct Scsi_Host *shost = sc->device->host; | ||
| 1230 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); | ||
| 1231 | unsigned long flags; | ||
| 1232 | |||
| 1233 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 1234 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
| 1235 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 1236 | msleep(1000); | ||
| 1237 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 1238 | } | ||
| 1239 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 1240 | |||
| 1241 | } | ||
| 1242 | |||
| 1243 | /* | 1223 | /* |
| 1244 | * This function is exported to SCSI for sending abort cmnds. | 1224 | * This function is exported to SCSI for sending abort cmnds. |
| 1245 | * A SCSI IO is represented by a io_req in the driver. | 1225 | * A SCSI IO is represented by a io_req in the driver. |
| @@ -1259,7 +1239,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1259 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1239 | DECLARE_COMPLETION_ONSTACK(tm_done); |
| 1260 | 1240 | ||
| 1261 | /* Wait for rport to unblock */ | 1241 | /* Wait for rport to unblock */ |
| 1262 | fnic_block_error_handler(sc); | 1242 | fc_block_scsi_eh(sc); |
| 1263 | 1243 | ||
| 1264 | /* Get local-port, check ready and link up */ | 1244 | /* Get local-port, check ready and link up */ |
| 1265 | lp = shost_priv(sc->device->host); | 1245 | lp = shost_priv(sc->device->host); |
| @@ -1541,7 +1521,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
| 1541 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1521 | DECLARE_COMPLETION_ONSTACK(tm_done); |
| 1542 | 1522 | ||
| 1543 | /* Wait for rport to unblock */ | 1523 | /* Wait for rport to unblock */ |
| 1544 | fnic_block_error_handler(sc); | 1524 | fc_block_scsi_eh(sc); |
| 1545 | 1525 | ||
| 1546 | /* Get local-port, check ready and link up */ | 1526 | /* Get local-port, check ready and link up */ |
| 1547 | lp = shost_priv(sc->device->host); | 1527 | lp = shost_priv(sc->device->host); |
| @@ -1762,7 +1742,7 @@ void fnic_scsi_abort_io(struct fc_lport *lp) | |||
| 1762 | fnic->remove_wait = &remove_wait; | 1742 | fnic->remove_wait = &remove_wait; |
| 1763 | old_state = fnic->state; | 1743 | old_state = fnic->state; |
| 1764 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 1744 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
| 1765 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | 1745 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); |
| 1766 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 1746 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 1767 | 1747 | ||
| 1768 | err = fnic_fw_reset_handler(fnic); | 1748 | err = fnic_fw_reset_handler(fnic); |
| @@ -1802,7 +1782,7 @@ void fnic_scsi_cleanup(struct fc_lport *lp) | |||
| 1802 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 1782 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
| 1803 | old_state = fnic->state; | 1783 | old_state = fnic->state; |
| 1804 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 1784 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
| 1805 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | 1785 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); |
| 1806 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 1786 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 1807 | 1787 | ||
| 1808 | if (fnic_fw_reset_handler(fnic)) { | 1788 | if (fnic_fw_reset_handler(fnic)) { |
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h index 46baa5254001..fbb55364e272 100644 --- a/drivers/scsi/fnic/vnic_scsi.h +++ b/drivers/scsi/fnic/vnic_scsi.h | |||
| @@ -95,5 +95,6 @@ struct vnic_fc_config { | |||
| 95 | 95 | ||
| 96 | #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ | 96 | #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ |
| 97 | #define VFCF_PERBI 0x2 /* persistent binding info available */ | 97 | #define VFCF_PERBI 0x2 /* persistent binding info available */ |
| 98 | #define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */ | ||
| 98 | 99 | ||
| 99 | #endif /* _VNIC_SCSI_H_ */ | 100 | #endif /* _VNIC_SCSI_H_ */ |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index c968cc31cd86..554626e18062 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
| @@ -180,14 +180,20 @@ void scsi_remove_host(struct Scsi_Host *shost) | |||
| 180 | EXPORT_SYMBOL(scsi_remove_host); | 180 | EXPORT_SYMBOL(scsi_remove_host); |
| 181 | 181 | ||
| 182 | /** | 182 | /** |
| 183 | * scsi_add_host - add a scsi host | 183 | * scsi_add_host_with_dma - add a scsi host with dma device |
| 184 | * @shost: scsi host pointer to add | 184 | * @shost: scsi host pointer to add |
| 185 | * @dev: a struct device of type scsi class | 185 | * @dev: a struct device of type scsi class |
| 186 | * @dma_dev: dma device for the host | ||
| 187 | * | ||
| 188 | * Note: You rarely need to worry about this unless you're in a | ||
| 189 | * virtualised host environments, so use the simpler scsi_add_host() | ||
| 190 | * function instead. | ||
| 186 | * | 191 | * |
| 187 | * Return value: | 192 | * Return value: |
| 188 | * 0 on success / != 0 for error | 193 | * 0 on success / != 0 for error |
| 189 | **/ | 194 | **/ |
| 190 | int scsi_add_host(struct Scsi_Host *shost, struct device *dev) | 195 | int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, |
| 196 | struct device *dma_dev) | ||
| 191 | { | 197 | { |
| 192 | struct scsi_host_template *sht = shost->hostt; | 198 | struct scsi_host_template *sht = shost->hostt; |
| 193 | int error = -EINVAL; | 199 | int error = -EINVAL; |
| @@ -207,6 +213,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev) | |||
| 207 | 213 | ||
| 208 | if (!shost->shost_gendev.parent) | 214 | if (!shost->shost_gendev.parent) |
| 209 | shost->shost_gendev.parent = dev ? dev : &platform_bus; | 215 | shost->shost_gendev.parent = dev ? dev : &platform_bus; |
| 216 | shost->dma_dev = dma_dev; | ||
| 210 | 217 | ||
| 211 | error = device_add(&shost->shost_gendev); | 218 | error = device_add(&shost->shost_gendev); |
| 212 | if (error) | 219 | if (error) |
| @@ -262,7 +269,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev) | |||
| 262 | fail: | 269 | fail: |
| 263 | return error; | 270 | return error; |
| 264 | } | 271 | } |
| 265 | EXPORT_SYMBOL(scsi_add_host); | 272 | EXPORT_SYMBOL(scsi_add_host_with_dma); |
| 266 | 273 | ||
| 267 | static void scsi_host_dev_release(struct device *dev) | 274 | static void scsi_host_dev_release(struct device *dev) |
| 268 | { | 275 | { |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a0e7e711ff9d..4f0556571f80 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
| @@ -834,7 +834,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) | |||
| 834 | atomic_read(&hba->resetting) == 0, 60 * HZ); | 834 | atomic_read(&hba->resetting) == 0, 60 * HZ); |
| 835 | 835 | ||
| 836 | if (atomic_read(&hba->resetting)) { | 836 | if (atomic_read(&hba->resetting)) { |
| 837 | /* IOP is in unkown state, abort reset */ | 837 | /* IOP is in unknown state, abort reset */ |
| 838 | printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); | 838 | printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); |
| 839 | return -1; | 839 | return -1; |
| 840 | } | 840 | } |
| @@ -861,10 +861,13 @@ static int hptiop_reset(struct scsi_cmnd *scp) | |||
| 861 | } | 861 | } |
| 862 | 862 | ||
| 863 | static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, | 863 | static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, |
| 864 | int queue_depth) | 864 | int queue_depth, int reason) |
| 865 | { | 865 | { |
| 866 | struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; | 866 | struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; |
| 867 | 867 | ||
| 868 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 869 | return -EOPNOTSUPP; | ||
| 870 | |||
| 868 | if (queue_depth > hba->max_requests) | 871 | if (queue_depth > hba->max_requests) |
| 869 | queue_depth = hba->max_requests; | 872 | queue_depth = hba->max_requests; |
| 870 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | 873 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index bb2c696c006a..87b536a97cb4 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <scsi/scsi_device.h> | 39 | #include <scsi/scsi_device.h> |
| 40 | #include <scsi/scsi_tcq.h> | 40 | #include <scsi/scsi_tcq.h> |
| 41 | #include <scsi/scsi_transport_fc.h> | 41 | #include <scsi/scsi_transport_fc.h> |
| 42 | #include <scsi/scsi_bsg_fc.h> | ||
| 42 | #include "ibmvfc.h" | 43 | #include "ibmvfc.h" |
| 43 | 44 | ||
| 44 | static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; | 45 | static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; |
| @@ -558,12 +559,11 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost, | |||
| 558 | /** | 559 | /** |
| 559 | * ibmvfc_init_host - Start host initialization | 560 | * ibmvfc_init_host - Start host initialization |
| 560 | * @vhost: ibmvfc host struct | 561 | * @vhost: ibmvfc host struct |
| 561 | * @relogin: is this a re-login? | ||
| 562 | * | 562 | * |
| 563 | * Return value: | 563 | * Return value: |
| 564 | * nothing | 564 | * nothing |
| 565 | **/ | 565 | **/ |
| 566 | static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) | 566 | static void ibmvfc_init_host(struct ibmvfc_host *vhost) |
| 567 | { | 567 | { |
| 568 | struct ibmvfc_target *tgt; | 568 | struct ibmvfc_target *tgt; |
| 569 | 569 | ||
| @@ -577,10 +577,8 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) | |||
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { | 579 | if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { |
| 580 | if (!relogin) { | 580 | memset(vhost->async_crq.msgs, 0, PAGE_SIZE); |
| 581 | memset(vhost->async_crq.msgs, 0, PAGE_SIZE); | 581 | vhost->async_crq.cur = 0; |
| 582 | vhost->async_crq.cur = 0; | ||
| 583 | } | ||
| 584 | 582 | ||
| 585 | list_for_each_entry(tgt, &vhost->targets, queue) | 583 | list_for_each_entry(tgt, &vhost->targets, queue) |
| 586 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 584 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| @@ -1678,6 +1676,276 @@ static void ibmvfc_sync_completion(struct ibmvfc_event *evt) | |||
| 1678 | } | 1676 | } |
| 1679 | 1677 | ||
| 1680 | /** | 1678 | /** |
| 1679 | * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands | ||
| 1680 | * @evt: struct ibmvfc_event | ||
| 1681 | * | ||
| 1682 | **/ | ||
| 1683 | static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) | ||
| 1684 | { | ||
| 1685 | struct ibmvfc_host *vhost = evt->vhost; | ||
| 1686 | |||
| 1687 | ibmvfc_free_event(evt); | ||
| 1688 | vhost->aborting_passthru = 0; | ||
| 1689 | dev_info(vhost->dev, "Passthru command cancelled\n"); | ||
| 1690 | } | ||
| 1691 | |||
| 1692 | /** | ||
| 1693 | * ibmvfc_bsg_timeout - Handle a BSG timeout | ||
| 1694 | * @job: struct fc_bsg_job that timed out | ||
| 1695 | * | ||
| 1696 | * Returns: | ||
| 1697 | * 0 on success / other on failure | ||
| 1698 | **/ | ||
| 1699 | static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) | ||
| 1700 | { | ||
| 1701 | struct ibmvfc_host *vhost = shost_priv(job->shost); | ||
| 1702 | unsigned long port_id = (unsigned long)job->dd_data; | ||
| 1703 | struct ibmvfc_event *evt; | ||
| 1704 | struct ibmvfc_tmf *tmf; | ||
| 1705 | unsigned long flags; | ||
| 1706 | int rc; | ||
| 1707 | |||
| 1708 | ENTER; | ||
| 1709 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 1710 | if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) { | ||
| 1711 | __ibmvfc_reset_host(vhost); | ||
| 1712 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1713 | return 0; | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | vhost->aborting_passthru = 1; | ||
| 1717 | evt = ibmvfc_get_event(vhost); | ||
| 1718 | ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT); | ||
| 1719 | |||
| 1720 | tmf = &evt->iu.tmf; | ||
| 1721 | memset(tmf, 0, sizeof(*tmf)); | ||
| 1722 | tmf->common.version = 1; | ||
| 1723 | tmf->common.opcode = IBMVFC_TMF_MAD; | ||
| 1724 | tmf->common.length = sizeof(*tmf); | ||
| 1725 | tmf->scsi_id = port_id; | ||
| 1726 | tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; | ||
| 1727 | tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY; | ||
| 1728 | rc = ibmvfc_send_event(evt, vhost, default_timeout); | ||
| 1729 | |||
| 1730 | if (rc != 0) { | ||
| 1731 | vhost->aborting_passthru = 0; | ||
| 1732 | dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc); | ||
| 1733 | rc = -EIO; | ||
| 1734 | } else | ||
| 1735 | dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n", | ||
| 1736 | port_id); | ||
| 1737 | |||
| 1738 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1739 | |||
| 1740 | LEAVE; | ||
| 1741 | return rc; | ||
| 1742 | } | ||
| 1743 | |||
| 1744 | /** | ||
| 1745 | * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command | ||
| 1746 | * @vhost: struct ibmvfc_host to send command | ||
| 1747 | * @port_id: port ID to send command | ||
| 1748 | * | ||
| 1749 | * Returns: | ||
| 1750 | * 0 on success / other on failure | ||
| 1751 | **/ | ||
| 1752 | static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) | ||
| 1753 | { | ||
| 1754 | struct ibmvfc_port_login *plogi; | ||
| 1755 | struct ibmvfc_target *tgt; | ||
| 1756 | struct ibmvfc_event *evt; | ||
| 1757 | union ibmvfc_iu rsp_iu; | ||
| 1758 | unsigned long flags; | ||
| 1759 | int rc = 0, issue_login = 1; | ||
| 1760 | |||
| 1761 | ENTER; | ||
| 1762 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 1763 | list_for_each_entry(tgt, &vhost->targets, queue) { | ||
| 1764 | if (tgt->scsi_id == port_id) { | ||
| 1765 | issue_login = 0; | ||
| 1766 | break; | ||
| 1767 | } | ||
| 1768 | } | ||
| 1769 | |||
| 1770 | if (!issue_login) | ||
| 1771 | goto unlock_out; | ||
| 1772 | if (unlikely((rc = ibmvfc_host_chkready(vhost)))) | ||
| 1773 | goto unlock_out; | ||
| 1774 | |||
| 1775 | evt = ibmvfc_get_event(vhost); | ||
| 1776 | ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); | ||
| 1777 | plogi = &evt->iu.plogi; | ||
| 1778 | memset(plogi, 0, sizeof(*plogi)); | ||
| 1779 | plogi->common.version = 1; | ||
| 1780 | plogi->common.opcode = IBMVFC_PORT_LOGIN; | ||
| 1781 | plogi->common.length = sizeof(*plogi); | ||
| 1782 | plogi->scsi_id = port_id; | ||
| 1783 | evt->sync_iu = &rsp_iu; | ||
| 1784 | init_completion(&evt->comp); | ||
| 1785 | |||
| 1786 | rc = ibmvfc_send_event(evt, vhost, default_timeout); | ||
| 1787 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1788 | |||
| 1789 | if (rc) | ||
| 1790 | return -EIO; | ||
| 1791 | |||
| 1792 | wait_for_completion(&evt->comp); | ||
| 1793 | |||
| 1794 | if (rsp_iu.plogi.common.status) | ||
| 1795 | rc = -EIO; | ||
| 1796 | |||
| 1797 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 1798 | ibmvfc_free_event(evt); | ||
| 1799 | unlock_out: | ||
| 1800 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1801 | LEAVE; | ||
| 1802 | return rc; | ||
| 1803 | } | ||
| 1804 | |||
| 1805 | /** | ||
| 1806 | * ibmvfc_bsg_request - Handle a BSG request | ||
| 1807 | * @job: struct fc_bsg_job to be executed | ||
| 1808 | * | ||
| 1809 | * Returns: | ||
| 1810 | * 0 on success / other on failure | ||
| 1811 | **/ | ||
| 1812 | static int ibmvfc_bsg_request(struct fc_bsg_job *job) | ||
| 1813 | { | ||
| 1814 | struct ibmvfc_host *vhost = shost_priv(job->shost); | ||
| 1815 | struct fc_rport *rport = job->rport; | ||
| 1816 | struct ibmvfc_passthru_mad *mad; | ||
| 1817 | struct ibmvfc_event *evt; | ||
| 1818 | union ibmvfc_iu rsp_iu; | ||
| 1819 | unsigned long flags, port_id = -1; | ||
| 1820 | unsigned int code = job->request->msgcode; | ||
| 1821 | int rc = 0, req_seg, rsp_seg, issue_login = 0; | ||
| 1822 | u32 fc_flags, rsp_len; | ||
| 1823 | |||
| 1824 | ENTER; | ||
| 1825 | job->reply->reply_payload_rcv_len = 0; | ||
| 1826 | if (rport) | ||
| 1827 | port_id = rport->port_id; | ||
| 1828 | |||
| 1829 | switch (code) { | ||
| 1830 | case FC_BSG_HST_ELS_NOLOGIN: | ||
| 1831 | port_id = (job->request->rqst_data.h_els.port_id[0] << 16) | | ||
| 1832 | (job->request->rqst_data.h_els.port_id[1] << 8) | | ||
| 1833 | job->request->rqst_data.h_els.port_id[2]; | ||
| 1834 | case FC_BSG_RPT_ELS: | ||
| 1835 | fc_flags = IBMVFC_FC_ELS; | ||
| 1836 | break; | ||
| 1837 | case FC_BSG_HST_CT: | ||
| 1838 | issue_login = 1; | ||
| 1839 | port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) | | ||
| 1840 | (job->request->rqst_data.h_ct.port_id[1] << 8) | | ||
| 1841 | job->request->rqst_data.h_ct.port_id[2]; | ||
| 1842 | case FC_BSG_RPT_CT: | ||
| 1843 | fc_flags = IBMVFC_FC_CT_IU; | ||
| 1844 | break; | ||
| 1845 | default: | ||
| 1846 | return -ENOTSUPP; | ||
| 1847 | }; | ||
| 1848 | |||
| 1849 | if (port_id == -1) | ||
| 1850 | return -EINVAL; | ||
| 1851 | if (!mutex_trylock(&vhost->passthru_mutex)) | ||
| 1852 | return -EBUSY; | ||
| 1853 | |||
| 1854 | job->dd_data = (void *)port_id; | ||
| 1855 | req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list, | ||
| 1856 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
| 1857 | |||
| 1858 | if (!req_seg) { | ||
| 1859 | mutex_unlock(&vhost->passthru_mutex); | ||
| 1860 | return -ENOMEM; | ||
| 1861 | } | ||
| 1862 | |||
| 1863 | rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list, | ||
| 1864 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
| 1865 | |||
| 1866 | if (!rsp_seg) { | ||
| 1867 | dma_unmap_sg(vhost->dev, job->request_payload.sg_list, | ||
| 1868 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
| 1869 | mutex_unlock(&vhost->passthru_mutex); | ||
| 1870 | return -ENOMEM; | ||
| 1871 | } | ||
| 1872 | |||
| 1873 | if (req_seg > 1 || rsp_seg > 1) { | ||
| 1874 | rc = -EINVAL; | ||
| 1875 | goto out; | ||
| 1876 | } | ||
| 1877 | |||
| 1878 | if (issue_login) | ||
| 1879 | rc = ibmvfc_bsg_plogi(vhost, port_id); | ||
| 1880 | |||
| 1881 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 1882 | |||
| 1883 | if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) || | ||
| 1884 | unlikely((rc = ibmvfc_host_chkready(vhost)))) { | ||
| 1885 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1886 | goto out; | ||
| 1887 | } | ||
| 1888 | |||
| 1889 | evt = ibmvfc_get_event(vhost); | ||
| 1890 | ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); | ||
| 1891 | mad = &evt->iu.passthru; | ||
| 1892 | |||
| 1893 | memset(mad, 0, sizeof(*mad)); | ||
| 1894 | mad->common.version = 1; | ||
| 1895 | mad->common.opcode = IBMVFC_PASSTHRU; | ||
| 1896 | mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); | ||
| 1897 | |||
| 1898 | mad->cmd_ioba.va = (u64)evt->crq.ioba + | ||
| 1899 | offsetof(struct ibmvfc_passthru_mad, iu); | ||
| 1900 | mad->cmd_ioba.len = sizeof(mad->iu); | ||
| 1901 | |||
| 1902 | mad->iu.cmd_len = job->request_payload.payload_len; | ||
| 1903 | mad->iu.rsp_len = job->reply_payload.payload_len; | ||
| 1904 | mad->iu.flags = fc_flags; | ||
| 1905 | mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; | ||
| 1906 | |||
| 1907 | mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list); | ||
| 1908 | mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list); | ||
| 1909 | mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list); | ||
| 1910 | mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list); | ||
| 1911 | mad->iu.scsi_id = port_id; | ||
| 1912 | mad->iu.tag = (u64)evt; | ||
| 1913 | rsp_len = mad->iu.rsp.len; | ||
| 1914 | |||
| 1915 | evt->sync_iu = &rsp_iu; | ||
| 1916 | init_completion(&evt->comp); | ||
| 1917 | rc = ibmvfc_send_event(evt, vhost, 0); | ||
| 1918 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1919 | |||
| 1920 | if (rc) { | ||
| 1921 | rc = -EIO; | ||
| 1922 | goto out; | ||
| 1923 | } | ||
| 1924 | |||
| 1925 | wait_for_completion(&evt->comp); | ||
| 1926 | |||
| 1927 | if (rsp_iu.passthru.common.status) | ||
| 1928 | rc = -EIO; | ||
| 1929 | else | ||
| 1930 | job->reply->reply_payload_rcv_len = rsp_len; | ||
| 1931 | |||
| 1932 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 1933 | ibmvfc_free_event(evt); | ||
| 1934 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 1935 | job->reply->result = rc; | ||
| 1936 | job->job_done(job); | ||
| 1937 | rc = 0; | ||
| 1938 | out: | ||
| 1939 | dma_unmap_sg(vhost->dev, job->request_payload.sg_list, | ||
| 1940 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
| 1941 | dma_unmap_sg(vhost->dev, job->reply_payload.sg_list, | ||
| 1942 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
| 1943 | mutex_unlock(&vhost->passthru_mutex); | ||
| 1944 | LEAVE; | ||
| 1945 | return rc; | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | /** | ||
| 1681 | * ibmvfc_reset_device - Reset the device with the specified reset type | 1949 | * ibmvfc_reset_device - Reset the device with the specified reset type |
| 1682 | * @sdev: scsi device to reset | 1950 | * @sdev: scsi device to reset |
| 1683 | * @type: reset type | 1951 | * @type: reset type |
| @@ -1731,7 +1999,10 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) | |||
| 1731 | sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); | 1999 | sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); |
| 1732 | wait_for_completion(&evt->comp); | 2000 | wait_for_completion(&evt->comp); |
| 1733 | 2001 | ||
| 1734 | if (rsp_iu.cmd.status) { | 2002 | if (rsp_iu.cmd.status) |
| 2003 | rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); | ||
| 2004 | |||
| 2005 | if (rsp_code) { | ||
| 1735 | if (fc_rsp->flags & FCP_RSP_LEN_VALID) | 2006 | if (fc_rsp->flags & FCP_RSP_LEN_VALID) |
| 1736 | rsp_code = fc_rsp->data.info.rsp_code; | 2007 | rsp_code = fc_rsp->data.info.rsp_code; |
| 1737 | 2008 | ||
| @@ -1820,7 +2091,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) | |||
| 1820 | sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); | 2091 | sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); |
| 1821 | wait_for_completion(&evt->comp); | 2092 | wait_for_completion(&evt->comp); |
| 1822 | 2093 | ||
| 1823 | if (rsp_iu.cmd.status) { | 2094 | if (rsp_iu.cmd.status) |
| 2095 | rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); | ||
| 2096 | |||
| 2097 | if (rsp_code) { | ||
| 1824 | if (fc_rsp->flags & FCP_RSP_LEN_VALID) | 2098 | if (fc_rsp->flags & FCP_RSP_LEN_VALID) |
| 1825 | rsp_code = fc_rsp->data.info.rsp_code; | 2099 | rsp_code = fc_rsp->data.info.rsp_code; |
| 1826 | 2100 | ||
| @@ -2061,12 +2335,24 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
| 2061 | } | 2335 | } |
| 2062 | 2336 | ||
| 2063 | /** | 2337 | /** |
| 2064 | * ibmvfc_dev_cancel_all - Device iterated cancel all function | 2338 | * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function |
| 2065 | * @sdev: scsi device struct | 2339 | * @sdev: scsi device struct |
| 2066 | * @data: return code | 2340 | * @data: return code |
| 2067 | * | 2341 | * |
| 2068 | **/ | 2342 | **/ |
| 2069 | static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data) | 2343 | static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data) |
| 2344 | { | ||
| 2345 | unsigned long *rc = data; | ||
| 2346 | *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); | ||
| 2347 | } | ||
| 2348 | |||
| 2349 | /** | ||
| 2350 | * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function | ||
| 2351 | * @sdev: scsi device struct | ||
| 2352 | * @data: return code | ||
| 2353 | * | ||
| 2354 | **/ | ||
| 2355 | static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) | ||
| 2070 | { | 2356 | { |
| 2071 | unsigned long *rc = data; | 2357 | unsigned long *rc = data; |
| 2072 | *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); | 2358 | *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); |
| @@ -2102,7 +2388,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) | |||
| 2102 | 2388 | ||
| 2103 | ENTER; | 2389 | ENTER; |
| 2104 | ibmvfc_wait_while_resetting(vhost); | 2390 | ibmvfc_wait_while_resetting(vhost); |
| 2105 | starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); | 2391 | starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); |
| 2106 | reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); | 2392 | reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); |
| 2107 | 2393 | ||
| 2108 | if (!cancel_rc && !reset_rc) | 2394 | if (!cancel_rc && !reset_rc) |
| @@ -2144,7 +2430,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) | |||
| 2144 | int rc = FAILED; | 2430 | int rc = FAILED; |
| 2145 | 2431 | ||
| 2146 | ENTER; | 2432 | ENTER; |
| 2147 | starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); | 2433 | starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts); |
| 2148 | starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); | 2434 | starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); |
| 2149 | 2435 | ||
| 2150 | if (!cancel_rc && !abort_rc) | 2436 | if (!cancel_rc && !abort_rc) |
| @@ -2297,13 +2583,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) | |||
| 2297 | /* Send back a response */ | 2583 | /* Send back a response */ |
| 2298 | rc = ibmvfc_send_crq_init_complete(vhost); | 2584 | rc = ibmvfc_send_crq_init_complete(vhost); |
| 2299 | if (rc == 0) | 2585 | if (rc == 0) |
| 2300 | ibmvfc_init_host(vhost, 0); | 2586 | ibmvfc_init_host(vhost); |
| 2301 | else | 2587 | else |
| 2302 | dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); | 2588 | dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); |
| 2303 | break; | 2589 | break; |
| 2304 | case IBMVFC_CRQ_INIT_COMPLETE: | 2590 | case IBMVFC_CRQ_INIT_COMPLETE: |
| 2305 | dev_info(vhost->dev, "Partner initialization complete\n"); | 2591 | dev_info(vhost->dev, "Partner initialization complete\n"); |
| 2306 | ibmvfc_init_host(vhost, 0); | 2592 | ibmvfc_init_host(vhost); |
| 2307 | break; | 2593 | break; |
| 2308 | default: | 2594 | default: |
| 2309 | dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); | 2595 | dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); |
| @@ -2478,12 +2764,17 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) | |||
| 2478 | * ibmvfc_change_queue_depth - Change the device's queue depth | 2764 | * ibmvfc_change_queue_depth - Change the device's queue depth |
| 2479 | * @sdev: scsi device struct | 2765 | * @sdev: scsi device struct |
| 2480 | * @qdepth: depth to set | 2766 | * @qdepth: depth to set |
| 2767 | * @reason: calling context | ||
| 2481 | * | 2768 | * |
| 2482 | * Return value: | 2769 | * Return value: |
| 2483 | * actual depth set | 2770 | * actual depth set |
| 2484 | **/ | 2771 | **/ |
| 2485 | static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) | 2772 | static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth, |
| 2773 | int reason) | ||
| 2486 | { | 2774 | { |
| 2775 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 2776 | return -EOPNOTSUPP; | ||
| 2777 | |||
| 2487 | if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) | 2778 | if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) |
| 2488 | qdepth = IBMVFC_MAX_CMDS_PER_LUN; | 2779 | qdepth = IBMVFC_MAX_CMDS_PER_LUN; |
| 2489 | 2780 | ||
| @@ -3725,7 +4016,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) | |||
| 3725 | case IBMVFC_MAD_SUCCESS: | 4016 | case IBMVFC_MAD_SUCCESS: |
| 3726 | if (list_empty(&vhost->sent) && | 4017 | if (list_empty(&vhost->sent) && |
| 3727 | vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { | 4018 | vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { |
| 3728 | ibmvfc_init_host(vhost, 0); | 4019 | ibmvfc_init_host(vhost); |
| 3729 | return; | 4020 | return; |
| 3730 | } | 4021 | } |
| 3731 | break; | 4022 | break; |
| @@ -3903,6 +4194,8 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | |||
| 3903 | rport->supported_classes |= FC_COS_CLASS2; | 4194 | rport->supported_classes |= FC_COS_CLASS2; |
| 3904 | if (tgt->service_parms.class3_parms[0] & 0x80000000) | 4195 | if (tgt->service_parms.class3_parms[0] & 0x80000000) |
| 3905 | rport->supported_classes |= FC_COS_CLASS3; | 4196 | rport->supported_classes |= FC_COS_CLASS3; |
| 4197 | if (rport->rqst_q) | ||
| 4198 | blk_queue_max_hw_segments(rport->rqst_q, 1); | ||
| 3906 | } else | 4199 | } else |
| 3907 | tgt_dbg(tgt, "rport add failed\n"); | 4200 | tgt_dbg(tgt, "rport add failed\n"); |
| 3908 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 4201 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
| @@ -4342,6 +4635,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 4342 | init_waitqueue_head(&vhost->work_wait_q); | 4635 | init_waitqueue_head(&vhost->work_wait_q); |
| 4343 | init_waitqueue_head(&vhost->init_wait_q); | 4636 | init_waitqueue_head(&vhost->init_wait_q); |
| 4344 | INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); | 4637 | INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); |
| 4638 | mutex_init(&vhost->passthru_mutex); | ||
| 4345 | 4639 | ||
| 4346 | if ((rc = ibmvfc_alloc_mem(vhost))) | 4640 | if ((rc = ibmvfc_alloc_mem(vhost))) |
| 4347 | goto free_scsi_host; | 4641 | goto free_scsi_host; |
| @@ -4374,6 +4668,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 4374 | goto remove_shost; | 4668 | goto remove_shost; |
| 4375 | } | 4669 | } |
| 4376 | 4670 | ||
| 4671 | if (shost_to_fc_host(shost)->rqst_q) | ||
| 4672 | blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1); | ||
| 4377 | dev_set_drvdata(dev, vhost); | 4673 | dev_set_drvdata(dev, vhost); |
| 4378 | spin_lock(&ibmvfc_driver_lock); | 4674 | spin_lock(&ibmvfc_driver_lock); |
| 4379 | list_add_tail(&vhost->queue, &ibmvfc_head); | 4675 | list_add_tail(&vhost->queue, &ibmvfc_head); |
| @@ -4414,7 +4710,11 @@ static int ibmvfc_remove(struct vio_dev *vdev) | |||
| 4414 | 4710 | ||
| 4415 | ENTER; | 4711 | ENTER; |
| 4416 | ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); | 4712 | ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); |
| 4713 | |||
| 4714 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 4417 | ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); | 4715 | ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); |
| 4716 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 4717 | |||
| 4418 | ibmvfc_wait_while_resetting(vhost); | 4718 | ibmvfc_wait_while_resetting(vhost); |
| 4419 | ibmvfc_release_crq_queue(vhost); | 4719 | ibmvfc_release_crq_queue(vhost); |
| 4420 | kthread_stop(vhost->work_thread); | 4720 | kthread_stop(vhost->work_thread); |
| @@ -4498,6 +4798,9 @@ static struct fc_function_template ibmvfc_transport_functions = { | |||
| 4498 | 4798 | ||
| 4499 | .get_starget_port_id = ibmvfc_get_starget_port_id, | 4799 | .get_starget_port_id = ibmvfc_get_starget_port_id, |
| 4500 | .show_starget_port_id = 1, | 4800 | .show_starget_port_id = 1, |
| 4801 | |||
| 4802 | .bsg_request = ibmvfc_bsg_request, | ||
| 4803 | .bsg_timeout = ibmvfc_bsg_timeout, | ||
| 4501 | }; | 4804 | }; |
| 4502 | 4805 | ||
| 4503 | /** | 4806 | /** |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 007fa1c9ef14..d25106a958d7 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
| @@ -29,8 +29,8 @@ | |||
| 29 | #include "viosrp.h" | 29 | #include "viosrp.h" |
| 30 | 30 | ||
| 31 | #define IBMVFC_NAME "ibmvfc" | 31 | #define IBMVFC_NAME "ibmvfc" |
| 32 | #define IBMVFC_DRIVER_VERSION "1.0.6" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.7" |
| 33 | #define IBMVFC_DRIVER_DATE "(May 28, 2009)" | 33 | #define IBMVFC_DRIVER_DATE "(October 16, 2009)" |
| 34 | 34 | ||
| 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 |
| 36 | #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 | 36 | #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 |
| @@ -58,9 +58,10 @@ | |||
| 58 | * 1 for ERP | 58 | * 1 for ERP |
| 59 | * 1 for initialization | 59 | * 1 for initialization |
| 60 | * 1 for NPIV Logout | 60 | * 1 for NPIV Logout |
| 61 | * 2 for BSG passthru | ||
| 61 | * 2 for each discovery thread | 62 | * 2 for each discovery thread |
| 62 | */ | 63 | */ |
| 63 | #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) | 64 | #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2)) |
| 64 | 65 | ||
| 65 | #define IBMVFC_MAD_SUCCESS 0x00 | 66 | #define IBMVFC_MAD_SUCCESS 0x00 |
| 66 | #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 | 67 | #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 |
| @@ -466,7 +467,10 @@ struct ibmvfc_passthru_iu { | |||
| 466 | u16 error; | 467 | u16 error; |
| 467 | u32 flags; | 468 | u32 flags; |
| 468 | #define IBMVFC_FC_ELS 0x01 | 469 | #define IBMVFC_FC_ELS 0x01 |
| 470 | #define IBMVFC_FC_CT_IU 0x02 | ||
| 469 | u32 cancel_key; | 471 | u32 cancel_key; |
| 472 | #define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000 | ||
| 473 | #define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001 | ||
| 470 | u32 reserved; | 474 | u32 reserved; |
| 471 | struct srp_direct_buf cmd; | 475 | struct srp_direct_buf cmd; |
| 472 | struct srp_direct_buf rsp; | 476 | struct srp_direct_buf rsp; |
| @@ -693,6 +697,7 @@ struct ibmvfc_host { | |||
| 693 | int disc_buf_sz; | 697 | int disc_buf_sz; |
| 694 | int log_level; | 698 | int log_level; |
| 695 | struct ibmvfc_discover_targets_buf *disc_buf; | 699 | struct ibmvfc_discover_targets_buf *disc_buf; |
| 700 | struct mutex passthru_mutex; | ||
| 696 | int task_set; | 701 | int task_set; |
| 697 | int init_retries; | 702 | int init_retries; |
| 698 | int discovery_threads; | 703 | int discovery_threads; |
| @@ -702,6 +707,7 @@ struct ibmvfc_host { | |||
| 702 | int delay_init; | 707 | int delay_init; |
| 703 | int scan_complete; | 708 | int scan_complete; |
| 704 | int logged_in; | 709 | int logged_in; |
| 710 | int aborting_passthru; | ||
| 705 | int events_to_log; | 711 | int events_to_log; |
| 706 | #define IBMVFC_AE_LINKUP 0x0001 | 712 | #define IBMVFC_AE_LINKUP 0x0001 |
| 707 | #define IBMVFC_AE_LINKDOWN 0x0002 | 713 | #define IBMVFC_AE_LINKDOWN 0x0002 |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d9b0e9d31983..e475b7957c2d 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
| @@ -1637,12 +1637,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) | |||
| 1637 | * ibmvscsi_change_queue_depth - Change the device's queue depth | 1637 | * ibmvscsi_change_queue_depth - Change the device's queue depth |
| 1638 | * @sdev: scsi device struct | 1638 | * @sdev: scsi device struct |
| 1639 | * @qdepth: depth to set | 1639 | * @qdepth: depth to set |
| 1640 | * @reason: calling context | ||
| 1640 | * | 1641 | * |
| 1641 | * Return value: | 1642 | * Return value: |
| 1642 | * actual depth set | 1643 | * actual depth set |
| 1643 | **/ | 1644 | **/ |
| 1644 | static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) | 1645 | static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth, |
| 1646 | int reason) | ||
| 1645 | { | 1647 | { |
| 1648 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 1649 | return -EOPNOTSUPP; | ||
| 1650 | |||
| 1646 | if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) | 1651 | if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) |
| 1647 | qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; | 1652 | qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; |
| 1648 | 1653 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 76d294fc7846..8643f5089361 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, | |||
| 1333 | 1333 | ||
| 1334 | error = &hostrcb->hcam.u.error.u.type_17_error; | 1334 | error = &hostrcb->hcam.u.error.u.type_17_error; |
| 1335 | error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; | 1335 | error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; |
| 1336 | strstrip(error->failure_reason); | 1336 | strim(error->failure_reason); |
| 1337 | 1337 | ||
| 1338 | ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, | 1338 | ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, |
| 1339 | be32_to_cpu(hostrcb->hcam.u.error.prc)); | 1339 | be32_to_cpu(hostrcb->hcam.u.error.prc)); |
| @@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, | |||
| 1359 | 1359 | ||
| 1360 | error = &hostrcb->hcam.u.error.u.type_07_error; | 1360 | error = &hostrcb->hcam.u.error.u.type_07_error; |
| 1361 | error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; | 1361 | error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; |
| 1362 | strstrip(error->failure_reason); | 1362 | strim(error->failure_reason); |
| 1363 | 1363 | ||
| 1364 | ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, | 1364 | ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, |
| 1365 | be32_to_cpu(hostrcb->hcam.u.error.prc)); | 1365 | be32_to_cpu(hostrcb->hcam.u.error.prc)); |
| @@ -3367,16 +3367,21 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; | |||
| 3367 | * ipr_change_queue_depth - Change the device's queue depth | 3367 | * ipr_change_queue_depth - Change the device's queue depth |
| 3368 | * @sdev: scsi device struct | 3368 | * @sdev: scsi device struct |
| 3369 | * @qdepth: depth to set | 3369 | * @qdepth: depth to set |
| 3370 | * @reason: calling context | ||
| 3370 | * | 3371 | * |
| 3371 | * Return value: | 3372 | * Return value: |
| 3372 | * actual depth set | 3373 | * actual depth set |
| 3373 | **/ | 3374 | **/ |
| 3374 | static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) | 3375 | static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth, |
| 3376 | int reason) | ||
| 3375 | { | 3377 | { |
| 3376 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; | 3378 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; |
| 3377 | struct ipr_resource_entry *res; | 3379 | struct ipr_resource_entry *res; |
| 3378 | unsigned long lock_flags = 0; | 3380 | unsigned long lock_flags = 0; |
| 3379 | 3381 | ||
| 3382 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 3383 | return -EOPNOTSUPP; | ||
| 3384 | |||
| 3380 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | 3385 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
| 3381 | res = (struct ipr_resource_entry *)sdev->hostdata; | 3386 | res = (struct ipr_resource_entry *)sdev->hostdata; |
| 3382 | 3387 | ||
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index edc49ca49cea..517da3fd89d3 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
| @@ -903,7 +903,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { | |||
| 903 | ISCSI_USERNAME | ISCSI_PASSWORD | | 903 | ISCSI_USERNAME | ISCSI_PASSWORD | |
| 904 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | 904 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
| 905 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | 905 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | |
| 906 | ISCSI_LU_RESET_TMO | | 906 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | |
| 907 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | 907 | ISCSI_PING_TMO | ISCSI_RECV_TMO | |
| 908 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | 908 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, |
| 909 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | 909 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | |
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile index 55f982de3a9a..4bb23ac86a5c 100644 --- a/drivers/scsi/libfc/Makefile +++ b/drivers/scsi/libfc/Makefile | |||
| @@ -3,10 +3,12 @@ | |||
| 3 | obj-$(CONFIG_LIBFC) += libfc.o | 3 | obj-$(CONFIG_LIBFC) += libfc.o |
| 4 | 4 | ||
| 5 | libfc-objs := \ | 5 | libfc-objs := \ |
| 6 | fc_libfc.o \ | ||
| 6 | fc_disc.o \ | 7 | fc_disc.o \ |
| 7 | fc_exch.o \ | 8 | fc_exch.o \ |
| 8 | fc_elsct.o \ | 9 | fc_elsct.o \ |
| 9 | fc_frame.o \ | 10 | fc_frame.o \ |
| 10 | fc_lport.o \ | 11 | fc_lport.o \ |
| 11 | fc_rport.o \ | 12 | fc_rport.o \ |
| 12 | fc_fcp.o | 13 | fc_fcp.o \ |
| 14 | fc_npiv.o | ||
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index c48799e9dd8e..9b0a5192a965 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
| @@ -40,6 +40,8 @@ | |||
| 40 | 40 | ||
| 41 | #include <scsi/libfc.h> | 41 | #include <scsi/libfc.h> |
| 42 | 42 | ||
| 43 | #include "fc_libfc.h" | ||
| 44 | |||
| 43 | #define FC_DISC_RETRY_LIMIT 3 /* max retries */ | 45 | #define FC_DISC_RETRY_LIMIT 3 /* max retries */ |
| 44 | #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ | 46 | #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ |
| 45 | 47 | ||
| @@ -51,8 +53,8 @@ static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); | |||
| 51 | static void fc_disc_restart(struct fc_disc *); | 53 | static void fc_disc_restart(struct fc_disc *); |
| 52 | 54 | ||
| 53 | /** | 55 | /** |
| 54 | * fc_disc_stop_rports() - delete all the remote ports associated with the lport | 56 | * fc_disc_stop_rports() - Delete all the remote ports associated with the lport |
| 55 | * @disc: The discovery job to stop rports on | 57 | * @disc: The discovery job to stop remote ports on |
| 56 | * | 58 | * |
| 57 | * Locking Note: This function expects that the lport mutex is locked before | 59 | * Locking Note: This function expects that the lport mutex is locked before |
| 58 | * calling it. | 60 | * calling it. |
| @@ -72,9 +74,9 @@ void fc_disc_stop_rports(struct fc_disc *disc) | |||
| 72 | 74 | ||
| 73 | /** | 75 | /** |
| 74 | * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) | 76 | * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) |
| 75 | * @sp: Current sequence of the RSCN exchange | 77 | * @sp: The sequence of the RSCN exchange |
| 76 | * @fp: RSCN Frame | 78 | * @fp: The RSCN frame |
| 77 | * @lport: Fibre Channel host port instance | 79 | * @lport: The local port that the request will be sent on |
| 78 | * | 80 | * |
| 79 | * Locking Note: This function expects that the disc_mutex is locked | 81 | * Locking Note: This function expects that the disc_mutex is locked |
| 80 | * before it is called. | 82 | * before it is called. |
| @@ -183,9 +185,9 @@ reject: | |||
| 183 | 185 | ||
| 184 | /** | 186 | /** |
| 185 | * fc_disc_recv_req() - Handle incoming requests | 187 | * fc_disc_recv_req() - Handle incoming requests |
| 186 | * @sp: Current sequence of the request exchange | 188 | * @sp: The sequence of the request exchange |
| 187 | * @fp: The frame | 189 | * @fp: The request frame |
| 188 | * @lport: The FC local port | 190 | * @lport: The local port receiving the request |
| 189 | * | 191 | * |
| 190 | * Locking Note: This function is called from the EM and will lock | 192 | * Locking Note: This function is called from the EM and will lock |
| 191 | * the disc_mutex before calling the handler for the | 193 | * the disc_mutex before calling the handler for the |
| @@ -213,7 +215,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
| 213 | 215 | ||
| 214 | /** | 216 | /** |
| 215 | * fc_disc_restart() - Restart discovery | 217 | * fc_disc_restart() - Restart discovery |
| 216 | * @lport: FC discovery context | 218 | * @disc: The discovery object to be restarted |
| 217 | * | 219 | * |
| 218 | * Locking Note: This function expects that the disc mutex | 220 | * Locking Note: This function expects that the disc mutex |
| 219 | * is already locked. | 221 | * is already locked. |
| @@ -240,9 +242,9 @@ static void fc_disc_restart(struct fc_disc *disc) | |||
| 240 | } | 242 | } |
| 241 | 243 | ||
| 242 | /** | 244 | /** |
| 243 | * fc_disc_start() - Fibre Channel Target discovery | 245 | * fc_disc_start() - Start discovery on a local port |
| 244 | * @lport: FC local port | 246 | * @lport: The local port to have discovery started on |
| 245 | * @disc_callback: function to be called when discovery is complete | 247 | * @disc_callback: Callback function to be called when discovery is complete |
| 246 | */ | 248 | */ |
| 247 | static void fc_disc_start(void (*disc_callback)(struct fc_lport *, | 249 | static void fc_disc_start(void (*disc_callback)(struct fc_lport *, |
| 248 | enum fc_disc_event), | 250 | enum fc_disc_event), |
| @@ -263,8 +265,8 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, | |||
| 263 | 265 | ||
| 264 | /** | 266 | /** |
| 265 | * fc_disc_done() - Discovery has been completed | 267 | * fc_disc_done() - Discovery has been completed |
| 266 | * @disc: FC discovery context | 268 | * @disc: The discovery context |
| 267 | * @event: discovery completion status | 269 | * @event: The discovery completion status |
| 268 | * | 270 | * |
| 269 | * Locking Note: This function expects that the disc mutex is locked before | 271 | * Locking Note: This function expects that the disc mutex is locked before |
| 270 | * it is called. The discovery callback is then made with the lock released, | 272 | * it is called. The discovery callback is then made with the lock released, |
| @@ -284,8 +286,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) | |||
| 284 | } | 286 | } |
| 285 | 287 | ||
| 286 | /* | 288 | /* |
| 287 | * Go through all remote ports. If they were found in the latest | 289 | * Go through all remote ports. If they were found in the latest |
| 288 | * discovery, reverify or log them in. Otherwise, log them out. | 290 | * discovery, reverify or log them in. Otherwise, log them out. |
| 289 | * Skip ports which were never discovered. These are the dNS port | 291 | * Skip ports which were never discovered. These are the dNS port |
| 290 | * and ports which were created by PLOGI. | 292 | * and ports which were created by PLOGI. |
| 291 | */ | 293 | */ |
| @@ -305,8 +307,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) | |||
| 305 | 307 | ||
| 306 | /** | 308 | /** |
| 307 | * fc_disc_error() - Handle error on dNS request | 309 | * fc_disc_error() - Handle error on dNS request |
| 308 | * @disc: FC discovery context | 310 | * @disc: The discovery context |
| 309 | * @fp: The frame pointer | 311 | * @fp: The error code encoded as a frame pointer |
| 310 | */ | 312 | */ |
| 311 | static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | 313 | static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) |
| 312 | { | 314 | { |
| @@ -342,7 +344,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | |||
| 342 | 344 | ||
| 343 | /** | 345 | /** |
| 344 | * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request | 346 | * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request |
| 345 | * @lport: FC discovery context | 347 | * @lport: The discovery context |
| 346 | * | 348 | * |
| 347 | * Locking Note: This function expects that the disc_mutex is locked | 349 | * Locking Note: This function expects that the disc_mutex is locked |
| 348 | * before it is called. | 350 | * before it is called. |
| @@ -368,17 +370,17 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) | |||
| 368 | if (lport->tt.elsct_send(lport, 0, fp, | 370 | if (lport->tt.elsct_send(lport, 0, fp, |
| 369 | FC_NS_GPN_FT, | 371 | FC_NS_GPN_FT, |
| 370 | fc_disc_gpn_ft_resp, | 372 | fc_disc_gpn_ft_resp, |
| 371 | disc, lport->e_d_tov)) | 373 | disc, 3 * lport->r_a_tov)) |
| 372 | return; | 374 | return; |
| 373 | err: | 375 | err: |
| 374 | fc_disc_error(disc, fp); | 376 | fc_disc_error(disc, NULL); |
| 375 | } | 377 | } |
| 376 | 378 | ||
| 377 | /** | 379 | /** |
| 378 | * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. | 380 | * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. |
| 379 | * @lport: Fibre Channel host port instance | 381 | * @lport: The local port the GPN_FT was received on |
| 380 | * @buf: GPN_FT response buffer | 382 | * @buf: The GPN_FT response buffer |
| 381 | * @len: size of response buffer | 383 | * @len: The size of response buffer |
| 382 | * | 384 | * |
| 383 | * Goes through the list of IDs and names resulting from a request. | 385 | * Goes through the list of IDs and names resulting from a request. |
| 384 | */ | 386 | */ |
| @@ -477,10 +479,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
| 477 | } | 479 | } |
| 478 | 480 | ||
| 479 | /** | 481 | /** |
| 480 | * fc_disc_timeout() - Retry handler for the disc component | 482 | * fc_disc_timeout() - Handler for discovery timeouts |
| 481 | * @work: Structure holding disc obj that needs retry discovery | 483 | * @work: Structure holding discovery context that needs to retry discovery |
| 482 | * | ||
| 483 | * Handle retry of memory allocation for remote ports. | ||
| 484 | */ | 484 | */ |
| 485 | static void fc_disc_timeout(struct work_struct *work) | 485 | static void fc_disc_timeout(struct work_struct *work) |
| 486 | { | 486 | { |
| @@ -494,9 +494,9 @@ static void fc_disc_timeout(struct work_struct *work) | |||
| 494 | 494 | ||
| 495 | /** | 495 | /** |
| 496 | * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) | 496 | * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) |
| 497 | * @sp: Current sequence of GPN_FT exchange | 497 | * @sp: The sequence that the GPN_FT response was received on |
| 498 | * @fp: response frame | 498 | * @fp: The GPN_FT response frame |
| 499 | * @lp_arg: Fibre Channel host port instance | 499 | * @lp_arg: The discovery context |
| 500 | * | 500 | * |
| 501 | * Locking Note: This function is called without disc mutex held, and | 501 | * Locking Note: This function is called without disc mutex held, and |
| 502 | * should do all its processing with the mutex held | 502 | * should do all its processing with the mutex held |
| @@ -567,9 +567,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 567 | 567 | ||
| 568 | /** | 568 | /** |
| 569 | * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) | 569 | * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) |
| 570 | * @sp: exchange sequence | 570 | * @sp: The sequence the GPN_ID is on |
| 571 | * @fp: response frame | 571 | * @fp: The response frame |
| 572 | * @rdata_arg: remote port private data | 572 | * @rdata_arg: The remote port that sent the GPN_ID response |
| 573 | * | 573 | * |
| 574 | * Locking Note: This function is called without disc mutex held. | 574 | * Locking Note: This function is called without disc mutex held. |
| 575 | */ | 575 | */ |
| @@ -637,7 +637,7 @@ out: | |||
| 637 | 637 | ||
| 638 | /** | 638 | /** |
| 639 | * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request | 639 | * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request |
| 640 | * @lport: local port | 640 | * @lport: The local port to initiate discovery on |
| 641 | * @rdata: remote port private data | 641 | * @rdata: remote port private data |
| 642 | * | 642 | * |
| 643 | * Locking Note: This function expects that the disc_mutex is locked | 643 | * Locking Note: This function expects that the disc_mutex is locked |
| @@ -654,7 +654,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, | |||
| 654 | if (!fp) | 654 | if (!fp) |
| 655 | return -ENOMEM; | 655 | return -ENOMEM; |
| 656 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, | 656 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, |
| 657 | fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) | 657 | fc_disc_gpn_id_resp, rdata, |
| 658 | 3 * lport->r_a_tov)) | ||
| 658 | return -ENOMEM; | 659 | return -ENOMEM; |
| 659 | kref_get(&rdata->kref); | 660 | kref_get(&rdata->kref); |
| 660 | return 0; | 661 | return 0; |
| @@ -662,8 +663,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, | |||
| 662 | 663 | ||
| 663 | /** | 664 | /** |
| 664 | * fc_disc_single() - Discover the directory information for a single target | 665 | * fc_disc_single() - Discover the directory information for a single target |
| 665 | * @lport: local port | 666 | * @lport: The local port the remote port is associated with |
| 666 | * @dp: The port to rediscover | 667 | * @dp: The port to rediscover |
| 667 | * | 668 | * |
| 668 | * Locking Note: This function expects that the disc_mutex is locked | 669 | * Locking Note: This function expects that the disc_mutex is locked |
| 669 | * before it is called. | 670 | * before it is called. |
| @@ -681,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) | |||
| 681 | 682 | ||
| 682 | /** | 683 | /** |
| 683 | * fc_disc_stop() - Stop discovery for a given lport | 684 | * fc_disc_stop() - Stop discovery for a given lport |
| 684 | * @lport: The lport that discovery should stop for | 685 | * @lport: The local port that discovery should stop on |
| 685 | */ | 686 | */ |
| 686 | void fc_disc_stop(struct fc_lport *lport) | 687 | void fc_disc_stop(struct fc_lport *lport) |
| 687 | { | 688 | { |
| @@ -695,7 +696,7 @@ void fc_disc_stop(struct fc_lport *lport) | |||
| 695 | 696 | ||
| 696 | /** | 697 | /** |
| 697 | * fc_disc_stop_final() - Stop discovery for a given lport | 698 | * fc_disc_stop_final() - Stop discovery for a given lport |
| 698 | * @lport: The lport that discovery should stop for | 699 | * @lport: The lport that discovery should stop on |
| 699 | * | 700 | * |
| 700 | * This function will block until discovery has been | 701 | * This function will block until discovery has been |
| 701 | * completely stopped and all rports have been deleted. | 702 | * completely stopped and all rports have been deleted. |
| @@ -707,8 +708,8 @@ void fc_disc_stop_final(struct fc_lport *lport) | |||
| 707 | } | 708 | } |
| 708 | 709 | ||
| 709 | /** | 710 | /** |
| 710 | * fc_disc_init() - Initialize the discovery block | 711 | * fc_disc_init() - Initialize the discovery layer for a local port |
| 711 | * @lport: FC local port | 712 | * @lport: The local port that needs the discovery layer to be initialized |
| 712 | */ | 713 | */ |
| 713 | int fc_disc_init(struct fc_lport *lport) | 714 | int fc_disc_init(struct fc_lport *lport) |
| 714 | { | 715 | { |
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 5cfa68732e9d..53748724f2c5 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c | |||
| @@ -28,17 +28,22 @@ | |||
| 28 | #include <scsi/libfc.h> | 28 | #include <scsi/libfc.h> |
| 29 | #include <scsi/fc_encode.h> | 29 | #include <scsi/fc_encode.h> |
| 30 | 30 | ||
| 31 | /* | 31 | /** |
| 32 | * fc_elsct_send - sends ELS/CT frame | 32 | * fc_elsct_send() - Send an ELS or CT frame |
| 33 | * @lport: The local port to send the frame on | ||
| 34 | * @did: The destination ID for the frame | ||
| 35 | * @fp: The frame to be sent | ||
| 36 | * @op: The operational code | ||
| 37 | * @resp: The callback routine when the response is received | ||
| 38 | * @arg: The argument to pass to the response callback routine | ||
| 39 | * @timer_msec: The timeout period for the frame (in msecs) | ||
| 33 | */ | 40 | */ |
| 34 | static struct fc_seq *fc_elsct_send(struct fc_lport *lport, | 41 | struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, |
| 35 | u32 did, | 42 | struct fc_frame *fp, unsigned int op, |
| 36 | struct fc_frame *fp, | 43 | void (*resp)(struct fc_seq *, |
| 37 | unsigned int op, | 44 | struct fc_frame *, |
| 38 | void (*resp)(struct fc_seq *, | 45 | void *), |
| 39 | struct fc_frame *fp, | 46 | void *arg, u32 timer_msec) |
| 40 | void *arg), | ||
| 41 | void *arg, u32 timer_msec) | ||
| 42 | { | 47 | { |
| 43 | enum fc_rctl r_ctl; | 48 | enum fc_rctl r_ctl; |
| 44 | enum fc_fh_type fh_type; | 49 | enum fc_fh_type fh_type; |
| @@ -53,15 +58,22 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, | |||
| 53 | did = FC_FID_DIR_SERV; | 58 | did = FC_FID_DIR_SERV; |
| 54 | } | 59 | } |
| 55 | 60 | ||
| 56 | if (rc) | 61 | if (rc) { |
| 62 | fc_frame_free(fp); | ||
| 57 | return NULL; | 63 | return NULL; |
| 64 | } | ||
| 58 | 65 | ||
| 59 | fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, | 66 | fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, |
| 60 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 67 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
| 61 | 68 | ||
| 62 | return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); | 69 | return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); |
| 63 | } | 70 | } |
| 71 | EXPORT_SYMBOL(fc_elsct_send); | ||
| 64 | 72 | ||
| 73 | /** | ||
| 74 | * fc_elsct_init() - Initialize the ELS/CT layer | ||
| 75 | * @lport: The local port to initialize the ELS/CT layer for | ||
| 76 | */ | ||
| 65 | int fc_elsct_init(struct fc_lport *lport) | 77 | int fc_elsct_init(struct fc_lport *lport) |
| 66 | { | 78 | { |
| 67 | if (!lport->tt.elsct_send) | 79 | if (!lport->tt.elsct_send) |
| @@ -72,12 +84,15 @@ int fc_elsct_init(struct fc_lport *lport) | |||
| 72 | EXPORT_SYMBOL(fc_elsct_init); | 84 | EXPORT_SYMBOL(fc_elsct_init); |
| 73 | 85 | ||
| 74 | /** | 86 | /** |
| 75 | * fc_els_resp_type() - return string describing ELS response for debug. | 87 | * fc_els_resp_type() - Return a string describing the ELS response |
| 76 | * @fp: frame pointer with possible error code. | 88 | * @fp: The frame pointer or possible error code |
| 77 | */ | 89 | */ |
| 78 | const char *fc_els_resp_type(struct fc_frame *fp) | 90 | const char *fc_els_resp_type(struct fc_frame *fp) |
| 79 | { | 91 | { |
| 80 | const char *msg; | 92 | const char *msg; |
| 93 | struct fc_frame_header *fh; | ||
| 94 | struct fc_ct_hdr *ct; | ||
| 95 | |||
| 81 | if (IS_ERR(fp)) { | 96 | if (IS_ERR(fp)) { |
| 82 | switch (-PTR_ERR(fp)) { | 97 | switch (-PTR_ERR(fp)) { |
| 83 | case FC_NO_ERR: | 98 | case FC_NO_ERR: |
| @@ -94,15 +109,41 @@ const char *fc_els_resp_type(struct fc_frame *fp) | |||
| 94 | break; | 109 | break; |
| 95 | } | 110 | } |
| 96 | } else { | 111 | } else { |
| 97 | switch (fc_frame_payload_op(fp)) { | 112 | fh = fc_frame_header_get(fp); |
| 98 | case ELS_LS_ACC: | 113 | switch (fh->fh_type) { |
| 99 | msg = "accept"; | 114 | case FC_TYPE_ELS: |
| 115 | switch (fc_frame_payload_op(fp)) { | ||
| 116 | case ELS_LS_ACC: | ||
| 117 | msg = "accept"; | ||
| 118 | break; | ||
| 119 | case ELS_LS_RJT: | ||
| 120 | msg = "reject"; | ||
| 121 | break; | ||
| 122 | default: | ||
| 123 | msg = "response unknown ELS"; | ||
| 124 | break; | ||
| 125 | } | ||
| 100 | break; | 126 | break; |
| 101 | case ELS_LS_RJT: | 127 | case FC_TYPE_CT: |
| 102 | msg = "reject"; | 128 | ct = fc_frame_payload_get(fp, sizeof(*ct)); |
| 129 | if (ct) { | ||
| 130 | switch (ntohs(ct->ct_cmd)) { | ||
| 131 | case FC_FS_ACC: | ||
| 132 | msg = "CT accept"; | ||
| 133 | break; | ||
| 134 | case FC_FS_RJT: | ||
| 135 | msg = "CT reject"; | ||
| 136 | break; | ||
| 137 | default: | ||
| 138 | msg = "response unknown CT"; | ||
| 139 | break; | ||
| 140 | } | ||
| 141 | } else { | ||
| 142 | msg = "short CT response"; | ||
| 143 | } | ||
| 103 | break; | 144 | break; |
| 104 | default: | 145 | default: |
| 105 | msg = "response unknown ELS"; | 146 | msg = "response not ELS or CT"; |
| 106 | break; | 147 | break; |
| 107 | } | 148 | } |
| 108 | } | 149 | } |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index c1c15748220c..19d711cb938c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
| @@ -32,10 +32,13 @@ | |||
| 32 | #include <scsi/libfc.h> | 32 | #include <scsi/libfc.h> |
| 33 | #include <scsi/fc_encode.h> | 33 | #include <scsi/fc_encode.h> |
| 34 | 34 | ||
| 35 | #include "fc_libfc.h" | ||
| 36 | |||
| 35 | u16 fc_cpu_mask; /* cpu mask for possible cpus */ | 37 | u16 fc_cpu_mask; /* cpu mask for possible cpus */ |
| 36 | EXPORT_SYMBOL(fc_cpu_mask); | 38 | EXPORT_SYMBOL(fc_cpu_mask); |
| 37 | static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ | 39 | static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ |
| 38 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ | 40 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ |
| 41 | struct workqueue_struct *fc_exch_workqueue; | ||
| 39 | 42 | ||
| 40 | /* | 43 | /* |
| 41 | * Structure and function definitions for managing Fibre Channel Exchanges | 44 | * Structure and function definitions for managing Fibre Channel Exchanges |
| @@ -50,35 +53,46 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ | |||
| 50 | * fc_seq holds the state for an individual sequence. | 53 | * fc_seq holds the state for an individual sequence. |
| 51 | */ | 54 | */ |
| 52 | 55 | ||
| 53 | /* | 56 | /** |
| 54 | * Per cpu exchange pool | 57 | * struct fc_exch_pool - Per cpu exchange pool |
| 58 | * @next_index: Next possible free exchange index | ||
| 59 | * @total_exches: Total allocated exchanges | ||
| 60 | * @lock: Exch pool lock | ||
| 61 | * @ex_list: List of exchanges | ||
| 55 | * | 62 | * |
| 56 | * This structure manages per cpu exchanges in array of exchange pointers. | 63 | * This structure manages per cpu exchanges in array of exchange pointers. |
| 57 | * This array is allocated followed by struct fc_exch_pool memory for | 64 | * This array is allocated followed by struct fc_exch_pool memory for |
| 58 | * assigned range of exchanges to per cpu pool. | 65 | * assigned range of exchanges to per cpu pool. |
| 59 | */ | 66 | */ |
| 60 | struct fc_exch_pool { | 67 | struct fc_exch_pool { |
| 61 | u16 next_index; /* next possible free exchange index */ | 68 | u16 next_index; |
| 62 | u16 total_exches; /* total allocated exchanges */ | 69 | u16 total_exches; |
| 63 | spinlock_t lock; /* exch pool lock */ | 70 | spinlock_t lock; |
| 64 | struct list_head ex_list; /* allocated exchanges list */ | 71 | struct list_head ex_list; |
| 65 | }; | 72 | }; |
| 66 | 73 | ||
| 67 | /* | 74 | /** |
| 68 | * Exchange manager. | 75 | * struct fc_exch_mgr - The Exchange Manager (EM). |
| 76 | * @class: Default class for new sequences | ||
| 77 | * @kref: Reference counter | ||
| 78 | * @min_xid: Minimum exchange ID | ||
| 79 | * @max_xid: Maximum exchange ID | ||
| 80 | * @ep_pool: Reserved exchange pointers | ||
| 81 | * @pool_max_index: Max exch array index in exch pool | ||
| 82 | * @pool: Per cpu exch pool | ||
| 83 | * @stats: Statistics structure | ||
| 69 | * | 84 | * |
| 70 | * This structure is the center for creating exchanges and sequences. | 85 | * This structure is the center for creating exchanges and sequences. |
| 71 | * It manages the allocation of exchange IDs. | 86 | * It manages the allocation of exchange IDs. |
| 72 | */ | 87 | */ |
| 73 | struct fc_exch_mgr { | 88 | struct fc_exch_mgr { |
| 74 | enum fc_class class; /* default class for sequences */ | 89 | enum fc_class class; |
| 75 | struct kref kref; /* exchange mgr reference count */ | 90 | struct kref kref; |
| 76 | u16 min_xid; /* min exchange ID */ | 91 | u16 min_xid; |
| 77 | u16 max_xid; /* max exchange ID */ | 92 | u16 max_xid; |
| 78 | struct list_head ex_list; /* allocated exchanges list */ | 93 | mempool_t *ep_pool; |
| 79 | mempool_t *ep_pool; /* reserve ep's */ | 94 | u16 pool_max_index; |
| 80 | u16 pool_max_index; /* max exch array index in exch pool */ | 95 | struct fc_exch_pool *pool; |
| 81 | struct fc_exch_pool *pool; /* per cpu exch pool */ | ||
| 82 | 96 | ||
| 83 | /* | 97 | /* |
| 84 | * currently exchange mgr stats are updated but not used. | 98 | * currently exchange mgr stats are updated but not used. |
| @@ -96,6 +110,18 @@ struct fc_exch_mgr { | |||
| 96 | }; | 110 | }; |
| 97 | #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) | 111 | #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) |
| 98 | 112 | ||
| 113 | /** | ||
| 114 | * struct fc_exch_mgr_anchor - primary structure for list of EMs | ||
| 115 | * @ema_list: Exchange Manager Anchor list | ||
| 116 | * @mp: Exchange Manager associated with this anchor | ||
| 117 | * @match: Routine to determine if this anchor's EM should be used | ||
| 118 | * | ||
| 119 | * When walking the list of anchors the match routine will be called | ||
| 120 | * for each anchor to determine if that EM should be used. The last | ||
| 121 | * anchor in the list will always match to handle any exchanges not | ||
| 122 | * handled by other EMs. The non-default EMs would be added to the | ||
| 123 | * anchor list by HW that provides FCoE offloads. | ||
| 124 | */ | ||
| 99 | struct fc_exch_mgr_anchor { | 125 | struct fc_exch_mgr_anchor { |
| 100 | struct list_head ema_list; | 126 | struct list_head ema_list; |
| 101 | struct fc_exch_mgr *mp; | 127 | struct fc_exch_mgr *mp; |
| @@ -108,7 +134,6 @@ static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, | |||
| 108 | enum fc_els_rjt_explan); | 134 | enum fc_els_rjt_explan); |
| 109 | static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); | 135 | static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); |
| 110 | static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); | 136 | static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); |
| 111 | static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); | ||
| 112 | 137 | ||
| 113 | /* | 138 | /* |
| 114 | * Internal implementation notes. | 139 | * Internal implementation notes. |
| @@ -196,6 +221,15 @@ static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT; | |||
| 196 | 221 | ||
| 197 | #define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0])) | 222 | #define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0])) |
| 198 | 223 | ||
| 224 | /** | ||
| 225 | * fc_exch_name_lookup() - Lookup name by opcode | ||
| 226 | * @op: Opcode to be looked up | ||
| 227 | * @table: Opcode/name table | ||
| 228 | * @max_index: Index not to be exceeded | ||
| 229 | * | ||
| 230 | * This routine is used to determine a human-readable string identifying | ||
| 231 | * a R_CTL opcode. | ||
| 232 | */ | ||
| 199 | static inline const char *fc_exch_name_lookup(unsigned int op, char **table, | 233 | static inline const char *fc_exch_name_lookup(unsigned int op, char **table, |
| 200 | unsigned int max_index) | 234 | unsigned int max_index) |
| 201 | { | 235 | { |
| @@ -208,25 +242,34 @@ static inline const char *fc_exch_name_lookup(unsigned int op, char **table, | |||
| 208 | return name; | 242 | return name; |
| 209 | } | 243 | } |
| 210 | 244 | ||
| 245 | /** | ||
| 246 | * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup() | ||
| 247 | * @op: The opcode to be looked up | ||
| 248 | */ | ||
| 211 | static const char *fc_exch_rctl_name(unsigned int op) | 249 | static const char *fc_exch_rctl_name(unsigned int op) |
| 212 | { | 250 | { |
| 213 | return fc_exch_name_lookup(op, fc_exch_rctl_names, | 251 | return fc_exch_name_lookup(op, fc_exch_rctl_names, |
| 214 | FC_TABLE_SIZE(fc_exch_rctl_names)); | 252 | FC_TABLE_SIZE(fc_exch_rctl_names)); |
| 215 | } | 253 | } |
| 216 | 254 | ||
| 217 | /* | 255 | /** |
| 218 | * Hold an exchange - keep it from being freed. | 256 | * fc_exch_hold() - Increment an exchange's reference count |
| 257 | * @ep: Echange to be held | ||
| 219 | */ | 258 | */ |
| 220 | static void fc_exch_hold(struct fc_exch *ep) | 259 | static inline void fc_exch_hold(struct fc_exch *ep) |
| 221 | { | 260 | { |
| 222 | atomic_inc(&ep->ex_refcnt); | 261 | atomic_inc(&ep->ex_refcnt); |
| 223 | } | 262 | } |
| 224 | 263 | ||
| 225 | /* | 264 | /** |
| 226 | * setup fc hdr by initializing few more FC header fields and sof/eof. | 265 | * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields |
| 227 | * Initialized fields by this func: | 266 | * and determine SOF and EOF. |
| 228 | * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt | 267 | * @ep: The exchange to that will use the header |
| 229 | * - sof and eof | 268 | * @fp: The frame whose header is to be modified |
| 269 | * @f_ctl: F_CTL bits that will be used for the frame header | ||
| 270 | * | ||
| 271 | * The fields initialized by this routine are: fh_ox_id, fh_rx_id, | ||
| 272 | * fh_seq_id, fh_seq_cnt and the SOF and EOF. | ||
| 230 | */ | 273 | */ |
| 231 | static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, | 274 | static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, |
| 232 | u32 f_ctl) | 275 | u32 f_ctl) |
| @@ -243,7 +286,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, | |||
| 243 | if (fc_sof_needs_ack(ep->class)) | 286 | if (fc_sof_needs_ack(ep->class)) |
| 244 | fr_eof(fp) = FC_EOF_N; | 287 | fr_eof(fp) = FC_EOF_N; |
| 245 | /* | 288 | /* |
| 246 | * Form f_ctl. | 289 | * From F_CTL. |
| 247 | * The number of fill bytes to make the length a 4-byte | 290 | * The number of fill bytes to make the length a 4-byte |
| 248 | * multiple is the low order 2-bits of the f_ctl. | 291 | * multiple is the low order 2-bits of the f_ctl. |
| 249 | * The fill itself will have been cleared by the frame | 292 | * The fill itself will have been cleared by the frame |
| @@ -273,10 +316,12 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, | |||
| 273 | fh->fh_seq_cnt = htons(ep->seq.cnt); | 316 | fh->fh_seq_cnt = htons(ep->seq.cnt); |
| 274 | } | 317 | } |
| 275 | 318 | ||
| 276 | 319 | /** | |
| 277 | /* | 320 | * fc_exch_release() - Decrement an exchange's reference count |
| 278 | * Release a reference to an exchange. | 321 | * @ep: Exchange to be released |
| 279 | * If the refcnt goes to zero and the exchange is complete, it is freed. | 322 | * |
| 323 | * If the reference count reaches zero and the exchange is complete, | ||
| 324 | * it is freed. | ||
| 280 | */ | 325 | */ |
| 281 | static void fc_exch_release(struct fc_exch *ep) | 326 | static void fc_exch_release(struct fc_exch *ep) |
| 282 | { | 327 | { |
| @@ -291,6 +336,10 @@ static void fc_exch_release(struct fc_exch *ep) | |||
| 291 | } | 336 | } |
| 292 | } | 337 | } |
| 293 | 338 | ||
| 339 | /** | ||
| 340 | * fc_exch_done_locked() - Complete an exchange with the exchange lock held | ||
| 341 | * @ep: The exchange that is complete | ||
| 342 | */ | ||
| 294 | static int fc_exch_done_locked(struct fc_exch *ep) | 343 | static int fc_exch_done_locked(struct fc_exch *ep) |
| 295 | { | 344 | { |
| 296 | int rc = 1; | 345 | int rc = 1; |
| @@ -315,6 +364,15 @@ static int fc_exch_done_locked(struct fc_exch *ep) | |||
| 315 | return rc; | 364 | return rc; |
| 316 | } | 365 | } |
| 317 | 366 | ||
| 367 | /** | ||
| 368 | * fc_exch_ptr_get() - Return an exchange from an exchange pool | ||
| 369 | * @pool: Exchange Pool to get an exchange from | ||
| 370 | * @index: Index of the exchange within the pool | ||
| 371 | * | ||
| 372 | * Use the index to get an exchange from within an exchange pool. exches | ||
| 373 | * will point to an array of exchange pointers. The index will select | ||
| 374 | * the exchange within the array. | ||
| 375 | */ | ||
| 318 | static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, | 376 | static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, |
| 319 | u16 index) | 377 | u16 index) |
| 320 | { | 378 | { |
| @@ -322,12 +380,22 @@ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, | |||
| 322 | return exches[index]; | 380 | return exches[index]; |
| 323 | } | 381 | } |
| 324 | 382 | ||
| 383 | /** | ||
| 384 | * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool | ||
| 385 | * @pool: The pool to assign the exchange to | ||
| 386 | * @index: The index in the pool where the exchange will be assigned | ||
| 387 | * @ep: The exchange to assign to the pool | ||
| 388 | */ | ||
| 325 | static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, | 389 | static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, |
| 326 | struct fc_exch *ep) | 390 | struct fc_exch *ep) |
| 327 | { | 391 | { |
| 328 | ((struct fc_exch **)(pool + 1))[index] = ep; | 392 | ((struct fc_exch **)(pool + 1))[index] = ep; |
| 329 | } | 393 | } |
| 330 | 394 | ||
| 395 | /** | ||
| 396 | * fc_exch_delete() - Delete an exchange | ||
| 397 | * @ep: The exchange to be deleted | ||
| 398 | */ | ||
| 331 | static void fc_exch_delete(struct fc_exch *ep) | 399 | static void fc_exch_delete(struct fc_exch *ep) |
| 332 | { | 400 | { |
| 333 | struct fc_exch_pool *pool; | 401 | struct fc_exch_pool *pool; |
| @@ -343,8 +411,14 @@ static void fc_exch_delete(struct fc_exch *ep) | |||
| 343 | fc_exch_release(ep); /* drop hold for exch in mp */ | 411 | fc_exch_release(ep); /* drop hold for exch in mp */ |
| 344 | } | 412 | } |
| 345 | 413 | ||
| 346 | /* | 414 | /** |
| 347 | * Internal version of fc_exch_timer_set - used with lock held. | 415 | * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the |
| 416 | * the exchange lock held | ||
| 417 | * @ep: The exchange whose timer will start | ||
| 418 | * @timer_msec: The timeout period | ||
| 419 | * | ||
| 420 | * Used for upper level protocols to time out the exchange. | ||
| 421 | * The timer is cancelled when it fires or when the exchange completes. | ||
| 348 | */ | 422 | */ |
| 349 | static inline void fc_exch_timer_set_locked(struct fc_exch *ep, | 423 | static inline void fc_exch_timer_set_locked(struct fc_exch *ep, |
| 350 | unsigned int timer_msec) | 424 | unsigned int timer_msec) |
| @@ -354,17 +428,15 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, | |||
| 354 | 428 | ||
| 355 | FC_EXCH_DBG(ep, "Exchange timer armed\n"); | 429 | FC_EXCH_DBG(ep, "Exchange timer armed\n"); |
| 356 | 430 | ||
| 357 | if (schedule_delayed_work(&ep->timeout_work, | 431 | if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, |
| 358 | msecs_to_jiffies(timer_msec))) | 432 | msecs_to_jiffies(timer_msec))) |
| 359 | fc_exch_hold(ep); /* hold for timer */ | 433 | fc_exch_hold(ep); /* hold for timer */ |
| 360 | } | 434 | } |
| 361 | 435 | ||
| 362 | /* | 436 | /** |
| 363 | * Set timer for an exchange. | 437 | * fc_exch_timer_set() - Lock the exchange and set the timer |
| 364 | * The time is a minimum delay in milliseconds until the timer fires. | 438 | * @ep: The exchange whose timer will start |
| 365 | * Used for upper level protocols to time out the exchange. | 439 | * @timer_msec: The timeout period |
| 366 | * The timer is cancelled when it fires or when the exchange completes. | ||
| 367 | * Returns non-zero if a timer couldn't be allocated. | ||
| 368 | */ | 440 | */ |
| 369 | static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) | 441 | static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) |
| 370 | { | 442 | { |
| @@ -373,7 +445,115 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) | |||
| 373 | spin_unlock_bh(&ep->ex_lock); | 445 | spin_unlock_bh(&ep->ex_lock); |
| 374 | } | 446 | } |
| 375 | 447 | ||
| 376 | int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) | 448 | /** |
| 449 | * fc_seq_send() - Send a frame using existing sequence/exchange pair | ||
| 450 | * @lport: The local port that the exchange will be sent on | ||
| 451 | * @sp: The sequence to be sent | ||
| 452 | * @fp: The frame to be sent on the exchange | ||
| 453 | */ | ||
| 454 | static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, | ||
| 455 | struct fc_frame *fp) | ||
| 456 | { | ||
| 457 | struct fc_exch *ep; | ||
| 458 | struct fc_frame_header *fh = fc_frame_header_get(fp); | ||
| 459 | int error; | ||
| 460 | u32 f_ctl; | ||
| 461 | |||
| 462 | ep = fc_seq_exch(sp); | ||
| 463 | WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); | ||
| 464 | |||
| 465 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
| 466 | fc_exch_setup_hdr(ep, fp, f_ctl); | ||
| 467 | |||
| 468 | /* | ||
| 469 | * update sequence count if this frame is carrying | ||
| 470 | * multiple FC frames when sequence offload is enabled | ||
| 471 | * by LLD. | ||
| 472 | */ | ||
| 473 | if (fr_max_payload(fp)) | ||
| 474 | sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), | ||
| 475 | fr_max_payload(fp)); | ||
| 476 | else | ||
| 477 | sp->cnt++; | ||
| 478 | |||
| 479 | /* | ||
| 480 | * Send the frame. | ||
| 481 | */ | ||
| 482 | error = lport->tt.frame_send(lport, fp); | ||
| 483 | |||
| 484 | /* | ||
| 485 | * Update the exchange and sequence flags, | ||
| 486 | * assuming all frames for the sequence have been sent. | ||
| 487 | * We can only be called to send once for each sequence. | ||
| 488 | */ | ||
| 489 | spin_lock_bh(&ep->ex_lock); | ||
| 490 | ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ | ||
| 491 | if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) | ||
| 492 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; | ||
| 493 | spin_unlock_bh(&ep->ex_lock); | ||
| 494 | return error; | ||
| 495 | } | ||
| 496 | |||
| 497 | /** | ||
| 498 | * fc_seq_alloc() - Allocate a sequence for a given exchange | ||
| 499 | * @ep: The exchange to allocate a new sequence for | ||
| 500 | * @seq_id: The sequence ID to be used | ||
| 501 | * | ||
| 502 | * We don't support multiple originated sequences on the same exchange. | ||
| 503 | * By implication, any previously originated sequence on this exchange | ||
| 504 | * is complete, and we reallocate the same sequence. | ||
| 505 | */ | ||
| 506 | static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) | ||
| 507 | { | ||
| 508 | struct fc_seq *sp; | ||
| 509 | |||
| 510 | sp = &ep->seq; | ||
| 511 | sp->ssb_stat = 0; | ||
| 512 | sp->cnt = 0; | ||
| 513 | sp->id = seq_id; | ||
| 514 | return sp; | ||
| 515 | } | ||
| 516 | |||
| 517 | /** | ||
| 518 | * fc_seq_start_next_locked() - Allocate a new sequence on the same | ||
| 519 | * exchange as the supplied sequence | ||
| 520 | * @sp: The sequence/exchange to get a new sequence for | ||
| 521 | */ | ||
| 522 | static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) | ||
| 523 | { | ||
| 524 | struct fc_exch *ep = fc_seq_exch(sp); | ||
| 525 | |||
| 526 | sp = fc_seq_alloc(ep, ep->seq_id++); | ||
| 527 | FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", | ||
| 528 | ep->f_ctl, sp->id); | ||
| 529 | return sp; | ||
| 530 | } | ||
| 531 | |||
| 532 | /** | ||
| 533 | * fc_seq_start_next() - Lock the exchange and get a new sequence | ||
| 534 | * for a given sequence/exchange pair | ||
| 535 | * @sp: The sequence/exchange to get a new exchange for | ||
| 536 | */ | ||
| 537 | static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) | ||
| 538 | { | ||
| 539 | struct fc_exch *ep = fc_seq_exch(sp); | ||
| 540 | |||
| 541 | spin_lock_bh(&ep->ex_lock); | ||
| 542 | sp = fc_seq_start_next_locked(sp); | ||
| 543 | spin_unlock_bh(&ep->ex_lock); | ||
| 544 | |||
| 545 | return sp; | ||
| 546 | } | ||
| 547 | |||
| 548 | /** | ||
| 549 | * fc_seq_exch_abort() - Abort an exchange and sequence | ||
| 550 | * @req_sp: The sequence to be aborted | ||
| 551 | * @timer_msec: The period of time to wait before aborting | ||
| 552 | * | ||
| 553 | * Generally called because of a timeout or an abort from the upper layer. | ||
| 554 | */ | ||
| 555 | static int fc_seq_exch_abort(const struct fc_seq *req_sp, | ||
| 556 | unsigned int timer_msec) | ||
| 377 | { | 557 | { |
| 378 | struct fc_seq *sp; | 558 | struct fc_seq *sp; |
| 379 | struct fc_exch *ep; | 559 | struct fc_exch *ep; |
| @@ -422,11 +602,10 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) | |||
| 422 | error = -ENOBUFS; | 602 | error = -ENOBUFS; |
| 423 | return error; | 603 | return error; |
| 424 | } | 604 | } |
| 425 | EXPORT_SYMBOL(fc_seq_exch_abort); | ||
| 426 | 605 | ||
| 427 | /* | 606 | /** |
| 428 | * Exchange timeout - handle exchange timer expiration. | 607 | * fc_exch_timeout() - Handle exchange timer expiration |
| 429 | * The timer will have been cancelled before this is called. | 608 | * @work: The work_struct identifying the exchange that timed out |
| 430 | */ | 609 | */ |
| 431 | static void fc_exch_timeout(struct work_struct *work) | 610 | static void fc_exch_timeout(struct work_struct *work) |
| 432 | { | 611 | { |
| @@ -474,28 +653,10 @@ done: | |||
| 474 | fc_exch_release(ep); | 653 | fc_exch_release(ep); |
| 475 | } | 654 | } |
| 476 | 655 | ||
| 477 | /* | ||
| 478 | * Allocate a sequence. | ||
| 479 | * | ||
| 480 | * We don't support multiple originated sequences on the same exchange. | ||
| 481 | * By implication, any previously originated sequence on this exchange | ||
| 482 | * is complete, and we reallocate the same sequence. | ||
| 483 | */ | ||
| 484 | static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) | ||
| 485 | { | ||
| 486 | struct fc_seq *sp; | ||
| 487 | |||
| 488 | sp = &ep->seq; | ||
| 489 | sp->ssb_stat = 0; | ||
| 490 | sp->cnt = 0; | ||
| 491 | sp->id = seq_id; | ||
| 492 | return sp; | ||
| 493 | } | ||
| 494 | |||
| 495 | /** | 656 | /** |
| 496 | * fc_exch_em_alloc() - allocate an exchange from a specified EM. | 657 | * fc_exch_em_alloc() - Allocate an exchange from a specified EM. |
| 497 | * @lport: ptr to the local port | 658 | * @lport: The local port that the exchange is for |
| 498 | * @mp: ptr to the exchange manager | 659 | * @mp: The exchange manager that will allocate the exchange |
| 499 | * | 660 | * |
| 500 | * Returns pointer to allocated fc_exch with exch lock held. | 661 | * Returns pointer to allocated fc_exch with exch lock held. |
| 501 | */ | 662 | */ |
| @@ -563,16 +724,18 @@ err: | |||
| 563 | } | 724 | } |
| 564 | 725 | ||
| 565 | /** | 726 | /** |
| 566 | * fc_exch_alloc() - allocate an exchange. | 727 | * fc_exch_alloc() - Allocate an exchange from an EM on a |
| 567 | * @lport: ptr to the local port | 728 | * local port's list of EMs. |
| 568 | * @fp: ptr to the FC frame | 729 | * @lport: The local port that will own the exchange |
| 730 | * @fp: The FC frame that the exchange will be for | ||
| 569 | * | 731 | * |
| 570 | * This function walks the list of the exchange manager(EM) | 732 | * This function walks the list of exchange manager(EM) |
| 571 | * anchors to select a EM for new exchange allocation. The | 733 | * anchors to select an EM for a new exchange allocation. The |
| 572 | * EM is selected having either a NULL match function pointer | 734 | * EM is selected when a NULL match function pointer is encountered |
| 573 | * or call to match function returning true. | 735 | * or when a call to a match function returns true. |
| 574 | */ | 736 | */ |
| 575 | struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) | 737 | static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, |
| 738 | struct fc_frame *fp) | ||
| 576 | { | 739 | { |
| 577 | struct fc_exch_mgr_anchor *ema; | 740 | struct fc_exch_mgr_anchor *ema; |
| 578 | struct fc_exch *ep; | 741 | struct fc_exch *ep; |
| @@ -586,10 +749,11 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) | |||
| 586 | } | 749 | } |
| 587 | return NULL; | 750 | return NULL; |
| 588 | } | 751 | } |
| 589 | EXPORT_SYMBOL(fc_exch_alloc); | ||
| 590 | 752 | ||
| 591 | /* | 753 | /** |
| 592 | * Lookup and hold an exchange. | 754 | * fc_exch_find() - Lookup and hold an exchange |
| 755 | * @mp: The exchange manager to lookup the exchange from | ||
| 756 | * @xid: The XID of the exchange to look up | ||
| 593 | */ | 757 | */ |
| 594 | static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) | 758 | static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) |
| 595 | { | 759 | { |
| @@ -609,7 +773,13 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) | |||
| 609 | return ep; | 773 | return ep; |
| 610 | } | 774 | } |
| 611 | 775 | ||
| 612 | void fc_exch_done(struct fc_seq *sp) | 776 | |
| 777 | /** | ||
| 778 | * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and | ||
| 779 | * the memory allocated for the related objects may be freed. | ||
| 780 | * @sp: The sequence that has completed | ||
| 781 | */ | ||
| 782 | static void fc_exch_done(struct fc_seq *sp) | ||
| 613 | { | 783 | { |
| 614 | struct fc_exch *ep = fc_seq_exch(sp); | 784 | struct fc_exch *ep = fc_seq_exch(sp); |
| 615 | int rc; | 785 | int rc; |
| @@ -620,10 +790,13 @@ void fc_exch_done(struct fc_seq *sp) | |||
| 620 | if (!rc) | 790 | if (!rc) |
| 621 | fc_exch_delete(ep); | 791 | fc_exch_delete(ep); |
| 622 | } | 792 | } |
| 623 | EXPORT_SYMBOL(fc_exch_done); | ||
| 624 | 793 | ||
| 625 | /* | 794 | /** |
| 626 | * Allocate a new exchange as responder. | 795 | * fc_exch_resp() - Allocate a new exchange for a response frame |
| 796 | * @lport: The local port that the exchange was for | ||
| 797 | * @mp: The exchange manager to allocate the exchange from | ||
| 798 | * @fp: The response frame | ||
| 799 | * | ||
| 627 | * Sets the responder ID in the frame header. | 800 | * Sets the responder ID in the frame header. |
| 628 | */ | 801 | */ |
| 629 | static struct fc_exch *fc_exch_resp(struct fc_lport *lport, | 802 | static struct fc_exch *fc_exch_resp(struct fc_lport *lport, |
| @@ -664,8 +837,13 @@ static struct fc_exch *fc_exch_resp(struct fc_lport *lport, | |||
| 664 | return ep; | 837 | return ep; |
| 665 | } | 838 | } |
| 666 | 839 | ||
| 667 | /* | 840 | /** |
| 668 | * Find a sequence for receive where the other end is originating the sequence. | 841 | * fc_seq_lookup_recip() - Find a sequence where the other end |
| 842 | * originated the sequence | ||
| 843 | * @lport: The local port that the frame was sent to | ||
| 844 | * @mp: The Exchange Manager to lookup the exchange from | ||
| 845 | * @fp: The frame associated with the sequence we're looking for | ||
| 846 | * | ||
| 669 | * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold | 847 | * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold |
| 670 | * on the ep that should be released by the caller. | 848 | * on the ep that should be released by the caller. |
| 671 | */ | 849 | */ |
| @@ -771,10 +949,12 @@ rel: | |||
| 771 | return reject; | 949 | return reject; |
| 772 | } | 950 | } |
| 773 | 951 | ||
| 774 | /* | 952 | /** |
| 775 | * Find the sequence for a frame being received. | 953 | * fc_seq_lookup_orig() - Find a sequence where this end |
| 776 | * We originated the sequence, so it should be found. | 954 | * originated the sequence |
| 777 | * We may or may not have originated the exchange. | 955 | * @mp: The Exchange Manager to lookup the exchange from |
| 956 | * @fp: The frame associated with the sequence we're looking for | ||
| 957 | * | ||
| 778 | * Does not hold the sequence for the caller. | 958 | * Does not hold the sequence for the caller. |
| 779 | */ | 959 | */ |
| 780 | static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, | 960 | static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, |
| @@ -806,8 +986,12 @@ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, | |||
| 806 | return sp; | 986 | return sp; |
| 807 | } | 987 | } |
| 808 | 988 | ||
| 809 | /* | 989 | /** |
| 810 | * Set addresses for an exchange. | 990 | * fc_exch_set_addr() - Set the source and destination IDs for an exchange |
| 991 | * @ep: The exchange to set the addresses for | ||
| 992 | * @orig_id: The originator's ID | ||
| 993 | * @resp_id: The responder's ID | ||
| 994 | * | ||
| 811 | * Note this must be done before the first sequence of the exchange is sent. | 995 | * Note this must be done before the first sequence of the exchange is sent. |
| 812 | */ | 996 | */ |
| 813 | static void fc_exch_set_addr(struct fc_exch *ep, | 997 | static void fc_exch_set_addr(struct fc_exch *ep, |
| @@ -823,76 +1007,15 @@ static void fc_exch_set_addr(struct fc_exch *ep, | |||
| 823 | } | 1007 | } |
| 824 | } | 1008 | } |
| 825 | 1009 | ||
| 826 | static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) | 1010 | /** |
| 827 | { | 1011 | * fc_seq_els_rsp_send() - Send an ELS response using information from |
| 828 | struct fc_exch *ep = fc_seq_exch(sp); | 1012 | * the existing sequence/exchange. |
| 829 | 1013 | * @sp: The sequence/exchange to get information from | |
| 830 | sp = fc_seq_alloc(ep, ep->seq_id++); | 1014 | * @els_cmd: The ELS command to be sent |
| 831 | FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", | 1015 | * @els_data: The ELS data to be sent |
| 832 | ep->f_ctl, sp->id); | ||
| 833 | return sp; | ||
| 834 | } | ||
| 835 | /* | ||
| 836 | * Allocate a new sequence on the same exchange as the supplied sequence. | ||
| 837 | * This will never return NULL. | ||
| 838 | */ | 1016 | */ |
| 839 | struct fc_seq *fc_seq_start_next(struct fc_seq *sp) | 1017 | static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, |
| 840 | { | 1018 | struct fc_seq_els_data *els_data) |
| 841 | struct fc_exch *ep = fc_seq_exch(sp); | ||
| 842 | |||
| 843 | spin_lock_bh(&ep->ex_lock); | ||
| 844 | sp = fc_seq_start_next_locked(sp); | ||
| 845 | spin_unlock_bh(&ep->ex_lock); | ||
| 846 | |||
| 847 | return sp; | ||
| 848 | } | ||
| 849 | EXPORT_SYMBOL(fc_seq_start_next); | ||
| 850 | |||
| 851 | int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp) | ||
| 852 | { | ||
| 853 | struct fc_exch *ep; | ||
| 854 | struct fc_frame_header *fh = fc_frame_header_get(fp); | ||
| 855 | int error; | ||
| 856 | u32 f_ctl; | ||
| 857 | |||
| 858 | ep = fc_seq_exch(sp); | ||
| 859 | WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); | ||
| 860 | |||
| 861 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
| 862 | fc_exch_setup_hdr(ep, fp, f_ctl); | ||
| 863 | |||
| 864 | /* | ||
| 865 | * update sequence count if this frame is carrying | ||
| 866 | * multiple FC frames when sequence offload is enabled | ||
| 867 | * by LLD. | ||
| 868 | */ | ||
| 869 | if (fr_max_payload(fp)) | ||
| 870 | sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), | ||
| 871 | fr_max_payload(fp)); | ||
| 872 | else | ||
| 873 | sp->cnt++; | ||
| 874 | |||
| 875 | /* | ||
| 876 | * Send the frame. | ||
| 877 | */ | ||
| 878 | error = lp->tt.frame_send(lp, fp); | ||
| 879 | |||
| 880 | /* | ||
| 881 | * Update the exchange and sequence flags, | ||
| 882 | * assuming all frames for the sequence have been sent. | ||
| 883 | * We can only be called to send once for each sequence. | ||
| 884 | */ | ||
| 885 | spin_lock_bh(&ep->ex_lock); | ||
| 886 | ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ | ||
| 887 | if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) | ||
| 888 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; | ||
| 889 | spin_unlock_bh(&ep->ex_lock); | ||
| 890 | return error; | ||
| 891 | } | ||
| 892 | EXPORT_SYMBOL(fc_seq_send); | ||
| 893 | |||
| 894 | void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, | ||
| 895 | struct fc_seq_els_data *els_data) | ||
| 896 | { | 1019 | { |
| 897 | switch (els_cmd) { | 1020 | switch (els_cmd) { |
| 898 | case ELS_LS_RJT: | 1021 | case ELS_LS_RJT: |
| @@ -911,10 +1034,13 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, | |||
| 911 | FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); | 1034 | FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); |
| 912 | } | 1035 | } |
| 913 | } | 1036 | } |
| 914 | EXPORT_SYMBOL(fc_seq_els_rsp_send); | ||
| 915 | 1037 | ||
| 916 | /* | 1038 | /** |
| 917 | * Send a sequence, which is also the last sequence in the exchange. | 1039 | * fc_seq_send_last() - Send a sequence that is the last in the exchange |
| 1040 | * @sp: The sequence that is to be sent | ||
| 1041 | * @fp: The frame that will be sent on the sequence | ||
| 1042 | * @rctl: The R_CTL information to be sent | ||
| 1043 | * @fh_type: The frame header type | ||
| 918 | */ | 1044 | */ |
| 919 | static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, | 1045 | static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, |
| 920 | enum fc_rctl rctl, enum fc_fh_type fh_type) | 1046 | enum fc_rctl rctl, enum fc_fh_type fh_type) |
| @@ -928,9 +1054,12 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, | |||
| 928 | fc_seq_send(ep->lp, sp, fp); | 1054 | fc_seq_send(ep->lp, sp, fp); |
| 929 | } | 1055 | } |
| 930 | 1056 | ||
| 931 | /* | 1057 | /** |
| 1058 | * fc_seq_send_ack() - Send an acknowledgement that we've received a frame | ||
| 1059 | * @sp: The sequence to send the ACK on | ||
| 1060 | * @rx_fp: The received frame that is being acknowledged | ||
| 1061 | * | ||
| 932 | * Send ACK_1 (or equiv.) indicating we received something. | 1062 | * Send ACK_1 (or equiv.) indicating we received something. |
| 933 | * The frame we're acking is supplied. | ||
| 934 | */ | 1063 | */ |
| 935 | static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) | 1064 | static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) |
| 936 | { | 1065 | { |
| @@ -938,14 +1067,14 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) | |||
| 938 | struct fc_frame_header *rx_fh; | 1067 | struct fc_frame_header *rx_fh; |
| 939 | struct fc_frame_header *fh; | 1068 | struct fc_frame_header *fh; |
| 940 | struct fc_exch *ep = fc_seq_exch(sp); | 1069 | struct fc_exch *ep = fc_seq_exch(sp); |
| 941 | struct fc_lport *lp = ep->lp; | 1070 | struct fc_lport *lport = ep->lp; |
| 942 | unsigned int f_ctl; | 1071 | unsigned int f_ctl; |
| 943 | 1072 | ||
| 944 | /* | 1073 | /* |
| 945 | * Don't send ACKs for class 3. | 1074 | * Don't send ACKs for class 3. |
| 946 | */ | 1075 | */ |
| 947 | if (fc_sof_needs_ack(fr_sof(rx_fp))) { | 1076 | if (fc_sof_needs_ack(fr_sof(rx_fp))) { |
| 948 | fp = fc_frame_alloc(lp, 0); | 1077 | fp = fc_frame_alloc(lport, 0); |
| 949 | if (!fp) | 1078 | if (!fp) |
| 950 | return; | 1079 | return; |
| 951 | 1080 | ||
| @@ -980,12 +1109,16 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) | |||
| 980 | else | 1109 | else |
| 981 | fr_eof(fp) = FC_EOF_N; | 1110 | fr_eof(fp) = FC_EOF_N; |
| 982 | 1111 | ||
| 983 | (void) lp->tt.frame_send(lp, fp); | 1112 | lport->tt.frame_send(lport, fp); |
| 984 | } | 1113 | } |
| 985 | } | 1114 | } |
| 986 | 1115 | ||
| 987 | /* | 1116 | /** |
| 988 | * Send BLS Reject. | 1117 | * fc_exch_send_ba_rjt() - Send BLS Reject |
| 1118 | * @rx_fp: The frame being rejected | ||
| 1119 | * @reason: The reason the frame is being rejected | ||
| 1120 | * @explan: The explanation for the rejection | ||
| 1121 | * | ||
| 989 | * This is for rejecting BA_ABTS only. | 1122 | * This is for rejecting BA_ABTS only. |
| 990 | */ | 1123 | */ |
| 991 | static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, | 1124 | static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, |
| @@ -996,11 +1129,11 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, | |||
| 996 | struct fc_frame_header *rx_fh; | 1129 | struct fc_frame_header *rx_fh; |
| 997 | struct fc_frame_header *fh; | 1130 | struct fc_frame_header *fh; |
| 998 | struct fc_ba_rjt *rp; | 1131 | struct fc_ba_rjt *rp; |
| 999 | struct fc_lport *lp; | 1132 | struct fc_lport *lport; |
| 1000 | unsigned int f_ctl; | 1133 | unsigned int f_ctl; |
| 1001 | 1134 | ||
| 1002 | lp = fr_dev(rx_fp); | 1135 | lport = fr_dev(rx_fp); |
| 1003 | fp = fc_frame_alloc(lp, sizeof(*rp)); | 1136 | fp = fc_frame_alloc(lport, sizeof(*rp)); |
| 1004 | if (!fp) | 1137 | if (!fp) |
| 1005 | return; | 1138 | return; |
| 1006 | fh = fc_frame_header_get(fp); | 1139 | fh = fc_frame_header_get(fp); |
| @@ -1045,13 +1178,17 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, | |||
| 1045 | if (fc_sof_needs_ack(fr_sof(fp))) | 1178 | if (fc_sof_needs_ack(fr_sof(fp))) |
| 1046 | fr_eof(fp) = FC_EOF_N; | 1179 | fr_eof(fp) = FC_EOF_N; |
| 1047 | 1180 | ||
| 1048 | (void) lp->tt.frame_send(lp, fp); | 1181 | lport->tt.frame_send(lport, fp); |
| 1049 | } | 1182 | } |
| 1050 | 1183 | ||
| 1051 | /* | 1184 | /** |
| 1052 | * Handle an incoming ABTS. This would be for target mode usually, | 1185 | * fc_exch_recv_abts() - Handle an incoming ABTS |
| 1053 | * but could be due to lost FCP transfer ready, confirm or RRQ. | 1186 | * @ep: The exchange the abort was on |
| 1054 | * We always handle this as an exchange abort, ignoring the parameter. | 1187 | * @rx_fp: The ABTS frame |
| 1188 | * | ||
| 1189 | * This would be for target mode usually, but could be due to lost | ||
| 1190 | * FCP transfer ready, confirm or RRQ. We always handle this as an | ||
| 1191 | * exchange abort, ignoring the parameter. | ||
| 1055 | */ | 1192 | */ |
| 1056 | static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) | 1193 | static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) |
| 1057 | { | 1194 | { |
| @@ -1100,10 +1237,14 @@ free: | |||
| 1100 | fc_frame_free(rx_fp); | 1237 | fc_frame_free(rx_fp); |
| 1101 | } | 1238 | } |
| 1102 | 1239 | ||
| 1103 | /* | 1240 | /** |
| 1104 | * Handle receive where the other end is originating the sequence. | 1241 | * fc_exch_recv_req() - Handler for an incoming request where the other |
| 1242 | * end is originating the sequence | ||
| 1243 | * @lport: The local port that received the request | ||
| 1244 | * @mp: The EM that the exchange is on | ||
| 1245 | * @fp: The request frame | ||
| 1105 | */ | 1246 | */ |
| 1106 | static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, | 1247 | static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, |
| 1107 | struct fc_frame *fp) | 1248 | struct fc_frame *fp) |
| 1108 | { | 1249 | { |
| 1109 | struct fc_frame_header *fh = fc_frame_header_get(fp); | 1250 | struct fc_frame_header *fh = fc_frame_header_get(fp); |
| @@ -1114,8 +1255,17 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
| 1114 | u32 f_ctl; | 1255 | u32 f_ctl; |
| 1115 | enum fc_pf_rjt_reason reject; | 1256 | enum fc_pf_rjt_reason reject; |
| 1116 | 1257 | ||
| 1258 | /* We can have the wrong fc_lport at this point with NPIV, which is a | ||
| 1259 | * problem now that we know a new exchange needs to be allocated | ||
| 1260 | */ | ||
| 1261 | lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); | ||
| 1262 | if (!lport) { | ||
| 1263 | fc_frame_free(fp); | ||
| 1264 | return; | ||
| 1265 | } | ||
| 1266 | |||
| 1117 | fr_seq(fp) = NULL; | 1267 | fr_seq(fp) = NULL; |
| 1118 | reject = fc_seq_lookup_recip(lp, mp, fp); | 1268 | reject = fc_seq_lookup_recip(lport, mp, fp); |
| 1119 | if (reject == FC_RJT_NONE) { | 1269 | if (reject == FC_RJT_NONE) { |
| 1120 | sp = fr_seq(fp); /* sequence will be held */ | 1270 | sp = fr_seq(fp); /* sequence will be held */ |
| 1121 | ep = fc_seq_exch(sp); | 1271 | ep = fc_seq_exch(sp); |
| @@ -1138,17 +1288,21 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
| 1138 | if (ep->resp) | 1288 | if (ep->resp) |
| 1139 | ep->resp(sp, fp, ep->arg); | 1289 | ep->resp(sp, fp, ep->arg); |
| 1140 | else | 1290 | else |
| 1141 | lp->tt.lport_recv(lp, sp, fp); | 1291 | lport->tt.lport_recv(lport, sp, fp); |
| 1142 | fc_exch_release(ep); /* release from lookup */ | 1292 | fc_exch_release(ep); /* release from lookup */ |
| 1143 | } else { | 1293 | } else { |
| 1144 | FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); | 1294 | FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", |
| 1295 | reject); | ||
| 1145 | fc_frame_free(fp); | 1296 | fc_frame_free(fp); |
| 1146 | } | 1297 | } |
| 1147 | } | 1298 | } |
| 1148 | 1299 | ||
| 1149 | /* | 1300 | /** |
| 1150 | * Handle receive where the other end is originating the sequence in | 1301 | * fc_exch_recv_seq_resp() - Handler for an incoming response where the other |
| 1151 | * response to our exchange. | 1302 | * end is the originator of the sequence that is a |
| 1303 | * response to our initial exchange | ||
| 1304 | * @mp: The EM that the exchange is on | ||
| 1305 | * @fp: The response frame | ||
| 1152 | */ | 1306 | */ |
| 1153 | static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | 1307 | static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
| 1154 | { | 1308 | { |
| @@ -1239,8 +1393,11 @@ out: | |||
| 1239 | fc_frame_free(fp); | 1393 | fc_frame_free(fp); |
| 1240 | } | 1394 | } |
| 1241 | 1395 | ||
| 1242 | /* | 1396 | /** |
| 1243 | * Handle receive for a sequence where other end is responding to our sequence. | 1397 | * fc_exch_recv_resp() - Handler for a sequence where other end is |
| 1398 | * responding to our sequence | ||
| 1399 | * @mp: The EM that the exchange is on | ||
| 1400 | * @fp: The response frame | ||
| 1244 | */ | 1401 | */ |
| 1245 | static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | 1402 | static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
| 1246 | { | 1403 | { |
| @@ -1256,9 +1413,13 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
| 1256 | fc_frame_free(fp); | 1413 | fc_frame_free(fp); |
| 1257 | } | 1414 | } |
| 1258 | 1415 | ||
| 1259 | /* | 1416 | /** |
| 1260 | * Handle the response to an ABTS for exchange or sequence. | 1417 | * fc_exch_abts_resp() - Handler for a response to an ABT |
| 1261 | * This can be BA_ACC or BA_RJT. | 1418 | * @ep: The exchange that the frame is on |
| 1419 | * @fp: The response frame | ||
| 1420 | * | ||
| 1421 | * This response would be to an ABTS cancelling an exchange or sequence. | ||
| 1422 | * The response can be either BA_ACC or BA_RJT | ||
| 1262 | */ | 1423 | */ |
| 1263 | static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) | 1424 | static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) |
| 1264 | { | 1425 | { |
| @@ -1333,9 +1494,12 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) | |||
| 1333 | 1494 | ||
| 1334 | } | 1495 | } |
| 1335 | 1496 | ||
| 1336 | /* | 1497 | /** |
| 1337 | * Receive BLS sequence. | 1498 | * fc_exch_recv_bls() - Handler for a BLS sequence |
| 1338 | * This is always a sequence initiated by the remote side. | 1499 | * @mp: The EM that the exchange is on |
| 1500 | * @fp: The request frame | ||
| 1501 | * | ||
| 1502 | * The BLS frame is always a sequence initiated by the remote side. | ||
| 1339 | * We may be either the originator or recipient of the exchange. | 1503 | * We may be either the originator or recipient of the exchange. |
| 1340 | */ | 1504 | */ |
| 1341 | static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) | 1505 | static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) |
| @@ -1392,8 +1556,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
| 1392 | fc_exch_release(ep); /* release hold taken by fc_exch_find */ | 1556 | fc_exch_release(ep); /* release hold taken by fc_exch_find */ |
| 1393 | } | 1557 | } |
| 1394 | 1558 | ||
| 1395 | /* | 1559 | /** |
| 1396 | * Accept sequence with LS_ACC. | 1560 | * fc_seq_ls_acc() - Accept sequence with LS_ACC |
| 1561 | * @req_sp: The request sequence | ||
| 1562 | * | ||
| 1397 | * If this fails due to allocation or transmit congestion, assume the | 1563 | * If this fails due to allocation or transmit congestion, assume the |
| 1398 | * originator will repeat the sequence. | 1564 | * originator will repeat the sequence. |
| 1399 | */ | 1565 | */ |
| @@ -1413,8 +1579,12 @@ static void fc_seq_ls_acc(struct fc_seq *req_sp) | |||
| 1413 | } | 1579 | } |
| 1414 | } | 1580 | } |
| 1415 | 1581 | ||
| 1416 | /* | 1582 | /** |
| 1417 | * Reject sequence with ELS LS_RJT. | 1583 | * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT |
| 1584 | * @req_sp: The request sequence | ||
| 1585 | * @reason: The reason the sequence is being rejected | ||
| 1586 | * @explan: The explanation for the rejection | ||
| 1587 | * | ||
| 1418 | * If this fails due to allocation or transmit congestion, assume the | 1588 | * If this fails due to allocation or transmit congestion, assume the |
| 1419 | * originator will repeat the sequence. | 1589 | * originator will repeat the sequence. |
| 1420 | */ | 1590 | */ |
| @@ -1437,6 +1607,10 @@ static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason, | |||
| 1437 | } | 1607 | } |
| 1438 | } | 1608 | } |
| 1439 | 1609 | ||
| 1610 | /** | ||
| 1611 | * fc_exch_reset() - Reset an exchange | ||
| 1612 | * @ep: The exchange to be reset | ||
| 1613 | */ | ||
| 1440 | static void fc_exch_reset(struct fc_exch *ep) | 1614 | static void fc_exch_reset(struct fc_exch *ep) |
| 1441 | { | 1615 | { |
| 1442 | struct fc_seq *sp; | 1616 | struct fc_seq *sp; |
| @@ -1446,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep) | |||
| 1446 | 1620 | ||
| 1447 | spin_lock_bh(&ep->ex_lock); | 1621 | spin_lock_bh(&ep->ex_lock); |
| 1448 | ep->state |= FC_EX_RST_CLEANUP; | 1622 | ep->state |= FC_EX_RST_CLEANUP; |
| 1449 | /* | ||
| 1450 | * we really want to call del_timer_sync, but cannot due | ||
| 1451 | * to the lport calling with the lport lock held (some resp | ||
| 1452 | * functions can also grab the lport lock which could cause | ||
| 1453 | * a deadlock). | ||
| 1454 | */ | ||
| 1455 | if (cancel_delayed_work(&ep->timeout_work)) | 1623 | if (cancel_delayed_work(&ep->timeout_work)) |
| 1456 | atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ | 1624 | atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ |
| 1457 | resp = ep->resp; | 1625 | resp = ep->resp; |
| @@ -1471,16 +1639,16 @@ static void fc_exch_reset(struct fc_exch *ep) | |||
| 1471 | } | 1639 | } |
| 1472 | 1640 | ||
| 1473 | /** | 1641 | /** |
| 1474 | * fc_exch_pool_reset() - Resets an per cpu exches pool. | 1642 | * fc_exch_pool_reset() - Reset a per cpu exchange pool |
| 1475 | * @lport: ptr to the local port | 1643 | * @lport: The local port that the exchange pool is on |
| 1476 | * @pool: ptr to the per cpu exches pool | 1644 | * @pool: The exchange pool to be reset |
| 1477 | * @sid: source FC ID | 1645 | * @sid: The source ID |
| 1478 | * @did: destination FC ID | 1646 | * @did: The destination ID |
| 1479 | * | 1647 | * |
| 1480 | * Resets an per cpu exches pool, releasing its all sequences | 1648 | * Resets a per cpu exches pool, releasing all of its sequences |
| 1481 | * and exchanges. If sid is non-zero, then reset only exchanges | 1649 | * and exchanges. If sid is non-zero then reset only exchanges |
| 1482 | * we sourced from that FID. If did is non-zero, reset only | 1650 | * we sourced from the local port's FID. If did is non-zero then |
| 1483 | * exchanges destined to that FID. | 1651 | * only reset exchanges destined for the local port's FID. |
| 1484 | */ | 1652 | */ |
| 1485 | static void fc_exch_pool_reset(struct fc_lport *lport, | 1653 | static void fc_exch_pool_reset(struct fc_lport *lport, |
| 1486 | struct fc_exch_pool *pool, | 1654 | struct fc_exch_pool *pool, |
| @@ -1514,15 +1682,15 @@ restart: | |||
| 1514 | } | 1682 | } |
| 1515 | 1683 | ||
| 1516 | /** | 1684 | /** |
| 1517 | * fc_exch_mgr_reset() - Resets all EMs of a lport | 1685 | * fc_exch_mgr_reset() - Reset all EMs of a local port |
| 1518 | * @lport: ptr to the local port | 1686 | * @lport: The local port whose EMs are to be reset |
| 1519 | * @sid: source FC ID | 1687 | * @sid: The source ID |
| 1520 | * @did: destination FC ID | 1688 | * @did: The destination ID |
| 1521 | * | 1689 | * |
| 1522 | * Reset all EMs of a lport, releasing its all sequences and | 1690 | * Reset all EMs associated with a given local port. Release all |
| 1523 | * exchanges. If sid is non-zero, then reset only exchanges | 1691 | * sequences and exchanges. If sid is non-zero then reset only the |
| 1524 | * we sourced from that FID. If did is non-zero, reset only | 1692 | * exchanges sent from the local port's FID. If did is non-zero then |
| 1525 | * exchanges destined to that FID. | 1693 | * reset only exchanges destined for the local port's FID. |
| 1526 | */ | 1694 | */ |
| 1527 | void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) | 1695 | void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) |
| 1528 | { | 1696 | { |
| @@ -1538,8 +1706,11 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) | |||
| 1538 | } | 1706 | } |
| 1539 | EXPORT_SYMBOL(fc_exch_mgr_reset); | 1707 | EXPORT_SYMBOL(fc_exch_mgr_reset); |
| 1540 | 1708 | ||
| 1541 | /* | 1709 | /** |
| 1542 | * Handle incoming ELS REC - Read Exchange Concise. | 1710 | * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests |
| 1711 | * @sp: The sequence the REC is on | ||
| 1712 | * @rfp: The REC frame | ||
| 1713 | * | ||
| 1543 | * Note that the requesting port may be different than the S_ID in the request. | 1714 | * Note that the requesting port may be different than the S_ID in the request. |
| 1544 | */ | 1715 | */ |
| 1545 | static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) | 1716 | static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) |
| @@ -1621,10 +1792,11 @@ reject: | |||
| 1621 | fc_frame_free(rfp); | 1792 | fc_frame_free(rfp); |
| 1622 | } | 1793 | } |
| 1623 | 1794 | ||
| 1624 | /* | 1795 | /** |
| 1625 | * Handle response from RRQ. | 1796 | * fc_exch_rrq_resp() - Handler for RRQ responses |
| 1626 | * Not much to do here, really. | 1797 | * @sp: The sequence that the RRQ is on |
| 1627 | * Should report errors. | 1798 | * @fp: The RRQ frame |
| 1799 | * @arg: The exchange that the RRQ is on | ||
| 1628 | * | 1800 | * |
| 1629 | * TODO: fix error handler. | 1801 | * TODO: fix error handler. |
| 1630 | */ | 1802 | */ |
| @@ -1664,21 +1836,99 @@ cleanup: | |||
| 1664 | fc_exch_release(aborted_ep); | 1836 | fc_exch_release(aborted_ep); |
| 1665 | } | 1837 | } |
| 1666 | 1838 | ||
| 1667 | /* | 1839 | |
| 1668 | * Send ELS RRQ - Reinstate Recovery Qualifier. | 1840 | /** |
| 1841 | * fc_exch_seq_send() - Send a frame using a new exchange and sequence | ||
| 1842 | * @lport: The local port to send the frame on | ||
| 1843 | * @fp: The frame to be sent | ||
| 1844 | * @resp: The response handler for this request | ||
| 1845 | * @destructor: The destructor for the exchange | ||
| 1846 | * @arg: The argument to be passed to the response handler | ||
| 1847 | * @timer_msec: The timeout period for the exchange | ||
| 1848 | * | ||
| 1849 | * The frame pointer with some of the header's fields must be | ||
| 1850 | * filled before calling this routine, those fields are: | ||
| 1851 | * | ||
| 1852 | * - routing control | ||
| 1853 | * - FC port did | ||
| 1854 | * - FC port sid | ||
| 1855 | * - FC header type | ||
| 1856 | * - frame control | ||
| 1857 | * - parameter or relative offset | ||
| 1858 | */ | ||
| 1859 | static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | ||
| 1860 | struct fc_frame *fp, | ||
| 1861 | void (*resp)(struct fc_seq *, | ||
| 1862 | struct fc_frame *fp, | ||
| 1863 | void *arg), | ||
| 1864 | void (*destructor)(struct fc_seq *, | ||
| 1865 | void *), | ||
| 1866 | void *arg, u32 timer_msec) | ||
| 1867 | { | ||
| 1868 | struct fc_exch *ep; | ||
| 1869 | struct fc_seq *sp = NULL; | ||
| 1870 | struct fc_frame_header *fh; | ||
| 1871 | int rc = 1; | ||
| 1872 | |||
| 1873 | ep = fc_exch_alloc(lport, fp); | ||
| 1874 | if (!ep) { | ||
| 1875 | fc_frame_free(fp); | ||
| 1876 | return NULL; | ||
| 1877 | } | ||
| 1878 | ep->esb_stat |= ESB_ST_SEQ_INIT; | ||
| 1879 | fh = fc_frame_header_get(fp); | ||
| 1880 | fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); | ||
| 1881 | ep->resp = resp; | ||
| 1882 | ep->destructor = destructor; | ||
| 1883 | ep->arg = arg; | ||
| 1884 | ep->r_a_tov = FC_DEF_R_A_TOV; | ||
| 1885 | ep->lp = lport; | ||
| 1886 | sp = &ep->seq; | ||
| 1887 | |||
| 1888 | ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ | ||
| 1889 | ep->f_ctl = ntoh24(fh->fh_f_ctl); | ||
| 1890 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); | ||
| 1891 | sp->cnt++; | ||
| 1892 | |||
| 1893 | if (ep->xid <= lport->lro_xid) | ||
| 1894 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); | ||
| 1895 | |||
| 1896 | if (unlikely(lport->tt.frame_send(lport, fp))) | ||
| 1897 | goto err; | ||
| 1898 | |||
| 1899 | if (timer_msec) | ||
| 1900 | fc_exch_timer_set_locked(ep, timer_msec); | ||
| 1901 | ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ | ||
| 1902 | |||
| 1903 | if (ep->f_ctl & FC_FC_SEQ_INIT) | ||
| 1904 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; | ||
| 1905 | spin_unlock_bh(&ep->ex_lock); | ||
| 1906 | return sp; | ||
| 1907 | err: | ||
| 1908 | rc = fc_exch_done_locked(ep); | ||
| 1909 | spin_unlock_bh(&ep->ex_lock); | ||
| 1910 | if (!rc) | ||
| 1911 | fc_exch_delete(ep); | ||
| 1912 | return NULL; | ||
| 1913 | } | ||
| 1914 | |||
| 1915 | /** | ||
| 1916 | * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command | ||
| 1917 | * @ep: The exchange to send the RRQ on | ||
| 1918 | * | ||
| 1669 | * This tells the remote port to stop blocking the use of | 1919 | * This tells the remote port to stop blocking the use of |
| 1670 | * the exchange and the seq_cnt range. | 1920 | * the exchange and the seq_cnt range. |
| 1671 | */ | 1921 | */ |
| 1672 | static void fc_exch_rrq(struct fc_exch *ep) | 1922 | static void fc_exch_rrq(struct fc_exch *ep) |
| 1673 | { | 1923 | { |
| 1674 | struct fc_lport *lp; | 1924 | struct fc_lport *lport; |
| 1675 | struct fc_els_rrq *rrq; | 1925 | struct fc_els_rrq *rrq; |
| 1676 | struct fc_frame *fp; | 1926 | struct fc_frame *fp; |
| 1677 | u32 did; | 1927 | u32 did; |
| 1678 | 1928 | ||
| 1679 | lp = ep->lp; | 1929 | lport = ep->lp; |
| 1680 | 1930 | ||
| 1681 | fp = fc_frame_alloc(lp, sizeof(*rrq)); | 1931 | fp = fc_frame_alloc(lport, sizeof(*rrq)); |
| 1682 | if (!fp) | 1932 | if (!fp) |
| 1683 | goto retry; | 1933 | goto retry; |
| 1684 | 1934 | ||
| @@ -1694,10 +1944,11 @@ static void fc_exch_rrq(struct fc_exch *ep) | |||
| 1694 | did = ep->sid; | 1944 | did = ep->sid; |
| 1695 | 1945 | ||
| 1696 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, | 1946 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, |
| 1697 | fc_host_port_id(lp->host), FC_TYPE_ELS, | 1947 | fc_host_port_id(lport->host), FC_TYPE_ELS, |
| 1698 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1948 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
| 1699 | 1949 | ||
| 1700 | if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) | 1950 | if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, |
| 1951 | lport->e_d_tov)) | ||
| 1701 | return; | 1952 | return; |
| 1702 | 1953 | ||
| 1703 | retry: | 1954 | retry: |
| @@ -1714,12 +1965,14 @@ retry: | |||
| 1714 | } | 1965 | } |
| 1715 | 1966 | ||
| 1716 | 1967 | ||
| 1717 | /* | 1968 | /** |
| 1718 | * Handle incoming ELS RRQ - Reset Recovery Qualifier. | 1969 | * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests |
| 1970 | * @sp: The sequence that the RRQ is on | ||
| 1971 | * @fp: The RRQ frame | ||
| 1719 | */ | 1972 | */ |
| 1720 | static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) | 1973 | static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) |
| 1721 | { | 1974 | { |
| 1722 | struct fc_exch *ep; /* request or subject exchange */ | 1975 | struct fc_exch *ep = NULL; /* request or subject exchange */ |
| 1723 | struct fc_els_rrq *rp; | 1976 | struct fc_els_rrq *rp; |
| 1724 | u32 sid; | 1977 | u32 sid; |
| 1725 | u16 xid; | 1978 | u16 xid; |
| @@ -1769,17 +2022,24 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) | |||
| 1769 | * Send LS_ACC. | 2022 | * Send LS_ACC. |
| 1770 | */ | 2023 | */ |
| 1771 | fc_seq_ls_acc(sp); | 2024 | fc_seq_ls_acc(sp); |
| 1772 | fc_frame_free(fp); | 2025 | goto out; |
| 1773 | return; | ||
| 1774 | 2026 | ||
| 1775 | unlock_reject: | 2027 | unlock_reject: |
| 1776 | spin_unlock_bh(&ep->ex_lock); | 2028 | spin_unlock_bh(&ep->ex_lock); |
| 1777 | fc_exch_release(ep); /* drop hold from fc_exch_find */ | ||
| 1778 | reject: | 2029 | reject: |
| 1779 | fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); | 2030 | fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); |
| 2031 | out: | ||
| 1780 | fc_frame_free(fp); | 2032 | fc_frame_free(fp); |
| 2033 | if (ep) | ||
| 2034 | fc_exch_release(ep); /* drop hold from fc_exch_find */ | ||
| 1781 | } | 2035 | } |
| 1782 | 2036 | ||
| 2037 | /** | ||
| 2038 | * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs | ||
| 2039 | * @lport: The local port to add the exchange manager to | ||
| 2040 | * @mp: The exchange manager to be added to the local port | ||
| 2041 | * @match: The match routine that indicates when this EM should be used | ||
| 2042 | */ | ||
| 1783 | struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, | 2043 | struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, |
| 1784 | struct fc_exch_mgr *mp, | 2044 | struct fc_exch_mgr *mp, |
| 1785 | bool (*match)(struct fc_frame *)) | 2045 | bool (*match)(struct fc_frame *)) |
| @@ -1799,6 +2059,10 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, | |||
| 1799 | } | 2059 | } |
| 1800 | EXPORT_SYMBOL(fc_exch_mgr_add); | 2060 | EXPORT_SYMBOL(fc_exch_mgr_add); |
| 1801 | 2061 | ||
| 2062 | /** | ||
| 2063 | * fc_exch_mgr_destroy() - Destroy an exchange manager | ||
| 2064 | * @kref: The reference to the EM to be destroyed | ||
| 2065 | */ | ||
| 1802 | static void fc_exch_mgr_destroy(struct kref *kref) | 2066 | static void fc_exch_mgr_destroy(struct kref *kref) |
| 1803 | { | 2067 | { |
| 1804 | struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); | 2068 | struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); |
| @@ -1808,6 +2072,10 @@ static void fc_exch_mgr_destroy(struct kref *kref) | |||
| 1808 | kfree(mp); | 2072 | kfree(mp); |
| 1809 | } | 2073 | } |
| 1810 | 2074 | ||
| 2075 | /** | ||
| 2076 | * fc_exch_mgr_del() - Delete an EM from a local port's list | ||
| 2077 | * @ema: The exchange manager anchor identifying the EM to be deleted | ||
| 2078 | */ | ||
| 1811 | void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) | 2079 | void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) |
| 1812 | { | 2080 | { |
| 1813 | /* remove EM anchor from EM anchors list */ | 2081 | /* remove EM anchor from EM anchors list */ |
| @@ -1817,7 +2085,35 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) | |||
| 1817 | } | 2085 | } |
| 1818 | EXPORT_SYMBOL(fc_exch_mgr_del); | 2086 | EXPORT_SYMBOL(fc_exch_mgr_del); |
| 1819 | 2087 | ||
| 1820 | struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, | 2088 | /** |
| 2089 | * fc_exch_mgr_list_clone() - Share all exchange manager objects | ||
| 2090 | * @src: Source lport to clone exchange managers from | ||
| 2091 | * @dst: New lport that takes references to all the exchange managers | ||
| 2092 | */ | ||
| 2093 | int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst) | ||
| 2094 | { | ||
| 2095 | struct fc_exch_mgr_anchor *ema, *tmp; | ||
| 2096 | |||
| 2097 | list_for_each_entry(ema, &src->ema_list, ema_list) { | ||
| 2098 | if (!fc_exch_mgr_add(dst, ema->mp, ema->match)) | ||
| 2099 | goto err; | ||
| 2100 | } | ||
| 2101 | return 0; | ||
| 2102 | err: | ||
| 2103 | list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list) | ||
| 2104 | fc_exch_mgr_del(ema); | ||
| 2105 | return -ENOMEM; | ||
| 2106 | } | ||
| 2107 | |||
| 2108 | /** | ||
| 2109 | * fc_exch_mgr_alloc() - Allocate an exchange manager | ||
| 2110 | * @lport: The local port that the new EM will be associated with | ||
| 2111 | * @class: The default FC class for new exchanges | ||
| 2112 | * @min_xid: The minimum XID for exchanges from the new EM | ||
| 2113 | * @max_xid: The maximum XID for exchanges from the new EM | ||
| 2114 | * @match: The match routine for the new EM | ||
| 2115 | */ | ||
| 2116 | struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, | ||
| 1821 | enum fc_class class, | 2117 | enum fc_class class, |
| 1822 | u16 min_xid, u16 max_xid, | 2118 | u16 min_xid, u16 max_xid, |
| 1823 | bool (*match)(struct fc_frame *)) | 2119 | bool (*match)(struct fc_frame *)) |
| @@ -1830,7 +2126,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, | |||
| 1830 | 2126 | ||
| 1831 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || | 2127 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || |
| 1832 | (min_xid & fc_cpu_mask) != 0) { | 2128 | (min_xid & fc_cpu_mask) != 0) { |
| 1833 | FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", | 2129 | FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", |
| 1834 | min_xid, max_xid); | 2130 | min_xid, max_xid); |
| 1835 | return NULL; | 2131 | return NULL; |
| 1836 | } | 2132 | } |
| @@ -1873,7 +2169,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, | |||
| 1873 | } | 2169 | } |
| 1874 | 2170 | ||
| 1875 | kref_init(&mp->kref); | 2171 | kref_init(&mp->kref); |
| 1876 | if (!fc_exch_mgr_add(lp, mp, match)) { | 2172 | if (!fc_exch_mgr_add(lport, mp, match)) { |
| 1877 | free_percpu(mp->pool); | 2173 | free_percpu(mp->pool); |
| 1878 | goto free_mempool; | 2174 | goto free_mempool; |
| 1879 | } | 2175 | } |
| @@ -1894,76 +2190,26 @@ free_mp: | |||
| 1894 | } | 2190 | } |
| 1895 | EXPORT_SYMBOL(fc_exch_mgr_alloc); | 2191 | EXPORT_SYMBOL(fc_exch_mgr_alloc); |
| 1896 | 2192 | ||
| 2193 | /** | ||
| 2194 | * fc_exch_mgr_free() - Free all exchange managers on a local port | ||
| 2195 | * @lport: The local port whose EMs are to be freed | ||
| 2196 | */ | ||
| 1897 | void fc_exch_mgr_free(struct fc_lport *lport) | 2197 | void fc_exch_mgr_free(struct fc_lport *lport) |
| 1898 | { | 2198 | { |
| 1899 | struct fc_exch_mgr_anchor *ema, *next; | 2199 | struct fc_exch_mgr_anchor *ema, *next; |
| 1900 | 2200 | ||
| 2201 | flush_workqueue(fc_exch_workqueue); | ||
| 1901 | list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) | 2202 | list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) |
| 1902 | fc_exch_mgr_del(ema); | 2203 | fc_exch_mgr_del(ema); |
| 1903 | } | 2204 | } |
| 1904 | EXPORT_SYMBOL(fc_exch_mgr_free); | 2205 | EXPORT_SYMBOL(fc_exch_mgr_free); |
| 1905 | 2206 | ||
| 1906 | 2207 | /** | |
| 1907 | struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, | 2208 | * fc_exch_recv() - Handler for received frames |
| 1908 | struct fc_frame *fp, | 2209 | * @lport: The local port the frame was received on |
| 1909 | void (*resp)(struct fc_seq *, | 2210 | * @fp: The received frame |
| 1910 | struct fc_frame *fp, | ||
| 1911 | void *arg), | ||
| 1912 | void (*destructor)(struct fc_seq *, void *), | ||
| 1913 | void *arg, u32 timer_msec) | ||
| 1914 | { | ||
| 1915 | struct fc_exch *ep; | ||
| 1916 | struct fc_seq *sp = NULL; | ||
| 1917 | struct fc_frame_header *fh; | ||
| 1918 | int rc = 1; | ||
| 1919 | |||
| 1920 | ep = fc_exch_alloc(lp, fp); | ||
| 1921 | if (!ep) { | ||
| 1922 | fc_frame_free(fp); | ||
| 1923 | return NULL; | ||
| 1924 | } | ||
| 1925 | ep->esb_stat |= ESB_ST_SEQ_INIT; | ||
| 1926 | fh = fc_frame_header_get(fp); | ||
| 1927 | fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); | ||
| 1928 | ep->resp = resp; | ||
| 1929 | ep->destructor = destructor; | ||
| 1930 | ep->arg = arg; | ||
| 1931 | ep->r_a_tov = FC_DEF_R_A_TOV; | ||
| 1932 | ep->lp = lp; | ||
| 1933 | sp = &ep->seq; | ||
| 1934 | |||
| 1935 | ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ | ||
| 1936 | ep->f_ctl = ntoh24(fh->fh_f_ctl); | ||
| 1937 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); | ||
| 1938 | sp->cnt++; | ||
| 1939 | |||
| 1940 | if (ep->xid <= lp->lro_xid) | ||
| 1941 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); | ||
| 1942 | |||
| 1943 | if (unlikely(lp->tt.frame_send(lp, fp))) | ||
| 1944 | goto err; | ||
| 1945 | |||
| 1946 | if (timer_msec) | ||
| 1947 | fc_exch_timer_set_locked(ep, timer_msec); | ||
| 1948 | ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ | ||
| 1949 | |||
| 1950 | if (ep->f_ctl & FC_FC_SEQ_INIT) | ||
| 1951 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; | ||
| 1952 | spin_unlock_bh(&ep->ex_lock); | ||
| 1953 | return sp; | ||
| 1954 | err: | ||
| 1955 | rc = fc_exch_done_locked(ep); | ||
| 1956 | spin_unlock_bh(&ep->ex_lock); | ||
| 1957 | if (!rc) | ||
| 1958 | fc_exch_delete(ep); | ||
| 1959 | return NULL; | ||
| 1960 | } | ||
| 1961 | EXPORT_SYMBOL(fc_exch_seq_send); | ||
| 1962 | |||
| 1963 | /* | ||
| 1964 | * Receive a frame | ||
| 1965 | */ | 2211 | */ |
| 1966 | void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) | 2212 | void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) |
| 1967 | { | 2213 | { |
| 1968 | struct fc_frame_header *fh = fc_frame_header_get(fp); | 2214 | struct fc_frame_header *fh = fc_frame_header_get(fp); |
| 1969 | struct fc_exch_mgr_anchor *ema; | 2215 | struct fc_exch_mgr_anchor *ema; |
| @@ -1971,8 +2217,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1971 | u16 oxid; | 2217 | u16 oxid; |
| 1972 | 2218 | ||
| 1973 | /* lport lock ? */ | 2219 | /* lport lock ? */ |
| 1974 | if (!lp || lp->state == LPORT_ST_DISABLED) { | 2220 | if (!lport || lport->state == LPORT_ST_DISABLED) { |
| 1975 | FC_LPORT_DBG(lp, "Receiving frames for an lport that " | 2221 | FC_LPORT_DBG(lport, "Receiving frames for an lport that " |
| 1976 | "has not been initialized correctly\n"); | 2222 | "has not been initialized correctly\n"); |
| 1977 | fc_frame_free(fp); | 2223 | fc_frame_free(fp); |
| 1978 | return; | 2224 | return; |
| @@ -1981,7 +2227,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1981 | f_ctl = ntoh24(fh->fh_f_ctl); | 2227 | f_ctl = ntoh24(fh->fh_f_ctl); |
| 1982 | oxid = ntohs(fh->fh_ox_id); | 2228 | oxid = ntohs(fh->fh_ox_id); |
| 1983 | if (f_ctl & FC_FC_EX_CTX) { | 2229 | if (f_ctl & FC_FC_EX_CTX) { |
| 1984 | list_for_each_entry(ema, &lp->ema_list, ema_list) { | 2230 | list_for_each_entry(ema, &lport->ema_list, ema_list) { |
| 1985 | if ((oxid >= ema->mp->min_xid) && | 2231 | if ((oxid >= ema->mp->min_xid) && |
| 1986 | (oxid <= ema->mp->max_xid)) { | 2232 | (oxid <= ema->mp->max_xid)) { |
| 1987 | found = 1; | 2233 | found = 1; |
| @@ -1990,13 +2236,13 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) | |||
| 1990 | } | 2236 | } |
| 1991 | 2237 | ||
| 1992 | if (!found) { | 2238 | if (!found) { |
| 1993 | FC_LPORT_DBG(lp, "Received response for out " | 2239 | FC_LPORT_DBG(lport, "Received response for out " |
| 1994 | "of range oxid:%hx\n", oxid); | 2240 | "of range oxid:%hx\n", oxid); |
| 1995 | fc_frame_free(fp); | 2241 | fc_frame_free(fp); |
| 1996 | return; | 2242 | return; |
| 1997 | } | 2243 | } |
| 1998 | } else | 2244 | } else |
| 1999 | ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); | 2245 | ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list); |
| 2000 | 2246 | ||
| 2001 | /* | 2247 | /* |
| 2002 | * If frame is marked invalid, just drop it. | 2248 | * If frame is marked invalid, just drop it. |
| @@ -2015,37 +2261,56 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) | |||
| 2015 | else if (f_ctl & FC_FC_SEQ_CTX) | 2261 | else if (f_ctl & FC_FC_SEQ_CTX) |
| 2016 | fc_exch_recv_resp(ema->mp, fp); | 2262 | fc_exch_recv_resp(ema->mp, fp); |
| 2017 | else | 2263 | else |
| 2018 | fc_exch_recv_req(lp, ema->mp, fp); | 2264 | fc_exch_recv_req(lport, ema->mp, fp); |
| 2019 | break; | 2265 | break; |
| 2020 | default: | 2266 | default: |
| 2021 | FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); | 2267 | FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)", |
| 2268 | fr_eof(fp)); | ||
| 2022 | fc_frame_free(fp); | 2269 | fc_frame_free(fp); |
| 2023 | } | 2270 | } |
| 2024 | } | 2271 | } |
| 2025 | EXPORT_SYMBOL(fc_exch_recv); | 2272 | EXPORT_SYMBOL(fc_exch_recv); |
| 2026 | 2273 | ||
| 2027 | int fc_exch_init(struct fc_lport *lp) | 2274 | /** |
| 2275 | * fc_exch_init() - Initialize the exchange layer for a local port | ||
| 2276 | * @lport: The local port to initialize the exchange layer for | ||
| 2277 | */ | ||
| 2278 | int fc_exch_init(struct fc_lport *lport) | ||
| 2028 | { | 2279 | { |
| 2029 | if (!lp->tt.seq_start_next) | 2280 | if (!lport->tt.seq_start_next) |
| 2030 | lp->tt.seq_start_next = fc_seq_start_next; | 2281 | lport->tt.seq_start_next = fc_seq_start_next; |
| 2031 | 2282 | ||
| 2032 | if (!lp->tt.exch_seq_send) | 2283 | if (!lport->tt.exch_seq_send) |
| 2033 | lp->tt.exch_seq_send = fc_exch_seq_send; | 2284 | lport->tt.exch_seq_send = fc_exch_seq_send; |
| 2034 | 2285 | ||
| 2035 | if (!lp->tt.seq_send) | 2286 | if (!lport->tt.seq_send) |
| 2036 | lp->tt.seq_send = fc_seq_send; | 2287 | lport->tt.seq_send = fc_seq_send; |
| 2037 | 2288 | ||
| 2038 | if (!lp->tt.seq_els_rsp_send) | 2289 | if (!lport->tt.seq_els_rsp_send) |
| 2039 | lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send; | 2290 | lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send; |
| 2040 | 2291 | ||
| 2041 | if (!lp->tt.exch_done) | 2292 | if (!lport->tt.exch_done) |
| 2042 | lp->tt.exch_done = fc_exch_done; | 2293 | lport->tt.exch_done = fc_exch_done; |
| 2043 | 2294 | ||
| 2044 | if (!lp->tt.exch_mgr_reset) | 2295 | if (!lport->tt.exch_mgr_reset) |
| 2045 | lp->tt.exch_mgr_reset = fc_exch_mgr_reset; | 2296 | lport->tt.exch_mgr_reset = fc_exch_mgr_reset; |
| 2046 | 2297 | ||
| 2047 | if (!lp->tt.seq_exch_abort) | 2298 | if (!lport->tt.seq_exch_abort) |
| 2048 | lp->tt.seq_exch_abort = fc_seq_exch_abort; | 2299 | lport->tt.seq_exch_abort = fc_seq_exch_abort; |
| 2300 | |||
| 2301 | return 0; | ||
| 2302 | } | ||
| 2303 | EXPORT_SYMBOL(fc_exch_init); | ||
| 2304 | |||
| 2305 | /** | ||
| 2306 | * fc_setup_exch_mgr() - Setup an exchange manager | ||
| 2307 | */ | ||
| 2308 | int fc_setup_exch_mgr() | ||
| 2309 | { | ||
| 2310 | fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), | ||
| 2311 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
| 2312 | if (!fc_em_cachep) | ||
| 2313 | return -ENOMEM; | ||
| 2049 | 2314 | ||
| 2050 | /* | 2315 | /* |
| 2051 | * Initialize fc_cpu_mask and fc_cpu_order. The | 2316 | * Initialize fc_cpu_mask and fc_cpu_order. The |
| @@ -2069,20 +2334,17 @@ int fc_exch_init(struct fc_lport *lp) | |||
| 2069 | } | 2334 | } |
| 2070 | fc_cpu_mask--; | 2335 | fc_cpu_mask--; |
| 2071 | 2336 | ||
| 2072 | return 0; | 2337 | fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); |
| 2073 | } | 2338 | if (!fc_exch_workqueue) |
| 2074 | EXPORT_SYMBOL(fc_exch_init); | ||
| 2075 | |||
| 2076 | int fc_setup_exch_mgr(void) | ||
| 2077 | { | ||
| 2078 | fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), | ||
| 2079 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
| 2080 | if (!fc_em_cachep) | ||
| 2081 | return -ENOMEM; | 2339 | return -ENOMEM; |
| 2082 | return 0; | 2340 | return 0; |
| 2083 | } | 2341 | } |
| 2084 | 2342 | ||
| 2085 | void fc_destroy_exch_mgr(void) | 2343 | /** |
| 2344 | * fc_destroy_exch_mgr() - Destroy an exchange manager | ||
| 2345 | */ | ||
| 2346 | void fc_destroy_exch_mgr() | ||
| 2086 | { | 2347 | { |
| 2348 | destroy_workqueue(fc_exch_workqueue); | ||
| 2087 | kmem_cache_destroy(fc_em_cachep); | 2349 | kmem_cache_destroy(fc_em_cachep); |
| 2088 | } | 2350 | } |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 59a4408b27b5..c4b58d042f6f 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
| @@ -39,15 +39,9 @@ | |||
| 39 | #include <scsi/libfc.h> | 39 | #include <scsi/libfc.h> |
| 40 | #include <scsi/fc_encode.h> | 40 | #include <scsi/fc_encode.h> |
| 41 | 41 | ||
| 42 | MODULE_AUTHOR("Open-FCoE.org"); | 42 | #include "fc_libfc.h" |
| 43 | MODULE_DESCRIPTION("libfc"); | ||
| 44 | MODULE_LICENSE("GPL v2"); | ||
| 45 | 43 | ||
| 46 | unsigned int fc_debug_logging; | 44 | struct kmem_cache *scsi_pkt_cachep; |
| 47 | module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); | ||
| 48 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
| 49 | |||
| 50 | static struct kmem_cache *scsi_pkt_cachep; | ||
| 51 | 45 | ||
| 52 | /* SRB state definitions */ | 46 | /* SRB state definitions */ |
| 53 | #define FC_SRB_FREE 0 /* cmd is free */ | 47 | #define FC_SRB_FREE 0 /* cmd is free */ |
| @@ -58,7 +52,6 @@ static struct kmem_cache *scsi_pkt_cachep; | |||
| 58 | #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ | 52 | #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ |
| 59 | #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ | 53 | #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ |
| 60 | #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ | 54 | #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ |
| 61 | #define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ | ||
| 62 | 55 | ||
| 63 | #define FC_SRB_READ (1 << 1) | 56 | #define FC_SRB_READ (1 << 1) |
| 64 | #define FC_SRB_WRITE (1 << 0) | 57 | #define FC_SRB_WRITE (1 << 0) |
| @@ -73,10 +66,20 @@ static struct kmem_cache *scsi_pkt_cachep; | |||
| 73 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) | 66 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) |
| 74 | #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) | 67 | #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) |
| 75 | 68 | ||
| 69 | /** | ||
| 70 | * struct fc_fcp_internal - FCP layer internal data | ||
| 71 | * @scsi_pkt_pool: Memory pool to draw FCP packets from | ||
| 72 | * @scsi_pkt_queue: Current FCP packets | ||
| 73 | * @last_can_queue_ramp_down_time: ramp down time | ||
| 74 | * @last_can_queue_ramp_up_time: ramp up time | ||
| 75 | * @max_can_queue: max can_queue size | ||
| 76 | */ | ||
| 76 | struct fc_fcp_internal { | 77 | struct fc_fcp_internal { |
| 77 | mempool_t *scsi_pkt_pool; | 78 | mempool_t *scsi_pkt_pool; |
| 78 | struct list_head scsi_pkt_queue; | 79 | struct list_head scsi_pkt_queue; |
| 79 | u8 throttled; | 80 | unsigned long last_can_queue_ramp_down_time; |
| 81 | unsigned long last_can_queue_ramp_up_time; | ||
| 82 | int max_can_queue; | ||
| 80 | }; | 83 | }; |
| 81 | 84 | ||
| 82 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) | 85 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) |
| @@ -90,9 +93,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); | |||
| 90 | static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); | 93 | static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); |
| 91 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); | 94 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); |
| 92 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); | 95 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); |
| 93 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); | 96 | static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); |
| 94 | static void fc_timeout_error(struct fc_fcp_pkt *); | 97 | static void fc_timeout_error(struct fc_fcp_pkt *); |
| 95 | static void fc_fcp_timeout(unsigned long data); | 98 | static void fc_fcp_timeout(unsigned long); |
| 96 | static void fc_fcp_rec(struct fc_fcp_pkt *); | 99 | static void fc_fcp_rec(struct fc_fcp_pkt *); |
| 97 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); | 100 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); |
| 98 | static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); | 101 | static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); |
| @@ -124,6 +127,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); | |||
| 124 | #define FC_SCSI_TM_TOV (10 * HZ) | 127 | #define FC_SCSI_TM_TOV (10 * HZ) |
| 125 | #define FC_SCSI_REC_TOV (2 * HZ) | 128 | #define FC_SCSI_REC_TOV (2 * HZ) |
| 126 | #define FC_HOST_RESET_TIMEOUT (30 * HZ) | 129 | #define FC_HOST_RESET_TIMEOUT (30 * HZ) |
| 130 | #define FC_CAN_QUEUE_PERIOD (60 * HZ) | ||
| 127 | 131 | ||
| 128 | #define FC_MAX_ERROR_CNT 5 | 132 | #define FC_MAX_ERROR_CNT 5 |
| 129 | #define FC_MAX_RECOV_RETRY 3 | 133 | #define FC_MAX_RECOV_RETRY 3 |
| @@ -131,23 +135,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); | |||
| 131 | #define FC_FCP_DFLT_QUEUE_DEPTH 32 | 135 | #define FC_FCP_DFLT_QUEUE_DEPTH 32 |
| 132 | 136 | ||
| 133 | /** | 137 | /** |
| 134 | * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet | 138 | * fc_fcp_pkt_alloc() - Allocate a fcp_pkt |
| 135 | * @lp: fc lport struct | 139 | * @lport: The local port that the FCP packet is for |
| 136 | * @gfp: gfp flags for allocation | 140 | * @gfp: GFP flags for allocation |
| 137 | * | 141 | * |
| 138 | * This is used by upper layer scsi driver. | 142 | * Return value: fcp_pkt structure or null on allocation failure. |
| 139 | * Return Value : scsi_pkt structure or null on allocation failure. | 143 | * Context: Can be called from process context, no lock is required. |
| 140 | * Context : call from process context. no locking required. | ||
| 141 | */ | 144 | */ |
| 142 | static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) | 145 | static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) |
| 143 | { | 146 | { |
| 144 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 147 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
| 145 | struct fc_fcp_pkt *fsp; | 148 | struct fc_fcp_pkt *fsp; |
| 146 | 149 | ||
| 147 | fsp = mempool_alloc(si->scsi_pkt_pool, gfp); | 150 | fsp = mempool_alloc(si->scsi_pkt_pool, gfp); |
| 148 | if (fsp) { | 151 | if (fsp) { |
| 149 | memset(fsp, 0, sizeof(*fsp)); | 152 | memset(fsp, 0, sizeof(*fsp)); |
| 150 | fsp->lp = lp; | 153 | fsp->lp = lport; |
| 151 | atomic_set(&fsp->ref_cnt, 1); | 154 | atomic_set(&fsp->ref_cnt, 1); |
| 152 | init_timer(&fsp->timer); | 155 | init_timer(&fsp->timer); |
| 153 | INIT_LIST_HEAD(&fsp->list); | 156 | INIT_LIST_HEAD(&fsp->list); |
| @@ -157,12 +160,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) | |||
| 157 | } | 160 | } |
| 158 | 161 | ||
| 159 | /** | 162 | /** |
| 160 | * fc_fcp_pkt_release() - release hold on scsi_pkt packet | 163 | * fc_fcp_pkt_release() - Release hold on a fcp_pkt |
| 161 | * @fsp: fcp packet struct | 164 | * @fsp: The FCP packet to be released |
| 162 | * | 165 | * |
| 163 | * This is used by upper layer scsi driver. | 166 | * Context: Can be called from process or interrupt context, |
| 164 | * Context : call from process and interrupt context. | 167 | * no lock is required. |
| 165 | * no locking required | ||
| 166 | */ | 168 | */ |
| 167 | static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) | 169 | static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) |
| 168 | { | 170 | { |
| @@ -173,20 +175,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) | |||
| 173 | } | 175 | } |
| 174 | } | 176 | } |
| 175 | 177 | ||
| 178 | /** | ||
| 179 | * fc_fcp_pkt_hold() - Hold a fcp_pkt | ||
| 180 | * @fsp: The FCP packet to be held | ||
| 181 | */ | ||
| 176 | static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) | 182 | static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) |
| 177 | { | 183 | { |
| 178 | atomic_inc(&fsp->ref_cnt); | 184 | atomic_inc(&fsp->ref_cnt); |
| 179 | } | 185 | } |
| 180 | 186 | ||
| 181 | /** | 187 | /** |
| 182 | * fc_fcp_pkt_destory() - release hold on scsi_pkt packet | 188 | * fc_fcp_pkt_destory() - Release hold on a fcp_pkt |
| 183 | * @seq: exchange sequence | 189 | * @seq: The sequence that the FCP packet is on (required by destructor API) |
| 184 | * @fsp: fcp packet struct | 190 | * @fsp: The FCP packet to be released |
| 191 | * | ||
| 192 | * This routine is called by a destructor callback in the exch_seq_send() | ||
| 193 | * routine of the libfc Transport Template. The 'struct fc_seq' is a required | ||
| 194 | * argument even though it is not used by this routine. | ||
| 185 | * | 195 | * |
| 186 | * Release hold on scsi_pkt packet set to keep scsi_pkt | 196 | * Context: No locking required. |
| 187 | * till EM layer exch resource is not freed. | ||
| 188 | * Context : called from from EM layer. | ||
| 189 | * no locking required | ||
| 190 | */ | 197 | */ |
| 191 | static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) | 198 | static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) |
| 192 | { | 199 | { |
| @@ -194,10 +201,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) | |||
| 194 | } | 201 | } |
| 195 | 202 | ||
| 196 | /** | 203 | /** |
| 197 | * fc_fcp_lock_pkt() - lock a packet and get a ref to it. | 204 | * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count |
| 198 | * @fsp: fcp packet | 205 | * @fsp: The FCP packet to be locked and incremented |
| 199 | * | 206 | * |
| 200 | * We should only return error if we return a command to scsi-ml before | 207 | * We should only return error if we return a command to SCSI-ml before |
| 201 | * getting a response. This can happen in cases where we send a abort, but | 208 | * getting a response. This can happen in cases where we send a abort, but |
| 202 | * do not wait for the response and the abort and command can be passing | 209 | * do not wait for the response and the abort and command can be passing |
| 203 | * each other on the wire/network-layer. | 210 | * each other on the wire/network-layer. |
| @@ -222,18 +229,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) | |||
| 222 | return 0; | 229 | return 0; |
| 223 | } | 230 | } |
| 224 | 231 | ||
| 232 | /** | ||
| 233 | * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its | ||
| 234 | * reference count | ||
| 235 | * @fsp: The FCP packet to be unlocked and decremented | ||
| 236 | */ | ||
| 225 | static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) | 237 | static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) |
| 226 | { | 238 | { |
| 227 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 239 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
| 228 | fc_fcp_pkt_release(fsp); | 240 | fc_fcp_pkt_release(fsp); |
| 229 | } | 241 | } |
| 230 | 242 | ||
| 243 | /** | ||
| 244 | * fc_fcp_timer_set() - Start a timer for a fcp_pkt | ||
| 245 | * @fsp: The FCP packet to start a timer for | ||
| 246 | * @delay: The timeout period for the timer | ||
| 247 | */ | ||
| 231 | static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) | 248 | static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) |
| 232 | { | 249 | { |
| 233 | if (!(fsp->state & FC_SRB_COMPL)) | 250 | if (!(fsp->state & FC_SRB_COMPL)) |
| 234 | mod_timer(&fsp->timer, jiffies + delay); | 251 | mod_timer(&fsp->timer, jiffies + delay); |
| 235 | } | 252 | } |
| 236 | 253 | ||
| 254 | /** | ||
| 255 | * fc_fcp_send_abort() - Send an abort for exchanges associated with a | ||
| 256 | * fcp_pkt | ||
| 257 | * @fsp: The FCP packet to abort exchanges on | ||
| 258 | */ | ||
| 237 | static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) | 259 | static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) |
| 238 | { | 260 | { |
| 239 | if (!fsp->seq_ptr) | 261 | if (!fsp->seq_ptr) |
| @@ -243,9 +265,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) | |||
| 243 | return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); | 265 | return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); |
| 244 | } | 266 | } |
| 245 | 267 | ||
| 246 | /* | 268 | /** |
| 247 | * Retry command. | 269 | * fc_fcp_retry_cmd() - Retry a fcp_pkt |
| 248 | * An abort isn't needed. | 270 | * @fsp: The FCP packet to be retried |
| 271 | * | ||
| 272 | * Sets the status code to be FC_ERROR and then calls | ||
| 273 | * fc_fcp_complete_locked() which in turn calls fc_io_compl(). | ||
| 274 | * fc_io_compl() will notify the SCSI-ml that the I/O is done. | ||
| 275 | * The SCSI-ml will retry the command. | ||
| 249 | */ | 276 | */ |
| 250 | static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) | 277 | static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) |
| 251 | { | 278 | { |
| @@ -260,64 +287,146 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) | |||
| 260 | fc_fcp_complete_locked(fsp); | 287 | fc_fcp_complete_locked(fsp); |
| 261 | } | 288 | } |
| 262 | 289 | ||
| 263 | /* | 290 | /** |
| 264 | * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP | 291 | * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context |
| 265 | * transfer for a read I/O indicated by the fc_fcp_pkt. | 292 | * @fsp: The FCP packet that will manage the DDP frames |
| 266 | * @fsp: ptr to the fc_fcp_pkt | 293 | * @xid: The XID that will be used for the DDP exchange |
| 267 | * | ||
| 268 | * This is called in exch_seq_send() when we have a newly allocated | ||
| 269 | * exchange with a valid exchange id to setup ddp. | ||
| 270 | * | ||
| 271 | * returns: none | ||
| 272 | */ | 294 | */ |
| 273 | void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) | 295 | void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) |
| 274 | { | 296 | { |
| 275 | struct fc_lport *lp; | 297 | struct fc_lport *lport; |
| 276 | 298 | ||
| 277 | if (!fsp) | 299 | if (!fsp) |
| 278 | return; | 300 | return; |
| 279 | 301 | ||
| 280 | lp = fsp->lp; | 302 | lport = fsp->lp; |
| 281 | if ((fsp->req_flags & FC_SRB_READ) && | 303 | if ((fsp->req_flags & FC_SRB_READ) && |
| 282 | (lp->lro_enabled) && (lp->tt.ddp_setup)) { | 304 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { |
| 283 | if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), | 305 | if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), |
| 284 | scsi_sg_count(fsp->cmd))) | 306 | scsi_sg_count(fsp->cmd))) |
| 285 | fsp->xfer_ddp = xid; | 307 | fsp->xfer_ddp = xid; |
| 286 | } | 308 | } |
| 287 | } | 309 | } |
| 288 | EXPORT_SYMBOL(fc_fcp_ddp_setup); | ||
| 289 | 310 | ||
| 290 | /* | 311 | /** |
| 291 | * fc_fcp_ddp_done - calls to LLD's ddp_done to release any | 312 | * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any |
| 292 | * DDP related resources for this I/O if it is initialized | 313 | * DDP related resources for a fcp_pkt |
| 293 | * as a ddp transfer | 314 | * @fsp: The FCP packet that DDP had been used on |
| 294 | * @fsp: ptr to the fc_fcp_pkt | ||
| 295 | * | ||
| 296 | * returns: none | ||
| 297 | */ | 315 | */ |
| 298 | static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) | 316 | static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) |
| 299 | { | 317 | { |
| 300 | struct fc_lport *lp; | 318 | struct fc_lport *lport; |
| 301 | 319 | ||
| 302 | if (!fsp) | 320 | if (!fsp) |
| 303 | return; | 321 | return; |
| 304 | 322 | ||
| 305 | lp = fsp->lp; | 323 | if (fsp->xfer_ddp == FC_XID_UNKNOWN) |
| 306 | if (fsp->xfer_ddp && lp->tt.ddp_done) { | 324 | return; |
| 307 | fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); | 325 | |
| 308 | fsp->xfer_ddp = 0; | 326 | lport = fsp->lp; |
| 327 | if (lport->tt.ddp_done) { | ||
| 328 | fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); | ||
| 329 | fsp->xfer_ddp = FC_XID_UNKNOWN; | ||
| 309 | } | 330 | } |
| 310 | } | 331 | } |
| 311 | 332 | ||
| 333 | /** | ||
| 334 | * fc_fcp_can_queue_ramp_up() - increases can_queue | ||
| 335 | * @lport: lport to ramp up can_queue | ||
| 336 | * | ||
| 337 | * Locking notes: Called with Scsi_Host lock held | ||
| 338 | */ | ||
| 339 | static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) | ||
| 340 | { | ||
| 341 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | ||
| 342 | int can_queue; | ||
| 343 | |||
| 344 | if (si->last_can_queue_ramp_up_time && | ||
| 345 | (time_before(jiffies, si->last_can_queue_ramp_up_time + | ||
| 346 | FC_CAN_QUEUE_PERIOD))) | ||
| 347 | return; | ||
| 348 | |||
| 349 | if (time_before(jiffies, si->last_can_queue_ramp_down_time + | ||
| 350 | FC_CAN_QUEUE_PERIOD)) | ||
| 351 | return; | ||
| 352 | |||
| 353 | si->last_can_queue_ramp_up_time = jiffies; | ||
| 354 | |||
| 355 | can_queue = lport->host->can_queue << 1; | ||
| 356 | if (can_queue >= si->max_can_queue) { | ||
| 357 | can_queue = si->max_can_queue; | ||
| 358 | si->last_can_queue_ramp_down_time = 0; | ||
| 359 | } | ||
| 360 | lport->host->can_queue = can_queue; | ||
| 361 | shost_printk(KERN_ERR, lport->host, "libfc: increased " | ||
| 362 | "can_queue to %d.\n", can_queue); | ||
| 363 | } | ||
| 364 | |||
| 365 | /** | ||
| 366 | * fc_fcp_can_queue_ramp_down() - reduces can_queue | ||
| 367 | * @lport: lport to reduce can_queue | ||
| 368 | * | ||
| 369 | * If we are getting memory allocation failures, then we may | ||
| 370 | * be trying to execute too many commands. We let the running | ||
| 371 | * commands complete or timeout, then try again with a reduced | ||
| 372 | * can_queue. Eventually we will hit the point where we run | ||
| 373 | * on all reserved structs. | ||
| 374 | * | ||
| 375 | * Locking notes: Called with Scsi_Host lock held | ||
| 376 | */ | ||
| 377 | static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) | ||
| 378 | { | ||
| 379 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | ||
| 380 | int can_queue; | ||
| 381 | |||
| 382 | if (si->last_can_queue_ramp_down_time && | ||
| 383 | (time_before(jiffies, si->last_can_queue_ramp_down_time + | ||
| 384 | FC_CAN_QUEUE_PERIOD))) | ||
| 385 | return; | ||
| 386 | |||
| 387 | si->last_can_queue_ramp_down_time = jiffies; | ||
| 388 | |||
| 389 | can_queue = lport->host->can_queue; | ||
| 390 | can_queue >>= 1; | ||
| 391 | if (!can_queue) | ||
| 392 | can_queue = 1; | ||
| 393 | lport->host->can_queue = can_queue; | ||
| 394 | shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" | ||
| 395 | "Reducing can_queue to %d.\n", can_queue); | ||
| 396 | } | ||
| 312 | 397 | ||
| 313 | /* | 398 | /* |
| 314 | * Receive SCSI data from target. | 399 | * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer. |
| 315 | * Called after receiving solicited data. | 400 | * @lport: fc lport struct |
| 401 | * @len: payload length | ||
| 402 | * | ||
| 403 | * Allocates fc_frame structure and buffer but if fails to allocate | ||
| 404 | * then reduce can_queue. | ||
| 405 | */ | ||
| 406 | static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, | ||
| 407 | size_t len) | ||
| 408 | { | ||
| 409 | struct fc_frame *fp; | ||
| 410 | unsigned long flags; | ||
| 411 | |||
| 412 | fp = fc_frame_alloc(lport, len); | ||
| 413 | if (!fp) { | ||
| 414 | spin_lock_irqsave(lport->host->host_lock, flags); | ||
| 415 | fc_fcp_can_queue_ramp_down(lport); | ||
| 416 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
| 417 | } | ||
| 418 | return fp; | ||
| 419 | } | ||
| 420 | |||
| 421 | /** | ||
| 422 | * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target | ||
| 423 | * @fsp: The FCP packet the data is on | ||
| 424 | * @fp: The data frame | ||
| 316 | */ | 425 | */ |
| 317 | static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 426 | static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 318 | { | 427 | { |
| 319 | struct scsi_cmnd *sc = fsp->cmd; | 428 | struct scsi_cmnd *sc = fsp->cmd; |
| 320 | struct fc_lport *lp = fsp->lp; | 429 | struct fc_lport *lport = fsp->lp; |
| 321 | struct fcoe_dev_stats *stats; | 430 | struct fcoe_dev_stats *stats; |
| 322 | struct fc_frame_header *fh; | 431 | struct fc_frame_header *fh; |
| 323 | size_t start_offset; | 432 | size_t start_offset; |
| @@ -327,7 +436,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
| 327 | size_t len; | 436 | size_t len; |
| 328 | void *buf; | 437 | void *buf; |
| 329 | struct scatterlist *sg; | 438 | struct scatterlist *sg; |
| 330 | size_t remaining; | 439 | u32 nents; |
| 331 | 440 | ||
| 332 | fh = fc_frame_header_get(fp); | 441 | fh = fc_frame_header_get(fp); |
| 333 | offset = ntohl(fh->fh_parm_offset); | 442 | offset = ntohl(fh->fh_parm_offset); |
| @@ -351,65 +460,29 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
| 351 | if (offset != fsp->xfer_len) | 460 | if (offset != fsp->xfer_len) |
| 352 | fsp->state |= FC_SRB_DISCONTIG; | 461 | fsp->state |= FC_SRB_DISCONTIG; |
| 353 | 462 | ||
| 354 | crc = 0; | ||
| 355 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) | ||
| 356 | crc = crc32(~0, (u8 *) fh, sizeof(*fh)); | ||
| 357 | |||
| 358 | sg = scsi_sglist(sc); | 463 | sg = scsi_sglist(sc); |
| 359 | remaining = len; | 464 | nents = scsi_sg_count(sc); |
| 360 | |||
| 361 | while (remaining > 0 && sg) { | ||
| 362 | size_t off; | ||
| 363 | void *page_addr; | ||
| 364 | size_t sg_bytes; | ||
| 365 | 465 | ||
| 366 | if (offset >= sg->length) { | 466 | if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { |
| 367 | offset -= sg->length; | 467 | copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, |
| 368 | sg = sg_next(sg); | 468 | &offset, KM_SOFTIRQ0, NULL); |
| 369 | continue; | 469 | } else { |
| 370 | } | 470 | crc = crc32(~0, (u8 *) fh, sizeof(*fh)); |
| 371 | sg_bytes = min(remaining, sg->length - offset); | 471 | copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, |
| 372 | 472 | &offset, KM_SOFTIRQ0, &crc); | |
| 373 | /* | ||
| 374 | * The scatterlist item may be bigger than PAGE_SIZE, | ||
| 375 | * but we are limited to mapping PAGE_SIZE at a time. | ||
| 376 | */ | ||
| 377 | off = offset + sg->offset; | ||
| 378 | sg_bytes = min(sg_bytes, (size_t) | ||
| 379 | (PAGE_SIZE - (off & ~PAGE_MASK))); | ||
| 380 | page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), | ||
| 381 | KM_SOFTIRQ0); | ||
| 382 | if (!page_addr) | ||
| 383 | break; /* XXX panic? */ | ||
| 384 | |||
| 385 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) | ||
| 386 | crc = crc32(crc, buf, sg_bytes); | ||
| 387 | memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, | ||
| 388 | sg_bytes); | ||
| 389 | |||
| 390 | kunmap_atomic(page_addr, KM_SOFTIRQ0); | ||
| 391 | buf += sg_bytes; | ||
| 392 | offset += sg_bytes; | ||
| 393 | remaining -= sg_bytes; | ||
| 394 | copy_len += sg_bytes; | ||
| 395 | } | ||
| 396 | |||
| 397 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | ||
| 398 | buf = fc_frame_payload_get(fp, 0); | 473 | buf = fc_frame_payload_get(fp, 0); |
| 399 | if (len % 4) { | 474 | if (len % 4) |
| 400 | crc = crc32(crc, buf + len, 4 - (len % 4)); | 475 | crc = crc32(crc, buf + len, 4 - (len % 4)); |
| 401 | len += 4 - (len % 4); | ||
| 402 | } | ||
| 403 | 476 | ||
| 404 | if (~crc != le32_to_cpu(fr_crc(fp))) { | 477 | if (~crc != le32_to_cpu(fr_crc(fp))) { |
| 405 | crc_err: | 478 | crc_err: |
| 406 | stats = fc_lport_get_stats(lp); | 479 | stats = fc_lport_get_stats(lport); |
| 407 | stats->ErrorFrames++; | 480 | stats->ErrorFrames++; |
| 408 | /* FIXME - per cpu count, not total count! */ | 481 | /* FIXME - per cpu count, not total count! */ |
| 409 | if (stats->InvalidCRCCount++ < 5) | 482 | if (stats->InvalidCRCCount++ < 5) |
| 410 | printk(KERN_WARNING "libfc: CRC error on data " | 483 | printk(KERN_WARNING "libfc: CRC error on data " |
| 411 | "frame for port (%6x)\n", | 484 | "frame for port (%6x)\n", |
| 412 | fc_host_port_id(lp->host)); | 485 | fc_host_port_id(lport->host)); |
| 413 | /* | 486 | /* |
| 414 | * Assume the frame is total garbage. | 487 | * Assume the frame is total garbage. |
| 415 | * We may have copied it over the good part | 488 | * We may have copied it over the good part |
| @@ -437,18 +510,17 @@ crc_err: | |||
| 437 | } | 510 | } |
| 438 | 511 | ||
| 439 | /** | 512 | /** |
| 440 | * fc_fcp_send_data() - Send SCSI data to target. | 513 | * fc_fcp_send_data() - Send SCSI data to a target |
| 441 | * @fsp: ptr to fc_fcp_pkt | 514 | * @fsp: The FCP packet the data is on |
| 442 | * @sp: ptr to this sequence | 515 | * @sp: The sequence the data is to be sent on |
| 443 | * @offset: starting offset for this data request | 516 | * @offset: The starting offset for this data request |
| 444 | * @seq_blen: the burst length for this data request | 517 | * @seq_blen: The burst length for this data request |
| 445 | * | 518 | * |
| 446 | * Called after receiving a Transfer Ready data descriptor. | 519 | * Called after receiving a Transfer Ready data descriptor. |
| 447 | * if LLD is capable of seq offload then send down seq_blen | 520 | * If the LLD is capable of sequence offload then send down the |
| 448 | * size of data in single frame, otherwise send multiple FC | 521 | * seq_blen ammount of data in single frame, otherwise send |
| 449 | * frames of max FC frame payload supported by target port. | 522 | * multiple frames of the maximum frame payload supported by |
| 450 | * | 523 | * the target port. |
| 451 | * Returns : 0 for success. | ||
| 452 | */ | 524 | */ |
| 453 | static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | 525 | static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, |
| 454 | size_t offset, size_t seq_blen) | 526 | size_t offset, size_t seq_blen) |
| @@ -457,16 +529,18 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 457 | struct scsi_cmnd *sc; | 529 | struct scsi_cmnd *sc; |
| 458 | struct scatterlist *sg; | 530 | struct scatterlist *sg; |
| 459 | struct fc_frame *fp = NULL; | 531 | struct fc_frame *fp = NULL; |
| 460 | struct fc_lport *lp = fsp->lp; | 532 | struct fc_lport *lport = fsp->lp; |
| 533 | struct page *page; | ||
| 461 | size_t remaining; | 534 | size_t remaining; |
| 462 | size_t t_blen; | 535 | size_t t_blen; |
| 463 | size_t tlen; | 536 | size_t tlen; |
| 464 | size_t sg_bytes; | 537 | size_t sg_bytes; |
| 465 | size_t frame_offset, fh_parm_offset; | 538 | size_t frame_offset, fh_parm_offset; |
| 539 | size_t off; | ||
| 466 | int error; | 540 | int error; |
| 467 | void *data = NULL; | 541 | void *data = NULL; |
| 468 | void *page_addr; | 542 | void *page_addr; |
| 469 | int using_sg = lp->sg_supp; | 543 | int using_sg = lport->sg_supp; |
| 470 | u32 f_ctl; | 544 | u32 f_ctl; |
| 471 | 545 | ||
| 472 | WARN_ON(seq_blen <= 0); | 546 | WARN_ON(seq_blen <= 0); |
| @@ -488,10 +562,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 488 | * to max FC frame payload previously set in fsp->max_payload. | 562 | * to max FC frame payload previously set in fsp->max_payload. |
| 489 | */ | 563 | */ |
| 490 | t_blen = fsp->max_payload; | 564 | t_blen = fsp->max_payload; |
| 491 | if (lp->seq_offload) { | 565 | if (lport->seq_offload) { |
| 492 | t_blen = min(seq_blen, (size_t)lp->lso_max); | 566 | t_blen = min(seq_blen, (size_t)lport->lso_max); |
| 493 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", | 567 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", |
| 494 | fsp, seq_blen, lp->lso_max, t_blen); | 568 | fsp, seq_blen, lport->lso_max, t_blen); |
| 495 | } | 569 | } |
| 496 | 570 | ||
| 497 | WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); | 571 | WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); |
| @@ -503,7 +577,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 503 | remaining = seq_blen; | 577 | remaining = seq_blen; |
| 504 | fh_parm_offset = frame_offset = offset; | 578 | fh_parm_offset = frame_offset = offset; |
| 505 | tlen = 0; | 579 | tlen = 0; |
| 506 | seq = lp->tt.seq_start_next(seq); | 580 | seq = lport->tt.seq_start_next(seq); |
| 507 | f_ctl = FC_FC_REL_OFF; | 581 | f_ctl = FC_FC_REL_OFF; |
| 508 | WARN_ON(!seq); | 582 | WARN_ON(!seq); |
| 509 | 583 | ||
| @@ -525,43 +599,34 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 525 | */ | 599 | */ |
| 526 | if (tlen % 4) | 600 | if (tlen % 4) |
| 527 | using_sg = 0; | 601 | using_sg = 0; |
| 528 | if (using_sg) { | 602 | fp = fc_frame_alloc(lport, using_sg ? 0 : tlen); |
| 529 | fp = _fc_frame_alloc(lp, 0); | 603 | if (!fp) |
| 530 | if (!fp) | 604 | return -ENOMEM; |
| 531 | return -ENOMEM; | ||
| 532 | } else { | ||
| 533 | fp = fc_frame_alloc(lp, tlen); | ||
| 534 | if (!fp) | ||
| 535 | return -ENOMEM; | ||
| 536 | 605 | ||
| 537 | data = (void *)(fr_hdr(fp)) + | 606 | data = fc_frame_header_get(fp) + 1; |
| 538 | sizeof(struct fc_frame_header); | ||
| 539 | } | ||
| 540 | fh_parm_offset = frame_offset; | 607 | fh_parm_offset = frame_offset; |
| 541 | fr_max_payload(fp) = fsp->max_payload; | 608 | fr_max_payload(fp) = fsp->max_payload; |
| 542 | } | 609 | } |
| 610 | |||
| 611 | off = offset + sg->offset; | ||
| 543 | sg_bytes = min(tlen, sg->length - offset); | 612 | sg_bytes = min(tlen, sg->length - offset); |
| 613 | sg_bytes = min(sg_bytes, | ||
| 614 | (size_t) (PAGE_SIZE - (off & ~PAGE_MASK))); | ||
| 615 | page = sg_page(sg) + (off >> PAGE_SHIFT); | ||
| 544 | if (using_sg) { | 616 | if (using_sg) { |
| 545 | get_page(sg_page(sg)); | 617 | get_page(page); |
| 546 | skb_fill_page_desc(fp_skb(fp), | 618 | skb_fill_page_desc(fp_skb(fp), |
| 547 | skb_shinfo(fp_skb(fp))->nr_frags, | 619 | skb_shinfo(fp_skb(fp))->nr_frags, |
| 548 | sg_page(sg), sg->offset + offset, | 620 | page, off & ~PAGE_MASK, sg_bytes); |
| 549 | sg_bytes); | ||
| 550 | fp_skb(fp)->data_len += sg_bytes; | 621 | fp_skb(fp)->data_len += sg_bytes; |
| 551 | fr_len(fp) += sg_bytes; | 622 | fr_len(fp) += sg_bytes; |
| 552 | fp_skb(fp)->truesize += PAGE_SIZE; | 623 | fp_skb(fp)->truesize += PAGE_SIZE; |
| 553 | } else { | 624 | } else { |
| 554 | size_t off = offset + sg->offset; | ||
| 555 | |||
| 556 | /* | 625 | /* |
| 557 | * The scatterlist item may be bigger than PAGE_SIZE, | 626 | * The scatterlist item may be bigger than PAGE_SIZE, |
| 558 | * but we must not cross pages inside the kmap. | 627 | * but we must not cross pages inside the kmap. |
| 559 | */ | 628 | */ |
| 560 | sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - | 629 | page_addr = kmap_atomic(page, KM_SOFTIRQ0); |
| 561 | (off & ~PAGE_MASK))); | ||
| 562 | page_addr = kmap_atomic(sg_page(sg) + | ||
| 563 | (off >> PAGE_SHIFT), | ||
| 564 | KM_SOFTIRQ0); | ||
| 565 | memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), | 630 | memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), |
| 566 | sg_bytes); | 631 | sg_bytes); |
| 567 | kunmap_atomic(page_addr, KM_SOFTIRQ0); | 632 | kunmap_atomic(page_addr, KM_SOFTIRQ0); |
| @@ -572,7 +637,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 572 | tlen -= sg_bytes; | 637 | tlen -= sg_bytes; |
| 573 | remaining -= sg_bytes; | 638 | remaining -= sg_bytes; |
| 574 | 639 | ||
| 575 | if (tlen) | 640 | if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && |
| 641 | (tlen)) | ||
| 576 | continue; | 642 | continue; |
| 577 | 643 | ||
| 578 | /* | 644 | /* |
| @@ -589,7 +655,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 589 | /* | 655 | /* |
| 590 | * send fragment using for a sequence. | 656 | * send fragment using for a sequence. |
| 591 | */ | 657 | */ |
| 592 | error = lp->tt.seq_send(lp, seq, fp); | 658 | error = lport->tt.seq_send(lport, seq, fp); |
| 593 | if (error) { | 659 | if (error) { |
| 594 | WARN_ON(1); /* send error should be rare */ | 660 | WARN_ON(1); /* send error should be rare */ |
| 595 | fc_fcp_retry_cmd(fsp); | 661 | fc_fcp_retry_cmd(fsp); |
| @@ -601,6 +667,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
| 601 | return 0; | 667 | return 0; |
| 602 | } | 668 | } |
| 603 | 669 | ||
| 670 | /** | ||
| 671 | * fc_fcp_abts_resp() - Send an ABTS response | ||
| 672 | * @fsp: The FCP packet that is being aborted | ||
| 673 | * @fp: The response frame | ||
| 674 | */ | ||
| 604 | static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 675 | static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 605 | { | 676 | { |
| 606 | int ba_done = 1; | 677 | int ba_done = 1; |
| @@ -637,46 +708,13 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
| 637 | } | 708 | } |
| 638 | 709 | ||
| 639 | /** | 710 | /** |
| 640 | * fc_fcp_reduce_can_queue() - drop can_queue | 711 | * fc_fcp_recv() - Reveive an FCP frame |
| 641 | * @lp: lport to drop queueing for | ||
| 642 | * | ||
| 643 | * If we are getting memory allocation failures, then we may | ||
| 644 | * be trying to execute too many commands. We let the running | ||
| 645 | * commands complete or timeout, then try again with a reduced | ||
| 646 | * can_queue. Eventually we will hit the point where we run | ||
| 647 | * on all reserved structs. | ||
| 648 | */ | ||
| 649 | static void fc_fcp_reduce_can_queue(struct fc_lport *lp) | ||
| 650 | { | ||
| 651 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | ||
| 652 | unsigned long flags; | ||
| 653 | int can_queue; | ||
| 654 | |||
| 655 | spin_lock_irqsave(lp->host->host_lock, flags); | ||
| 656 | if (si->throttled) | ||
| 657 | goto done; | ||
| 658 | si->throttled = 1; | ||
| 659 | |||
| 660 | can_queue = lp->host->can_queue; | ||
| 661 | can_queue >>= 1; | ||
| 662 | if (!can_queue) | ||
| 663 | can_queue = 1; | ||
| 664 | lp->host->can_queue = can_queue; | ||
| 665 | shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" | ||
| 666 | "Reducing can_queue to %d.\n", can_queue); | ||
| 667 | done: | ||
| 668 | spin_unlock_irqrestore(lp->host->host_lock, flags); | ||
| 669 | } | ||
| 670 | |||
| 671 | /** | ||
| 672 | * fc_fcp_recv() - Reveive FCP frames | ||
| 673 | * @seq: The sequence the frame is on | 712 | * @seq: The sequence the frame is on |
| 674 | * @fp: The FC frame | 713 | * @fp: The received frame |
| 675 | * @arg: The related FCP packet | 714 | * @arg: The related FCP packet |
| 676 | * | 715 | * |
| 677 | * Return : None | 716 | * Context: Called from Soft IRQ context. Can not be called |
| 678 | * Context : called from Soft IRQ context | 717 | * holding the FCP packet list lock. |
| 679 | * can not called holding list lock | ||
| 680 | */ | 718 | */ |
| 681 | static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 719 | static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
| 682 | { | 720 | { |
| @@ -687,8 +725,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
| 687 | u8 r_ctl; | 725 | u8 r_ctl; |
| 688 | int rc = 0; | 726 | int rc = 0; |
| 689 | 727 | ||
| 690 | if (IS_ERR(fp)) | 728 | if (IS_ERR(fp)) { |
| 691 | goto errout; | 729 | fc_fcp_error(fsp, fp); |
| 730 | return; | ||
| 731 | } | ||
| 692 | 732 | ||
| 693 | fh = fc_frame_header_get(fp); | 733 | fh = fc_frame_header_get(fp); |
| 694 | r_ctl = fh->fh_r_ctl; | 734 | r_ctl = fh->fh_r_ctl; |
| @@ -721,8 +761,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
| 721 | (size_t) ntohl(dd->ft_burst_len)); | 761 | (size_t) ntohl(dd->ft_burst_len)); |
| 722 | if (!rc) | 762 | if (!rc) |
| 723 | seq->rec_data = fsp->xfer_len; | 763 | seq->rec_data = fsp->xfer_len; |
| 724 | else if (rc == -ENOMEM) | ||
| 725 | fsp->state |= FC_SRB_NOMEM; | ||
| 726 | } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { | 764 | } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { |
| 727 | /* | 765 | /* |
| 728 | * received a DATA frame | 766 | * received a DATA frame |
| @@ -742,13 +780,13 @@ unlock: | |||
| 742 | fc_fcp_unlock_pkt(fsp); | 780 | fc_fcp_unlock_pkt(fsp); |
| 743 | out: | 781 | out: |
| 744 | fc_frame_free(fp); | 782 | fc_frame_free(fp); |
| 745 | errout: | ||
| 746 | if (IS_ERR(fp)) | ||
| 747 | fc_fcp_error(fsp, fp); | ||
| 748 | else if (rc == -ENOMEM) | ||
| 749 | fc_fcp_reduce_can_queue(lport); | ||
| 750 | } | 783 | } |
| 751 | 784 | ||
| 785 | /** | ||
| 786 | * fc_fcp_resp() - Handler for FCP responses | ||
| 787 | * @fsp: The FCP packet the response is for | ||
| 788 | * @fp: The response frame | ||
| 789 | */ | ||
| 752 | static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 790 | static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 753 | { | 791 | { |
| 754 | struct fc_frame_header *fh; | 792 | struct fc_frame_header *fh; |
| @@ -862,15 +900,16 @@ err: | |||
| 862 | } | 900 | } |
| 863 | 901 | ||
| 864 | /** | 902 | /** |
| 865 | * fc_fcp_complete_locked() - complete processing of a fcp packet | 903 | * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the |
| 866 | * @fsp: fcp packet | 904 | * fcp_pkt lock held |
| 905 | * @fsp: The FCP packet to be completed | ||
| 867 | * | 906 | * |
| 868 | * This function may sleep if a timer is pending. The packet lock must be | 907 | * This function may sleep if a timer is pending. The packet lock must be |
| 869 | * held, and the host lock must not be held. | 908 | * held, and the host lock must not be held. |
| 870 | */ | 909 | */ |
| 871 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | 910 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) |
| 872 | { | 911 | { |
| 873 | struct fc_lport *lp = fsp->lp; | 912 | struct fc_lport *lport = fsp->lp; |
| 874 | struct fc_seq *seq; | 913 | struct fc_seq *seq; |
| 875 | struct fc_exch *ep; | 914 | struct fc_exch *ep; |
| 876 | u32 f_ctl; | 915 | u32 f_ctl; |
| @@ -901,8 +940,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | |||
| 901 | struct fc_frame *conf_frame; | 940 | struct fc_frame *conf_frame; |
| 902 | struct fc_seq *csp; | 941 | struct fc_seq *csp; |
| 903 | 942 | ||
| 904 | csp = lp->tt.seq_start_next(seq); | 943 | csp = lport->tt.seq_start_next(seq); |
| 905 | conf_frame = fc_frame_alloc(fsp->lp, 0); | 944 | conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); |
| 906 | if (conf_frame) { | 945 | if (conf_frame) { |
| 907 | f_ctl = FC_FC_SEQ_INIT; | 946 | f_ctl = FC_FC_SEQ_INIT; |
| 908 | f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; | 947 | f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; |
| @@ -910,43 +949,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | |||
| 910 | fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, | 949 | fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, |
| 911 | ep->did, ep->sid, | 950 | ep->did, ep->sid, |
| 912 | FC_TYPE_FCP, f_ctl, 0); | 951 | FC_TYPE_FCP, f_ctl, 0); |
| 913 | lp->tt.seq_send(lp, csp, conf_frame); | 952 | lport->tt.seq_send(lport, csp, conf_frame); |
| 914 | } | 953 | } |
| 915 | } | 954 | } |
| 916 | lp->tt.exch_done(seq); | 955 | lport->tt.exch_done(seq); |
| 917 | } | 956 | } |
| 918 | fc_io_compl(fsp); | 957 | fc_io_compl(fsp); |
| 919 | } | 958 | } |
| 920 | 959 | ||
| 960 | /** | ||
| 961 | * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt | ||
| 962 | * @fsp: The FCP packet whose exchanges should be canceled | ||
| 963 | * @error: The reason for the cancellation | ||
| 964 | */ | ||
| 921 | static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) | 965 | static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) |
| 922 | { | 966 | { |
| 923 | struct fc_lport *lp = fsp->lp; | 967 | struct fc_lport *lport = fsp->lp; |
| 924 | 968 | ||
| 925 | if (fsp->seq_ptr) { | 969 | if (fsp->seq_ptr) { |
| 926 | lp->tt.exch_done(fsp->seq_ptr); | 970 | lport->tt.exch_done(fsp->seq_ptr); |
| 927 | fsp->seq_ptr = NULL; | 971 | fsp->seq_ptr = NULL; |
| 928 | } | 972 | } |
| 929 | fsp->status_code = error; | 973 | fsp->status_code = error; |
| 930 | } | 974 | } |
| 931 | 975 | ||
| 932 | /** | 976 | /** |
| 933 | * fc_fcp_cleanup_each_cmd() - Cleanup active commads | 977 | * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port |
| 934 | * @lp: logical port | 978 | * @lport: The local port whose exchanges should be canceled |
| 935 | * @id: target id | 979 | * @id: The target's ID |
| 936 | * @lun: lun | 980 | * @lun: The LUN |
| 937 | * @error: fsp status code | 981 | * @error: The reason for cancellation |
| 938 | * | 982 | * |
| 939 | * If lun or id is -1, they are ignored. | 983 | * If lun or id is -1, they are ignored. |
| 940 | */ | 984 | */ |
| 941 | static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, | 985 | static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, |
| 942 | unsigned int lun, int error) | 986 | unsigned int lun, int error) |
| 943 | { | 987 | { |
| 944 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 988 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
| 945 | struct fc_fcp_pkt *fsp; | 989 | struct fc_fcp_pkt *fsp; |
| 946 | struct scsi_cmnd *sc_cmd; | 990 | struct scsi_cmnd *sc_cmd; |
| 947 | unsigned long flags; | 991 | unsigned long flags; |
| 948 | 992 | ||
| 949 | spin_lock_irqsave(lp->host->host_lock, flags); | 993 | spin_lock_irqsave(lport->host->host_lock, flags); |
| 950 | restart: | 994 | restart: |
| 951 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { | 995 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { |
| 952 | sc_cmd = fsp->cmd; | 996 | sc_cmd = fsp->cmd; |
| @@ -957,7 +1001,7 @@ restart: | |||
| 957 | continue; | 1001 | continue; |
| 958 | 1002 | ||
| 959 | fc_fcp_pkt_hold(fsp); | 1003 | fc_fcp_pkt_hold(fsp); |
| 960 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1004 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 961 | 1005 | ||
| 962 | if (!fc_fcp_lock_pkt(fsp)) { | 1006 | if (!fc_fcp_lock_pkt(fsp)) { |
| 963 | fc_fcp_cleanup_cmd(fsp, error); | 1007 | fc_fcp_cleanup_cmd(fsp, error); |
| @@ -966,35 +1010,36 @@ restart: | |||
| 966 | } | 1010 | } |
| 967 | 1011 | ||
| 968 | fc_fcp_pkt_release(fsp); | 1012 | fc_fcp_pkt_release(fsp); |
| 969 | spin_lock_irqsave(lp->host->host_lock, flags); | 1013 | spin_lock_irqsave(lport->host->host_lock, flags); |
| 970 | /* | 1014 | /* |
| 971 | * while we dropped the lock multiple pkts could | 1015 | * while we dropped the lock multiple pkts could |
| 972 | * have been released, so we have to start over. | 1016 | * have been released, so we have to start over. |
| 973 | */ | 1017 | */ |
| 974 | goto restart; | 1018 | goto restart; |
| 975 | } | 1019 | } |
| 976 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1020 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 977 | } | 1021 | } |
| 978 | 1022 | ||
| 979 | static void fc_fcp_abort_io(struct fc_lport *lp) | 1023 | /** |
| 1024 | * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port | ||
| 1025 | * @lport: The local port whose exchanges are to be aborted | ||
| 1026 | */ | ||
| 1027 | static void fc_fcp_abort_io(struct fc_lport *lport) | ||
| 980 | { | 1028 | { |
| 981 | fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); | 1029 | fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); |
| 982 | } | 1030 | } |
| 983 | 1031 | ||
| 984 | /** | 1032 | /** |
| 985 | * fc_fcp_pkt_send() - send a fcp packet to the lower level. | 1033 | * fc_fcp_pkt_send() - Send a fcp_pkt |
| 986 | * @lp: fc lport | 1034 | * @lport: The local port to send the FCP packet on |
| 987 | * @fsp: fc packet. | 1035 | * @fsp: The FCP packet to send |
| 988 | * | 1036 | * |
| 989 | * This is called by upper layer protocol. | 1037 | * Return: Zero for success and -1 for failure |
| 990 | * Return : zero for success and -1 for failure | 1038 | * Locks: Called with the host lock and irqs disabled. |
| 991 | * Context : called from queuecommand which can be called from process | ||
| 992 | * or scsi soft irq. | ||
| 993 | * Locks : called with the host lock and irqs disabled. | ||
| 994 | */ | 1039 | */ |
| 995 | static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | 1040 | static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) |
| 996 | { | 1041 | { |
| 997 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 1042 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
| 998 | int rc; | 1043 | int rc; |
| 999 | 1044 | ||
| 1000 | fsp->cmd->SCp.ptr = (char *)fsp; | 1045 | fsp->cmd->SCp.ptr = (char *)fsp; |
| @@ -1006,16 +1051,22 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | |||
| 1006 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); | 1051 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); |
| 1007 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); | 1052 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); |
| 1008 | 1053 | ||
| 1009 | spin_unlock_irq(lp->host->host_lock); | 1054 | spin_unlock_irq(lport->host->host_lock); |
| 1010 | rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); | 1055 | rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); |
| 1011 | spin_lock_irq(lp->host->host_lock); | 1056 | spin_lock_irq(lport->host->host_lock); |
| 1012 | if (rc) | 1057 | if (rc) |
| 1013 | list_del(&fsp->list); | 1058 | list_del(&fsp->list); |
| 1014 | 1059 | ||
| 1015 | return rc; | 1060 | return rc; |
| 1016 | } | 1061 | } |
| 1017 | 1062 | ||
| 1018 | static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | 1063 | /** |
| 1064 | * fc_fcp_cmd_send() - Send a FCP command | ||
| 1065 | * @lport: The local port to send the command on | ||
| 1066 | * @fsp: The FCP packet the command is on | ||
| 1067 | * @resp: The handler for the response | ||
| 1068 | */ | ||
| 1069 | static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, | ||
| 1019 | void (*resp)(struct fc_seq *, | 1070 | void (*resp)(struct fc_seq *, |
| 1020 | struct fc_frame *fp, | 1071 | struct fc_frame *fp, |
| 1021 | void *arg)) | 1072 | void *arg)) |
| @@ -1023,14 +1074,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
| 1023 | struct fc_frame *fp; | 1074 | struct fc_frame *fp; |
| 1024 | struct fc_seq *seq; | 1075 | struct fc_seq *seq; |
| 1025 | struct fc_rport *rport; | 1076 | struct fc_rport *rport; |
| 1026 | struct fc_rport_libfc_priv *rp; | 1077 | struct fc_rport_libfc_priv *rpriv; |
| 1027 | const size_t len = sizeof(fsp->cdb_cmd); | 1078 | const size_t len = sizeof(fsp->cdb_cmd); |
| 1028 | int rc = 0; | 1079 | int rc = 0; |
| 1029 | 1080 | ||
| 1030 | if (fc_fcp_lock_pkt(fsp)) | 1081 | if (fc_fcp_lock_pkt(fsp)) |
| 1031 | return 0; | 1082 | return 0; |
| 1032 | 1083 | ||
| 1033 | fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); | 1084 | fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd)); |
| 1034 | if (!fp) { | 1085 | if (!fp) { |
| 1035 | rc = -1; | 1086 | rc = -1; |
| 1036 | goto unlock; | 1087 | goto unlock; |
| @@ -1040,15 +1091,15 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
| 1040 | fr_fsp(fp) = fsp; | 1091 | fr_fsp(fp) = fsp; |
| 1041 | rport = fsp->rport; | 1092 | rport = fsp->rport; |
| 1042 | fsp->max_payload = rport->maxframe_size; | 1093 | fsp->max_payload = rport->maxframe_size; |
| 1043 | rp = rport->dd_data; | 1094 | rpriv = rport->dd_data; |
| 1044 | 1095 | ||
| 1045 | fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, | 1096 | fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, |
| 1046 | fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, | 1097 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, |
| 1047 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1098 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
| 1048 | 1099 | ||
| 1049 | seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); | 1100 | seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, |
| 1101 | fsp, 0); | ||
| 1050 | if (!seq) { | 1102 | if (!seq) { |
| 1051 | fc_frame_free(fp); | ||
| 1052 | rc = -1; | 1103 | rc = -1; |
| 1053 | goto unlock; | 1104 | goto unlock; |
| 1054 | } | 1105 | } |
| @@ -1065,8 +1116,10 @@ unlock: | |||
| 1065 | return rc; | 1116 | return rc; |
| 1066 | } | 1117 | } |
| 1067 | 1118 | ||
| 1068 | /* | 1119 | /** |
| 1069 | * transport error handler | 1120 | * fc_fcp_error() - Handler for FCP layer errors |
| 1121 | * @fsp: The FCP packet the error is on | ||
| 1122 | * @fp: The frame that has errored | ||
| 1070 | */ | 1123 | */ |
| 1071 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1124 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 1072 | { | 1125 | { |
| @@ -1091,11 +1144,13 @@ unlock: | |||
| 1091 | fc_fcp_unlock_pkt(fsp); | 1144 | fc_fcp_unlock_pkt(fsp); |
| 1092 | } | 1145 | } |
| 1093 | 1146 | ||
| 1094 | /* | 1147 | /** |
| 1095 | * Scsi abort handler- calls to send an abort | 1148 | * fc_fcp_pkt_abort() - Abort a fcp_pkt |
| 1096 | * and then wait for abort completion | 1149 | * @fsp: The FCP packet to abort on |
| 1150 | * | ||
| 1151 | * Called to send an abort and then wait for abort completion | ||
| 1097 | */ | 1152 | */ |
| 1098 | static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | 1153 | static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) |
| 1099 | { | 1154 | { |
| 1100 | int rc = FAILED; | 1155 | int rc = FAILED; |
| 1101 | 1156 | ||
| @@ -1122,14 +1177,15 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | |||
| 1122 | return rc; | 1177 | return rc; |
| 1123 | } | 1178 | } |
| 1124 | 1179 | ||
| 1125 | /* | 1180 | /** |
| 1126 | * Retry LUN reset after resource allocation failed. | 1181 | * fc_lun_reset_send() - Send LUN reset command |
| 1182 | * @data: The FCP packet that identifies the LUN to be reset | ||
| 1127 | */ | 1183 | */ |
| 1128 | static void fc_lun_reset_send(unsigned long data) | 1184 | static void fc_lun_reset_send(unsigned long data) |
| 1129 | { | 1185 | { |
| 1130 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; | 1186 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; |
| 1131 | struct fc_lport *lp = fsp->lp; | 1187 | struct fc_lport *lport = fsp->lp; |
| 1132 | if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { | 1188 | if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { |
| 1133 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) | 1189 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) |
| 1134 | return; | 1190 | return; |
| 1135 | if (fc_fcp_lock_pkt(fsp)) | 1191 | if (fc_fcp_lock_pkt(fsp)) |
| @@ -1140,11 +1196,15 @@ static void fc_lun_reset_send(unsigned long data) | |||
| 1140 | } | 1196 | } |
| 1141 | } | 1197 | } |
| 1142 | 1198 | ||
| 1143 | /* | 1199 | /** |
| 1144 | * Scsi device reset handler- send a LUN RESET to the device | 1200 | * fc_lun_reset() - Send a LUN RESET command to a device |
| 1145 | * and wait for reset reply | 1201 | * and wait for the reply |
| 1202 | * @lport: The local port to sent the comand on | ||
| 1203 | * @fsp: The FCP packet that identifies the LUN to be reset | ||
| 1204 | * @id: The SCSI command ID | ||
| 1205 | * @lun: The LUN ID to be reset | ||
| 1146 | */ | 1206 | */ |
| 1147 | static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | 1207 | static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, |
| 1148 | unsigned int id, unsigned int lun) | 1208 | unsigned int id, unsigned int lun) |
| 1149 | { | 1209 | { |
| 1150 | int rc; | 1210 | int rc; |
| @@ -1172,14 +1232,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
| 1172 | 1232 | ||
| 1173 | spin_lock_bh(&fsp->scsi_pkt_lock); | 1233 | spin_lock_bh(&fsp->scsi_pkt_lock); |
| 1174 | if (fsp->seq_ptr) { | 1234 | if (fsp->seq_ptr) { |
| 1175 | lp->tt.exch_done(fsp->seq_ptr); | 1235 | lport->tt.exch_done(fsp->seq_ptr); |
| 1176 | fsp->seq_ptr = NULL; | 1236 | fsp->seq_ptr = NULL; |
| 1177 | } | 1237 | } |
| 1178 | fsp->wait_for_comp = 0; | 1238 | fsp->wait_for_comp = 0; |
| 1179 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 1239 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
| 1180 | 1240 | ||
| 1181 | if (!rc) { | 1241 | if (!rc) { |
| 1182 | FC_SCSI_DBG(lp, "lun reset failed\n"); | 1242 | FC_SCSI_DBG(lport, "lun reset failed\n"); |
| 1183 | return FAILED; | 1243 | return FAILED; |
| 1184 | } | 1244 | } |
| 1185 | 1245 | ||
| @@ -1187,13 +1247,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
| 1187 | if (fsp->cdb_status != FCP_TMF_CMPL) | 1247 | if (fsp->cdb_status != FCP_TMF_CMPL) |
| 1188 | return FAILED; | 1248 | return FAILED; |
| 1189 | 1249 | ||
| 1190 | FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); | 1250 | FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); |
| 1191 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); | 1251 | fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); |
| 1192 | return SUCCESS; | 1252 | return SUCCESS; |
| 1193 | } | 1253 | } |
| 1194 | 1254 | ||
| 1195 | /* | 1255 | /** |
| 1196 | * Task Managment response handler | 1256 | * fc_tm_done() - Task Managment response handler |
| 1257 | * @seq: The sequence that the response is on | ||
| 1258 | * @fp: The response frame | ||
| 1259 | * @arg: The FCP packet the response is for | ||
| 1197 | */ | 1260 | */ |
| 1198 | static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1261 | static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
| 1199 | { | 1262 | { |
| @@ -1230,34 +1293,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
| 1230 | fc_fcp_unlock_pkt(fsp); | 1293 | fc_fcp_unlock_pkt(fsp); |
| 1231 | } | 1294 | } |
| 1232 | 1295 | ||
| 1233 | static void fc_fcp_cleanup(struct fc_lport *lp) | 1296 | /** |
| 1297 | * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port | ||
| 1298 | * @lport: The local port to be cleaned up | ||
| 1299 | */ | ||
| 1300 | static void fc_fcp_cleanup(struct fc_lport *lport) | ||
| 1234 | { | 1301 | { |
| 1235 | fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); | 1302 | fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); |
| 1236 | } | 1303 | } |
| 1237 | 1304 | ||
| 1238 | /* | 1305 | /** |
| 1239 | * fc_fcp_timeout: called by OS timer function. | 1306 | * fc_fcp_timeout() - Handler for fcp_pkt timeouts |
| 1240 | * | 1307 | * @data: The FCP packet that has timed out |
| 1241 | * The timer has been inactivated and must be reactivated if desired | ||
| 1242 | * using fc_fcp_timer_set(). | ||
| 1243 | * | ||
| 1244 | * Algorithm: | ||
| 1245 | * | ||
| 1246 | * If REC is supported, just issue it, and return. The REC exchange will | ||
| 1247 | * complete or time out, and recovery can continue at that point. | ||
| 1248 | * | ||
| 1249 | * Otherwise, if the response has been received without all the data, | ||
| 1250 | * it has been ER_TIMEOUT since the response was received. | ||
| 1251 | * | 1308 | * |
| 1252 | * If the response has not been received, | 1309 | * If REC is supported then just issue it and return. The REC exchange will |
| 1253 | * we see if data was received recently. If it has been, we continue waiting, | 1310 | * complete or time out and recovery can continue at that point. Otherwise, |
| 1254 | * otherwise, we abort the command. | 1311 | * if the response has been received without all the data it has been |
| 1312 | * ER_TIMEOUT since the response was received. If the response has not been | ||
| 1313 | * received we see if data was received recently. If it has been then we | ||
| 1314 | * continue waiting, otherwise, we abort the command. | ||
| 1255 | */ | 1315 | */ |
| 1256 | static void fc_fcp_timeout(unsigned long data) | 1316 | static void fc_fcp_timeout(unsigned long data) |
| 1257 | { | 1317 | { |
| 1258 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; | 1318 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; |
| 1259 | struct fc_rport *rport = fsp->rport; | 1319 | struct fc_rport *rport = fsp->rport; |
| 1260 | struct fc_rport_libfc_priv *rp = rport->dd_data; | 1320 | struct fc_rport_libfc_priv *rpriv = rport->dd_data; |
| 1261 | 1321 | ||
| 1262 | if (fc_fcp_lock_pkt(fsp)) | 1322 | if (fc_fcp_lock_pkt(fsp)) |
| 1263 | return; | 1323 | return; |
| @@ -1267,7 +1327,7 @@ static void fc_fcp_timeout(unsigned long data) | |||
| 1267 | 1327 | ||
| 1268 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; | 1328 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; |
| 1269 | 1329 | ||
| 1270 | if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) | 1330 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) |
| 1271 | fc_fcp_rec(fsp); | 1331 | fc_fcp_rec(fsp); |
| 1272 | else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), | 1332 | else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), |
| 1273 | jiffies)) | 1333 | jiffies)) |
| @@ -1281,39 +1341,40 @@ unlock: | |||
| 1281 | fc_fcp_unlock_pkt(fsp); | 1341 | fc_fcp_unlock_pkt(fsp); |
| 1282 | } | 1342 | } |
| 1283 | 1343 | ||
| 1284 | /* | 1344 | /** |
| 1285 | * Send a REC ELS request | 1345 | * fc_fcp_rec() - Send a REC ELS request |
| 1346 | * @fsp: The FCP packet to send the REC request on | ||
| 1286 | */ | 1347 | */ |
| 1287 | static void fc_fcp_rec(struct fc_fcp_pkt *fsp) | 1348 | static void fc_fcp_rec(struct fc_fcp_pkt *fsp) |
| 1288 | { | 1349 | { |
| 1289 | struct fc_lport *lp; | 1350 | struct fc_lport *lport; |
| 1290 | struct fc_frame *fp; | 1351 | struct fc_frame *fp; |
| 1291 | struct fc_rport *rport; | 1352 | struct fc_rport *rport; |
| 1292 | struct fc_rport_libfc_priv *rp; | 1353 | struct fc_rport_libfc_priv *rpriv; |
| 1293 | 1354 | ||
| 1294 | lp = fsp->lp; | 1355 | lport = fsp->lp; |
| 1295 | rport = fsp->rport; | 1356 | rport = fsp->rport; |
| 1296 | rp = rport->dd_data; | 1357 | rpriv = rport->dd_data; |
| 1297 | if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { | 1358 | if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { |
| 1298 | fsp->status_code = FC_HRD_ERROR; | 1359 | fsp->status_code = FC_HRD_ERROR; |
| 1299 | fsp->io_status = 0; | 1360 | fsp->io_status = 0; |
| 1300 | fc_fcp_complete_locked(fsp); | 1361 | fc_fcp_complete_locked(fsp); |
| 1301 | return; | 1362 | return; |
| 1302 | } | 1363 | } |
| 1303 | fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); | 1364 | fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); |
| 1304 | if (!fp) | 1365 | if (!fp) |
| 1305 | goto retry; | 1366 | goto retry; |
| 1306 | 1367 | ||
| 1307 | fr_seq(fp) = fsp->seq_ptr; | 1368 | fr_seq(fp) = fsp->seq_ptr; |
| 1308 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, | 1369 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, |
| 1309 | fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, | 1370 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS, |
| 1310 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1371 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
| 1311 | if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, | 1372 | if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, |
| 1312 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { | 1373 | fc_fcp_rec_resp, fsp, |
| 1374 | jiffies_to_msecs(FC_SCSI_REC_TOV))) { | ||
| 1313 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ | 1375 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ |
| 1314 | return; | 1376 | return; |
| 1315 | } | 1377 | } |
| 1316 | fc_frame_free(fp); | ||
| 1317 | retry: | 1378 | retry: |
| 1318 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1379 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
| 1319 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | 1380 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); |
| @@ -1321,12 +1382,16 @@ retry: | |||
| 1321 | fc_timeout_error(fsp); | 1382 | fc_timeout_error(fsp); |
| 1322 | } | 1383 | } |
| 1323 | 1384 | ||
| 1324 | /* | 1385 | /** |
| 1325 | * Receive handler for REC ELS frame | 1386 | * fc_fcp_rec_resp() - Handler for REC ELS responses |
| 1326 | * if it is a reject then let the scsi layer to handle | 1387 | * @seq: The sequence the response is on |
| 1327 | * the timeout. if it is a LS_ACC then if the io was not completed | 1388 | * @fp: The response frame |
| 1328 | * then set the timeout and return otherwise complete the exchange | 1389 | * @arg: The FCP packet the response is on |
| 1329 | * and tell the scsi layer to restart the I/O. | 1390 | * |
| 1391 | * If the response is a reject then the scsi layer will handle | ||
| 1392 | * the timeout. If the response is a LS_ACC then if the I/O was not completed | ||
| 1393 | * set the timeout and return. If the I/O was completed then complete the | ||
| 1394 | * exchange and tell the SCSI layer. | ||
| 1330 | */ | 1395 | */ |
| 1331 | static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1396 | static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
| 1332 | { | 1397 | { |
| @@ -1338,7 +1403,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
| 1338 | u32 offset; | 1403 | u32 offset; |
| 1339 | enum dma_data_direction data_dir; | 1404 | enum dma_data_direction data_dir; |
| 1340 | enum fc_rctl r_ctl; | 1405 | enum fc_rctl r_ctl; |
| 1341 | struct fc_rport_libfc_priv *rp; | 1406 | struct fc_rport_libfc_priv *rpriv; |
| 1342 | 1407 | ||
| 1343 | if (IS_ERR(fp)) { | 1408 | if (IS_ERR(fp)) { |
| 1344 | fc_fcp_rec_error(fsp, fp); | 1409 | fc_fcp_rec_error(fsp, fp); |
| @@ -1361,13 +1426,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
| 1361 | /* fall through */ | 1426 | /* fall through */ |
| 1362 | case ELS_RJT_UNSUP: | 1427 | case ELS_RJT_UNSUP: |
| 1363 | FC_FCP_DBG(fsp, "device does not support REC\n"); | 1428 | FC_FCP_DBG(fsp, "device does not support REC\n"); |
| 1364 | rp = fsp->rport->dd_data; | 1429 | rpriv = fsp->rport->dd_data; |
| 1365 | /* | 1430 | /* |
| 1366 | * if we do not spport RECs or got some bogus | 1431 | * if we do not spport RECs or got some bogus |
| 1367 | * reason then resetup timer so we check for | 1432 | * reason then resetup timer so we check for |
| 1368 | * making progress. | 1433 | * making progress. |
| 1369 | */ | 1434 | */ |
| 1370 | rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; | 1435 | rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; |
| 1371 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); | 1436 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); |
| 1372 | break; | 1437 | break; |
| 1373 | case ELS_RJT_LOGIC: | 1438 | case ELS_RJT_LOGIC: |
| @@ -1464,8 +1529,10 @@ out: | |||
| 1464 | fc_frame_free(fp); | 1529 | fc_frame_free(fp); |
| 1465 | } | 1530 | } |
| 1466 | 1531 | ||
| 1467 | /* | 1532 | /** |
| 1468 | * Handle error response or timeout for REC exchange. | 1533 | * fc_fcp_rec_error() - Handler for REC errors |
| 1534 | * @fsp: The FCP packet the error is on | ||
| 1535 | * @fp: The REC frame | ||
| 1469 | */ | 1536 | */ |
| 1470 | static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1537 | static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 1471 | { | 1538 | { |
| @@ -1504,10 +1571,9 @@ out: | |||
| 1504 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ | 1571 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ |
| 1505 | } | 1572 | } |
| 1506 | 1573 | ||
| 1507 | /* | 1574 | /** |
| 1508 | * Time out error routine: | 1575 | * fc_timeout_error() - Handler for fcp_pkt timeouts |
| 1509 | * abort's the I/O close the exchange and | 1576 | * @fsp: The FCP packt that has timed out |
| 1510 | * send completion notification to scsi layer | ||
| 1511 | */ | 1577 | */ |
| 1512 | static void fc_timeout_error(struct fc_fcp_pkt *fsp) | 1578 | static void fc_timeout_error(struct fc_fcp_pkt *fsp) |
| 1513 | { | 1579 | { |
| @@ -1521,16 +1587,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp) | |||
| 1521 | fc_fcp_send_abort(fsp); | 1587 | fc_fcp_send_abort(fsp); |
| 1522 | } | 1588 | } |
| 1523 | 1589 | ||
| 1524 | /* | 1590 | /** |
| 1525 | * Sequence retransmission request. | 1591 | * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) |
| 1592 | * @fsp: The FCP packet the SRR is to be sent on | ||
| 1593 | * @r_ctl: The R_CTL field for the SRR request | ||
| 1526 | * This is called after receiving status but insufficient data, or | 1594 | * This is called after receiving status but insufficient data, or |
| 1527 | * when expecting status but the request has timed out. | 1595 | * when expecting status but the request has timed out. |
| 1528 | */ | 1596 | */ |
| 1529 | static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | 1597 | static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) |
| 1530 | { | 1598 | { |
| 1531 | struct fc_lport *lp = fsp->lp; | 1599 | struct fc_lport *lport = fsp->lp; |
| 1532 | struct fc_rport *rport; | 1600 | struct fc_rport *rport; |
| 1533 | struct fc_rport_libfc_priv *rp; | 1601 | struct fc_rport_libfc_priv *rpriv; |
| 1534 | struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); | 1602 | struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); |
| 1535 | struct fc_seq *seq; | 1603 | struct fc_seq *seq; |
| 1536 | struct fcp_srr *srr; | 1604 | struct fcp_srr *srr; |
| @@ -1538,12 +1606,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
| 1538 | u8 cdb_op; | 1606 | u8 cdb_op; |
| 1539 | 1607 | ||
| 1540 | rport = fsp->rport; | 1608 | rport = fsp->rport; |
| 1541 | rp = rport->dd_data; | 1609 | rpriv = rport->dd_data; |
| 1542 | cdb_op = fsp->cdb_cmd.fc_cdb[0]; | 1610 | cdb_op = fsp->cdb_cmd.fc_cdb[0]; |
| 1543 | 1611 | ||
| 1544 | if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) | 1612 | if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || |
| 1613 | rpriv->rp_state != RPORT_ST_READY) | ||
| 1545 | goto retry; /* shouldn't happen */ | 1614 | goto retry; /* shouldn't happen */ |
| 1546 | fp = fc_frame_alloc(lp, sizeof(*srr)); | 1615 | fp = fc_fcp_frame_alloc(lport, sizeof(*srr)); |
| 1547 | if (!fp) | 1616 | if (!fp) |
| 1548 | goto retry; | 1617 | goto retry; |
| 1549 | 1618 | ||
| @@ -1556,15 +1625,14 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
| 1556 | srr->srr_rel_off = htonl(offset); | 1625 | srr->srr_rel_off = htonl(offset); |
| 1557 | 1626 | ||
| 1558 | fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, | 1627 | fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, |
| 1559 | fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, | 1628 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, |
| 1560 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1629 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
| 1561 | 1630 | ||
| 1562 | seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, | 1631 | seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, |
| 1563 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); | 1632 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); |
| 1564 | if (!seq) { | 1633 | if (!seq) |
| 1565 | fc_frame_free(fp); | ||
| 1566 | goto retry; | 1634 | goto retry; |
| 1567 | } | 1635 | |
| 1568 | fsp->recov_seq = seq; | 1636 | fsp->recov_seq = seq; |
| 1569 | fsp->xfer_len = offset; | 1637 | fsp->xfer_len = offset; |
| 1570 | fsp->xfer_contig_end = offset; | 1638 | fsp->xfer_contig_end = offset; |
| @@ -1575,8 +1643,11 @@ retry: | |||
| 1575 | fc_fcp_retry_cmd(fsp); | 1643 | fc_fcp_retry_cmd(fsp); |
| 1576 | } | 1644 | } |
| 1577 | 1645 | ||
| 1578 | /* | 1646 | /** |
| 1579 | * Handle response from SRR. | 1647 | * fc_fcp_srr_resp() - Handler for SRR response |
| 1648 | * @seq: The sequence the SRR is on | ||
| 1649 | * @fp: The SRR frame | ||
| 1650 | * @arg: The FCP packet the SRR is on | ||
| 1580 | */ | 1651 | */ |
| 1581 | static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1652 | static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
| 1582 | { | 1653 | { |
| @@ -1622,6 +1693,11 @@ out: | |||
| 1622 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ | 1693 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ |
| 1623 | } | 1694 | } |
| 1624 | 1695 | ||
| 1696 | /** | ||
| 1697 | * fc_fcp_srr_error() - Handler for SRR errors | ||
| 1698 | * @fsp: The FCP packet that the SRR error is on | ||
| 1699 | * @fp: The SRR frame | ||
| 1700 | */ | ||
| 1625 | static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1701 | static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
| 1626 | { | 1702 | { |
| 1627 | if (fc_fcp_lock_pkt(fsp)) | 1703 | if (fc_fcp_lock_pkt(fsp)) |
| @@ -1646,31 +1722,36 @@ out: | |||
| 1646 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ | 1722 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ |
| 1647 | } | 1723 | } |
| 1648 | 1724 | ||
| 1649 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) | 1725 | /** |
| 1726 | * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready | ||
| 1727 | * @lport: The local port to be checked | ||
| 1728 | */ | ||
| 1729 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) | ||
| 1650 | { | 1730 | { |
| 1651 | /* lock ? */ | 1731 | /* lock ? */ |
| 1652 | return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; | 1732 | return (lport->state == LPORT_ST_READY) && |
| 1733 | lport->link_up && !lport->qfull; | ||
| 1653 | } | 1734 | } |
| 1654 | 1735 | ||
| 1655 | /** | 1736 | /** |
| 1656 | * fc_queuecommand - The queuecommand function of the scsi template | 1737 | * fc_queuecommand() - The queuecommand function of the SCSI template |
| 1657 | * @cmd: struct scsi_cmnd to be executed | 1738 | * @cmd: The scsi_cmnd to be executed |
| 1658 | * @done: Callback function to be called when cmd is completed | 1739 | * @done: The callback function to be called when the scsi_cmnd is complete |
| 1659 | * | 1740 | * |
| 1660 | * this is the i/o strategy routine, called by the scsi layer | 1741 | * This is the i/o strategy routine, called by the SCSI layer. This routine |
| 1661 | * this routine is called with holding the host_lock. | 1742 | * is called with the host_lock held. |
| 1662 | */ | 1743 | */ |
| 1663 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | 1744 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) |
| 1664 | { | 1745 | { |
| 1665 | struct fc_lport *lp; | 1746 | struct fc_lport *lport; |
| 1666 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 1747 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
| 1667 | struct fc_fcp_pkt *fsp; | 1748 | struct fc_fcp_pkt *fsp; |
| 1668 | struct fc_rport_libfc_priv *rp; | 1749 | struct fc_rport_libfc_priv *rpriv; |
| 1669 | int rval; | 1750 | int rval; |
| 1670 | int rc = 0; | 1751 | int rc = 0; |
| 1671 | struct fcoe_dev_stats *stats; | 1752 | struct fcoe_dev_stats *stats; |
| 1672 | 1753 | ||
| 1673 | lp = shost_priv(sc_cmd->device->host); | 1754 | lport = shost_priv(sc_cmd->device->host); |
| 1674 | 1755 | ||
| 1675 | rval = fc_remote_port_chkready(rport); | 1756 | rval = fc_remote_port_chkready(rport); |
| 1676 | if (rval) { | 1757 | if (rval) { |
| @@ -1689,14 +1770,16 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
| 1689 | goto out; | 1770 | goto out; |
| 1690 | } | 1771 | } |
| 1691 | 1772 | ||
| 1692 | rp = rport->dd_data; | 1773 | rpriv = rport->dd_data; |
| 1693 | 1774 | ||
| 1694 | if (!fc_fcp_lport_queue_ready(lp)) { | 1775 | if (!fc_fcp_lport_queue_ready(lport)) { |
| 1776 | if (lport->qfull) | ||
| 1777 | fc_fcp_can_queue_ramp_down(lport); | ||
| 1695 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1778 | rc = SCSI_MLQUEUE_HOST_BUSY; |
| 1696 | goto out; | 1779 | goto out; |
| 1697 | } | 1780 | } |
| 1698 | 1781 | ||
| 1699 | fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); | 1782 | fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); |
| 1700 | if (fsp == NULL) { | 1783 | if (fsp == NULL) { |
| 1701 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1784 | rc = SCSI_MLQUEUE_HOST_BUSY; |
| 1702 | goto out; | 1785 | goto out; |
| @@ -1706,8 +1789,9 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
| 1706 | * build the libfc request pkt | 1789 | * build the libfc request pkt |
| 1707 | */ | 1790 | */ |
| 1708 | fsp->cmd = sc_cmd; /* save the cmd */ | 1791 | fsp->cmd = sc_cmd; /* save the cmd */ |
| 1709 | fsp->lp = lp; /* save the softc ptr */ | 1792 | fsp->lp = lport; /* save the softc ptr */ |
| 1710 | fsp->rport = rport; /* set the remote port ptr */ | 1793 | fsp->rport = rport; /* set the remote port ptr */ |
| 1794 | fsp->xfer_ddp = FC_XID_UNKNOWN; | ||
| 1711 | sc_cmd->scsi_done = done; | 1795 | sc_cmd->scsi_done = done; |
| 1712 | 1796 | ||
| 1713 | /* | 1797 | /* |
| @@ -1719,7 +1803,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
| 1719 | /* | 1803 | /* |
| 1720 | * setup the data direction | 1804 | * setup the data direction |
| 1721 | */ | 1805 | */ |
| 1722 | stats = fc_lport_get_stats(lp); | 1806 | stats = fc_lport_get_stats(lport); |
| 1723 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { | 1807 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { |
| 1724 | fsp->req_flags = FC_SRB_READ; | 1808 | fsp->req_flags = FC_SRB_READ; |
| 1725 | stats->InputRequests++; | 1809 | stats->InputRequests++; |
| @@ -1733,7 +1817,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
| 1733 | stats->ControlRequests++; | 1817 | stats->ControlRequests++; |
| 1734 | } | 1818 | } |
| 1735 | 1819 | ||
| 1736 | fsp->tgt_flags = rp->flags; | 1820 | fsp->tgt_flags = rpriv->flags; |
| 1737 | 1821 | ||
| 1738 | init_timer(&fsp->timer); | 1822 | init_timer(&fsp->timer); |
| 1739 | fsp->timer.data = (unsigned long)fsp; | 1823 | fsp->timer.data = (unsigned long)fsp; |
| @@ -1743,7 +1827,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
| 1743 | * if we get -1 return then put the request in the pending | 1827 | * if we get -1 return then put the request in the pending |
| 1744 | * queue. | 1828 | * queue. |
| 1745 | */ | 1829 | */ |
| 1746 | rval = fc_fcp_pkt_send(lp, fsp); | 1830 | rval = fc_fcp_pkt_send(lport, fsp); |
| 1747 | if (rval != 0) { | 1831 | if (rval != 0) { |
| 1748 | fsp->state = FC_SRB_FREE; | 1832 | fsp->state = FC_SRB_FREE; |
| 1749 | fc_fcp_pkt_release(fsp); | 1833 | fc_fcp_pkt_release(fsp); |
| @@ -1755,18 +1839,17 @@ out: | |||
| 1755 | EXPORT_SYMBOL(fc_queuecommand); | 1839 | EXPORT_SYMBOL(fc_queuecommand); |
| 1756 | 1840 | ||
| 1757 | /** | 1841 | /** |
| 1758 | * fc_io_compl() - Handle responses for completed commands | 1842 | * fc_io_compl() - Handle responses for completed commands |
| 1759 | * @fsp: scsi packet | 1843 | * @fsp: The FCP packet that is complete |
| 1760 | * | ||
| 1761 | * Translates a error to a Linux SCSI error. | ||
| 1762 | * | 1844 | * |
| 1845 | * Translates fcp_pkt errors to a Linux SCSI errors. | ||
| 1763 | * The fcp packet lock must be held when calling. | 1846 | * The fcp packet lock must be held when calling. |
| 1764 | */ | 1847 | */ |
| 1765 | static void fc_io_compl(struct fc_fcp_pkt *fsp) | 1848 | static void fc_io_compl(struct fc_fcp_pkt *fsp) |
| 1766 | { | 1849 | { |
| 1767 | struct fc_fcp_internal *si; | 1850 | struct fc_fcp_internal *si; |
| 1768 | struct scsi_cmnd *sc_cmd; | 1851 | struct scsi_cmnd *sc_cmd; |
| 1769 | struct fc_lport *lp; | 1852 | struct fc_lport *lport; |
| 1770 | unsigned long flags; | 1853 | unsigned long flags; |
| 1771 | 1854 | ||
| 1772 | /* release outstanding ddp context */ | 1855 | /* release outstanding ddp context */ |
| @@ -1779,28 +1862,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
| 1779 | spin_lock_bh(&fsp->scsi_pkt_lock); | 1862 | spin_lock_bh(&fsp->scsi_pkt_lock); |
| 1780 | } | 1863 | } |
| 1781 | 1864 | ||
| 1782 | lp = fsp->lp; | 1865 | lport = fsp->lp; |
| 1783 | si = fc_get_scsi_internal(lp); | 1866 | si = fc_get_scsi_internal(lport); |
| 1784 | spin_lock_irqsave(lp->host->host_lock, flags); | 1867 | spin_lock_irqsave(lport->host->host_lock, flags); |
| 1785 | if (!fsp->cmd) { | 1868 | if (!fsp->cmd) { |
| 1786 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1869 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 1787 | return; | 1870 | return; |
| 1788 | } | 1871 | } |
| 1789 | 1872 | ||
| 1790 | /* | 1873 | /* |
| 1791 | * if a command timed out while we had to try and throttle IO | 1874 | * if can_queue ramp down is done then try can_queue ramp up |
| 1792 | * and it is now getting cleaned up, then we are about to | 1875 | * since commands are completing now. |
| 1793 | * try again so clear the throttled flag incase we get more | ||
| 1794 | * time outs. | ||
| 1795 | */ | 1876 | */ |
| 1796 | if (si->throttled && fsp->state & FC_SRB_NOMEM) | 1877 | if (si->last_can_queue_ramp_down_time) |
| 1797 | si->throttled = 0; | 1878 | fc_fcp_can_queue_ramp_up(lport); |
| 1798 | 1879 | ||
| 1799 | sc_cmd = fsp->cmd; | 1880 | sc_cmd = fsp->cmd; |
| 1800 | fsp->cmd = NULL; | 1881 | fsp->cmd = NULL; |
| 1801 | 1882 | ||
| 1802 | if (!sc_cmd->SCp.ptr) { | 1883 | if (!sc_cmd->SCp.ptr) { |
| 1803 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1884 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 1804 | return; | 1885 | return; |
| 1805 | } | 1886 | } |
| 1806 | 1887 | ||
| @@ -1814,21 +1895,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
| 1814 | sc_cmd->result = DID_OK << 16; | 1895 | sc_cmd->result = DID_OK << 16; |
| 1815 | if (fsp->scsi_resid) | 1896 | if (fsp->scsi_resid) |
| 1816 | CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; | 1897 | CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; |
| 1817 | } else if (fsp->cdb_status == QUEUE_FULL) { | ||
| 1818 | struct scsi_device *tmp_sdev; | ||
| 1819 | struct scsi_device *sdev = sc_cmd->device; | ||
| 1820 | |||
| 1821 | shost_for_each_device(tmp_sdev, sdev->host) { | ||
| 1822 | if (tmp_sdev->id != sdev->id) | ||
| 1823 | continue; | ||
| 1824 | |||
| 1825 | if (tmp_sdev->queue_depth > 1) { | ||
| 1826 | scsi_track_queue_full(tmp_sdev, | ||
| 1827 | tmp_sdev-> | ||
| 1828 | queue_depth - 1); | ||
| 1829 | } | ||
| 1830 | } | ||
| 1831 | sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; | ||
| 1832 | } else { | 1898 | } else { |
| 1833 | /* | 1899 | /* |
| 1834 | * transport level I/O was ok but scsi | 1900 | * transport level I/O was ok but scsi |
| @@ -1846,7 +1912,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
| 1846 | * scsi status is good but transport level | 1912 | * scsi status is good but transport level |
| 1847 | * underrun. | 1913 | * underrun. |
| 1848 | */ | 1914 | */ |
| 1849 | sc_cmd->result = DID_OK << 16; | 1915 | sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? |
| 1916 | DID_OK : DID_ERROR) << 16; | ||
| 1850 | } else { | 1917 | } else { |
| 1851 | /* | 1918 | /* |
| 1852 | * scsi got underrun, this is an error | 1919 | * scsi got underrun, this is an error |
| @@ -1881,60 +1948,42 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
| 1881 | list_del(&fsp->list); | 1948 | list_del(&fsp->list); |
| 1882 | sc_cmd->SCp.ptr = NULL; | 1949 | sc_cmd->SCp.ptr = NULL; |
| 1883 | sc_cmd->scsi_done(sc_cmd); | 1950 | sc_cmd->scsi_done(sc_cmd); |
| 1884 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1951 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 1885 | 1952 | ||
| 1886 | /* release ref from initial allocation in queue command */ | 1953 | /* release ref from initial allocation in queue command */ |
| 1887 | fc_fcp_pkt_release(fsp); | 1954 | fc_fcp_pkt_release(fsp); |
| 1888 | } | 1955 | } |
| 1889 | 1956 | ||
| 1890 | /** | 1957 | /** |
| 1891 | * fc_fcp_complete() - complete processing of a fcp packet | ||
| 1892 | * @fsp: fcp packet | ||
| 1893 | * | ||
| 1894 | * This function may sleep if a fsp timer is pending. | ||
| 1895 | * The host lock must not be held by caller. | ||
| 1896 | */ | ||
| 1897 | void fc_fcp_complete(struct fc_fcp_pkt *fsp) | ||
| 1898 | { | ||
| 1899 | if (fc_fcp_lock_pkt(fsp)) | ||
| 1900 | return; | ||
| 1901 | |||
| 1902 | fc_fcp_complete_locked(fsp); | ||
| 1903 | fc_fcp_unlock_pkt(fsp); | ||
| 1904 | } | ||
| 1905 | EXPORT_SYMBOL(fc_fcp_complete); | ||
| 1906 | |||
| 1907 | /** | ||
| 1908 | * fc_eh_abort() - Abort a command | 1958 | * fc_eh_abort() - Abort a command |
| 1909 | * @sc_cmd: scsi command to abort | 1959 | * @sc_cmd: The SCSI command to abort |
| 1910 | * | 1960 | * |
| 1911 | * From scsi host template. | 1961 | * From SCSI host template. |
| 1912 | * send ABTS to the target device and wait for the response | 1962 | * Send an ABTS to the target device and wait for the response. |
| 1913 | * sc_cmd is the pointer to the command to be aborted. | ||
| 1914 | */ | 1963 | */ |
| 1915 | int fc_eh_abort(struct scsi_cmnd *sc_cmd) | 1964 | int fc_eh_abort(struct scsi_cmnd *sc_cmd) |
| 1916 | { | 1965 | { |
| 1917 | struct fc_fcp_pkt *fsp; | 1966 | struct fc_fcp_pkt *fsp; |
| 1918 | struct fc_lport *lp; | 1967 | struct fc_lport *lport; |
| 1919 | int rc = FAILED; | 1968 | int rc = FAILED; |
| 1920 | unsigned long flags; | 1969 | unsigned long flags; |
| 1921 | 1970 | ||
| 1922 | lp = shost_priv(sc_cmd->device->host); | 1971 | lport = shost_priv(sc_cmd->device->host); |
| 1923 | if (lp->state != LPORT_ST_READY) | 1972 | if (lport->state != LPORT_ST_READY) |
| 1924 | return rc; | 1973 | return rc; |
| 1925 | else if (!lp->link_up) | 1974 | else if (!lport->link_up) |
| 1926 | return rc; | 1975 | return rc; |
| 1927 | 1976 | ||
| 1928 | spin_lock_irqsave(lp->host->host_lock, flags); | 1977 | spin_lock_irqsave(lport->host->host_lock, flags); |
| 1929 | fsp = CMD_SP(sc_cmd); | 1978 | fsp = CMD_SP(sc_cmd); |
| 1930 | if (!fsp) { | 1979 | if (!fsp) { |
| 1931 | /* command completed while scsi eh was setting up */ | 1980 | /* command completed while scsi eh was setting up */ |
| 1932 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1981 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 1933 | return SUCCESS; | 1982 | return SUCCESS; |
| 1934 | } | 1983 | } |
| 1935 | /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ | 1984 | /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ |
| 1936 | fc_fcp_pkt_hold(fsp); | 1985 | fc_fcp_pkt_hold(fsp); |
| 1937 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1986 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
| 1938 | 1987 | ||
| 1939 | if (fc_fcp_lock_pkt(fsp)) { | 1988 | if (fc_fcp_lock_pkt(fsp)) { |
| 1940 | /* completed while we were waiting for timer to be deleted */ | 1989 | /* completed while we were waiting for timer to be deleted */ |
| @@ -1942,7 +1991,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) | |||
| 1942 | goto release_pkt; | 1991 | goto release_pkt; |
| 1943 | } | 1992 | } |
| 1944 | 1993 | ||
| 1945 | rc = fc_fcp_pkt_abort(lp, fsp); | 1994 | rc = fc_fcp_pkt_abort(fsp); |
| 1946 | fc_fcp_unlock_pkt(fsp); | 1995 | fc_fcp_unlock_pkt(fsp); |
| 1947 | 1996 | ||
| 1948 | release_pkt: | 1997 | release_pkt: |
| @@ -1952,37 +2001,34 @@ release_pkt: | |||
| 1952 | EXPORT_SYMBOL(fc_eh_abort); | 2001 | EXPORT_SYMBOL(fc_eh_abort); |
| 1953 | 2002 | ||
| 1954 | /** | 2003 | /** |
| 1955 | * fc_eh_device_reset() Reset a single LUN | 2004 | * fc_eh_device_reset() - Reset a single LUN |
| 1956 | * @sc_cmd: scsi command | 2005 | * @sc_cmd: The SCSI command which identifies the device whose |
| 2006 | * LUN is to be reset | ||
| 1957 | * | 2007 | * |
| 1958 | * Set from scsi host template to send tm cmd to the target and wait for the | 2008 | * Set from SCSI host template. |
| 1959 | * response. | ||
| 1960 | */ | 2009 | */ |
| 1961 | int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | 2010 | int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) |
| 1962 | { | 2011 | { |
| 1963 | struct fc_lport *lp; | 2012 | struct fc_lport *lport; |
| 1964 | struct fc_fcp_pkt *fsp; | 2013 | struct fc_fcp_pkt *fsp; |
| 1965 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 2014 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
| 1966 | int rc = FAILED; | 2015 | int rc = FAILED; |
| 1967 | struct fc_rport_libfc_priv *rp; | ||
| 1968 | int rval; | 2016 | int rval; |
| 1969 | 2017 | ||
| 1970 | rval = fc_remote_port_chkready(rport); | 2018 | rval = fc_remote_port_chkready(rport); |
| 1971 | if (rval) | 2019 | if (rval) |
| 1972 | goto out; | 2020 | goto out; |
| 1973 | 2021 | ||
| 1974 | rp = rport->dd_data; | 2022 | lport = shost_priv(sc_cmd->device->host); |
| 1975 | lp = shost_priv(sc_cmd->device->host); | ||
| 1976 | 2023 | ||
| 1977 | if (lp->state != LPORT_ST_READY) | 2024 | if (lport->state != LPORT_ST_READY) |
| 1978 | return rc; | 2025 | return rc; |
| 1979 | 2026 | ||
| 1980 | FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); | 2027 | FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id); |
| 1981 | 2028 | ||
| 1982 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); | 2029 | fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); |
| 1983 | if (fsp == NULL) { | 2030 | if (fsp == NULL) { |
| 1984 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); | 2031 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); |
| 1985 | sc_cmd->result = DID_NO_CONNECT << 16; | ||
| 1986 | goto out; | 2032 | goto out; |
| 1987 | } | 2033 | } |
| 1988 | 2034 | ||
| @@ -1991,13 +2037,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | |||
| 1991 | * the sc passed in is not setup for execution like when sent | 2037 | * the sc passed in is not setup for execution like when sent |
| 1992 | * through the queuecommand callout. | 2038 | * through the queuecommand callout. |
| 1993 | */ | 2039 | */ |
| 1994 | fsp->lp = lp; /* save the softc ptr */ | 2040 | fsp->lp = lport; /* save the softc ptr */ |
| 1995 | fsp->rport = rport; /* set the remote port ptr */ | 2041 | fsp->rport = rport; /* set the remote port ptr */ |
| 1996 | 2042 | ||
| 1997 | /* | 2043 | /* |
| 1998 | * flush outstanding commands | 2044 | * flush outstanding commands |
| 1999 | */ | 2045 | */ |
| 2000 | rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); | 2046 | rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); |
| 2001 | fsp->state = FC_SRB_FREE; | 2047 | fsp->state = FC_SRB_FREE; |
| 2002 | fc_fcp_pkt_release(fsp); | 2048 | fc_fcp_pkt_release(fsp); |
| 2003 | 2049 | ||
| @@ -2007,38 +2053,39 @@ out: | |||
| 2007 | EXPORT_SYMBOL(fc_eh_device_reset); | 2053 | EXPORT_SYMBOL(fc_eh_device_reset); |
| 2008 | 2054 | ||
| 2009 | /** | 2055 | /** |
| 2010 | * fc_eh_host_reset() - The reset function will reset the ports on the host. | 2056 | * fc_eh_host_reset() - Reset a Scsi_Host. |
| 2011 | * @sc_cmd: scsi command | 2057 | * @sc_cmd: The SCSI command that identifies the SCSI host to be reset |
| 2012 | */ | 2058 | */ |
| 2013 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | 2059 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) |
| 2014 | { | 2060 | { |
| 2015 | struct Scsi_Host *shost = sc_cmd->device->host; | 2061 | struct Scsi_Host *shost = sc_cmd->device->host; |
| 2016 | struct fc_lport *lp = shost_priv(shost); | 2062 | struct fc_lport *lport = shost_priv(shost); |
| 2017 | unsigned long wait_tmo; | 2063 | unsigned long wait_tmo; |
| 2018 | 2064 | ||
| 2019 | FC_SCSI_DBG(lp, "Resetting host\n"); | 2065 | FC_SCSI_DBG(lport, "Resetting host\n"); |
| 2020 | 2066 | ||
| 2021 | lp->tt.lport_reset(lp); | 2067 | lport->tt.lport_reset(lport); |
| 2022 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; | 2068 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; |
| 2023 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) | 2069 | while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, |
| 2070 | wait_tmo)) | ||
| 2024 | msleep(1000); | 2071 | msleep(1000); |
| 2025 | 2072 | ||
| 2026 | if (fc_fcp_lport_queue_ready(lp)) { | 2073 | if (fc_fcp_lport_queue_ready(lport)) { |
| 2027 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " | 2074 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " |
| 2028 | "on port (%6x)\n", fc_host_port_id(lp->host)); | 2075 | "on port (%6x)\n", fc_host_port_id(lport->host)); |
| 2029 | return SUCCESS; | 2076 | return SUCCESS; |
| 2030 | } else { | 2077 | } else { |
| 2031 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " | 2078 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " |
| 2032 | "port (%6x) is not ready.\n", | 2079 | "port (%6x) is not ready.\n", |
| 2033 | fc_host_port_id(lp->host)); | 2080 | fc_host_port_id(lport->host)); |
| 2034 | return FAILED; | 2081 | return FAILED; |
| 2035 | } | 2082 | } |
| 2036 | } | 2083 | } |
| 2037 | EXPORT_SYMBOL(fc_eh_host_reset); | 2084 | EXPORT_SYMBOL(fc_eh_host_reset); |
| 2038 | 2085 | ||
| 2039 | /** | 2086 | /** |
| 2040 | * fc_slave_alloc() - configure queue depth | 2087 | * fc_slave_alloc() - Configure the queue depth of a Scsi_Host |
| 2041 | * @sdev: scsi device | 2088 | * @sdev: The SCSI device that identifies the SCSI host |
| 2042 | * | 2089 | * |
| 2043 | * Configures queue depth based on host's cmd_per_len. If not set | 2090 | * Configures queue depth based on host's cmd_per_len. If not set |
| 2044 | * then we use the libfc default. | 2091 | * then we use the libfc default. |
| @@ -2046,29 +2093,50 @@ EXPORT_SYMBOL(fc_eh_host_reset); | |||
| 2046 | int fc_slave_alloc(struct scsi_device *sdev) | 2093 | int fc_slave_alloc(struct scsi_device *sdev) |
| 2047 | { | 2094 | { |
| 2048 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | 2095 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
| 2049 | int queue_depth; | ||
| 2050 | 2096 | ||
| 2051 | if (!rport || fc_remote_port_chkready(rport)) | 2097 | if (!rport || fc_remote_port_chkready(rport)) |
| 2052 | return -ENXIO; | 2098 | return -ENXIO; |
| 2053 | 2099 | ||
| 2054 | if (sdev->tagged_supported) { | 2100 | if (sdev->tagged_supported) |
| 2055 | if (sdev->host->hostt->cmd_per_lun) | 2101 | scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); |
| 2056 | queue_depth = sdev->host->hostt->cmd_per_lun; | 2102 | else |
| 2057 | else | 2103 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), |
| 2058 | queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; | 2104 | FC_FCP_DFLT_QUEUE_DEPTH); |
| 2059 | scsi_activate_tcq(sdev, queue_depth); | 2105 | |
| 2060 | } | ||
| 2061 | return 0; | 2106 | return 0; |
| 2062 | } | 2107 | } |
| 2063 | EXPORT_SYMBOL(fc_slave_alloc); | 2108 | EXPORT_SYMBOL(fc_slave_alloc); |
| 2064 | 2109 | ||
| 2065 | int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) | 2110 | /** |
| 2111 | * fc_change_queue_depth() - Change a device's queue depth | ||
| 2112 | * @sdev: The SCSI device whose queue depth is to change | ||
| 2113 | * @qdepth: The new queue depth | ||
| 2114 | * @reason: The resason for the change | ||
| 2115 | */ | ||
| 2116 | int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | ||
| 2066 | { | 2117 | { |
| 2067 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | 2118 | switch (reason) { |
| 2119 | case SCSI_QDEPTH_DEFAULT: | ||
| 2120 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | ||
| 2121 | break; | ||
| 2122 | case SCSI_QDEPTH_QFULL: | ||
| 2123 | scsi_track_queue_full(sdev, qdepth); | ||
| 2124 | break; | ||
| 2125 | case SCSI_QDEPTH_RAMP_UP: | ||
| 2126 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | ||
| 2127 | break; | ||
| 2128 | default: | ||
| 2129 | return -EOPNOTSUPP; | ||
| 2130 | } | ||
| 2068 | return sdev->queue_depth; | 2131 | return sdev->queue_depth; |
| 2069 | } | 2132 | } |
| 2070 | EXPORT_SYMBOL(fc_change_queue_depth); | 2133 | EXPORT_SYMBOL(fc_change_queue_depth); |
| 2071 | 2134 | ||
| 2135 | /** | ||
| 2136 | * fc_change_queue_type() - Change a device's queue type | ||
| 2137 | * @sdev: The SCSI device whose queue depth is to change | ||
| 2138 | * @tag_type: Identifier for queue type | ||
| 2139 | */ | ||
| 2072 | int fc_change_queue_type(struct scsi_device *sdev, int tag_type) | 2140 | int fc_change_queue_type(struct scsi_device *sdev, int tag_type) |
| 2073 | { | 2141 | { |
| 2074 | if (sdev->tagged_supported) { | 2142 | if (sdev->tagged_supported) { |
| @@ -2084,38 +2152,69 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) | |||
| 2084 | } | 2152 | } |
| 2085 | EXPORT_SYMBOL(fc_change_queue_type); | 2153 | EXPORT_SYMBOL(fc_change_queue_type); |
| 2086 | 2154 | ||
| 2087 | void fc_fcp_destroy(struct fc_lport *lp) | 2155 | /** |
| 2156 | * fc_fcp_destory() - Tear down the FCP layer for a given local port | ||
| 2157 | * @lport: The local port that no longer needs the FCP layer | ||
| 2158 | */ | ||
| 2159 | void fc_fcp_destroy(struct fc_lport *lport) | ||
| 2088 | { | 2160 | { |
| 2089 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 2161 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
| 2090 | 2162 | ||
| 2091 | if (!list_empty(&si->scsi_pkt_queue)) | 2163 | if (!list_empty(&si->scsi_pkt_queue)) |
| 2092 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " | 2164 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " |
| 2093 | "port (%6x)\n", fc_host_port_id(lp->host)); | 2165 | "port (%6x)\n", fc_host_port_id(lport->host)); |
| 2094 | 2166 | ||
| 2095 | mempool_destroy(si->scsi_pkt_pool); | 2167 | mempool_destroy(si->scsi_pkt_pool); |
| 2096 | kfree(si); | 2168 | kfree(si); |
| 2097 | lp->scsi_priv = NULL; | 2169 | lport->scsi_priv = NULL; |
| 2098 | } | 2170 | } |
| 2099 | EXPORT_SYMBOL(fc_fcp_destroy); | 2171 | EXPORT_SYMBOL(fc_fcp_destroy); |
| 2100 | 2172 | ||
| 2101 | int fc_fcp_init(struct fc_lport *lp) | 2173 | int fc_setup_fcp() |
| 2174 | { | ||
| 2175 | int rc = 0; | ||
| 2176 | |||
| 2177 | scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", | ||
| 2178 | sizeof(struct fc_fcp_pkt), | ||
| 2179 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
| 2180 | if (!scsi_pkt_cachep) { | ||
| 2181 | printk(KERN_ERR "libfc: Unable to allocate SRB cache, " | ||
| 2182 | "module load failed!"); | ||
| 2183 | rc = -ENOMEM; | ||
| 2184 | } | ||
| 2185 | |||
| 2186 | return rc; | ||
| 2187 | } | ||
| 2188 | |||
| 2189 | void fc_destroy_fcp() | ||
| 2190 | { | ||
| 2191 | if (scsi_pkt_cachep) | ||
| 2192 | kmem_cache_destroy(scsi_pkt_cachep); | ||
| 2193 | } | ||
| 2194 | |||
| 2195 | /** | ||
| 2196 | * fc_fcp_init() - Initialize the FCP layer for a local port | ||
| 2197 | * @lport: The local port to initialize the exchange layer for | ||
| 2198 | */ | ||
| 2199 | int fc_fcp_init(struct fc_lport *lport) | ||
| 2102 | { | 2200 | { |
| 2103 | int rc; | 2201 | int rc; |
| 2104 | struct fc_fcp_internal *si; | 2202 | struct fc_fcp_internal *si; |
| 2105 | 2203 | ||
| 2106 | if (!lp->tt.fcp_cmd_send) | 2204 | if (!lport->tt.fcp_cmd_send) |
| 2107 | lp->tt.fcp_cmd_send = fc_fcp_cmd_send; | 2205 | lport->tt.fcp_cmd_send = fc_fcp_cmd_send; |
| 2108 | 2206 | ||
| 2109 | if (!lp->tt.fcp_cleanup) | 2207 | if (!lport->tt.fcp_cleanup) |
| 2110 | lp->tt.fcp_cleanup = fc_fcp_cleanup; | 2208 | lport->tt.fcp_cleanup = fc_fcp_cleanup; |
| 2111 | 2209 | ||
| 2112 | if (!lp->tt.fcp_abort_io) | 2210 | if (!lport->tt.fcp_abort_io) |
| 2113 | lp->tt.fcp_abort_io = fc_fcp_abort_io; | 2211 | lport->tt.fcp_abort_io = fc_fcp_abort_io; |
| 2114 | 2212 | ||
| 2115 | si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); | 2213 | si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); |
| 2116 | if (!si) | 2214 | if (!si) |
| 2117 | return -ENOMEM; | 2215 | return -ENOMEM; |
| 2118 | lp->scsi_priv = si; | 2216 | lport->scsi_priv = si; |
| 2217 | si->max_can_queue = lport->host->can_queue; | ||
| 2119 | INIT_LIST_HEAD(&si->scsi_pkt_queue); | 2218 | INIT_LIST_HEAD(&si->scsi_pkt_queue); |
| 2120 | 2219 | ||
| 2121 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); | 2220 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); |
| @@ -2130,42 +2229,3 @@ free_internal: | |||
| 2130 | return rc; | 2229 | return rc; |
| 2131 | } | 2230 | } |
| 2132 | EXPORT_SYMBOL(fc_fcp_init); | 2231 | EXPORT_SYMBOL(fc_fcp_init); |
| 2133 | |||
| 2134 | static int __init libfc_init(void) | ||
| 2135 | { | ||
| 2136 | int rc; | ||
| 2137 | |||
| 2138 | scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", | ||
| 2139 | sizeof(struct fc_fcp_pkt), | ||
| 2140 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
| 2141 | if (scsi_pkt_cachep == NULL) { | ||
| 2142 | printk(KERN_ERR "libfc: Unable to allocate SRB cache, " | ||
| 2143 | "module load failed!"); | ||
| 2144 | return -ENOMEM; | ||
| 2145 | } | ||
| 2146 | |||
| 2147 | rc = fc_setup_exch_mgr(); | ||
| 2148 | if (rc) | ||
| 2149 | goto destroy_pkt_cache; | ||
| 2150 | |||
| 2151 | rc = fc_setup_rport(); | ||
| 2152 | if (rc) | ||
| 2153 | goto destroy_em; | ||
| 2154 | |||
| 2155 | return rc; | ||
| 2156 | destroy_em: | ||
| 2157 | fc_destroy_exch_mgr(); | ||
| 2158 | destroy_pkt_cache: | ||
| 2159 | kmem_cache_destroy(scsi_pkt_cachep); | ||
| 2160 | return rc; | ||
| 2161 | } | ||
| 2162 | |||
| 2163 | static void __exit libfc_exit(void) | ||
| 2164 | { | ||
| 2165 | kmem_cache_destroy(scsi_pkt_cachep); | ||
| 2166 | fc_destroy_exch_mgr(); | ||
| 2167 | fc_destroy_rport(); | ||
| 2168 | } | ||
| 2169 | |||
| 2170 | module_init(libfc_init); | ||
| 2171 | module_exit(libfc_exit); | ||
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 63fe00cfe667..6da01c616964 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c | |||
| @@ -51,24 +51,24 @@ EXPORT_SYMBOL(fc_frame_crc_check); | |||
| 51 | * Allocate a frame intended to be sent via fcoe_xmit. | 51 | * Allocate a frame intended to be sent via fcoe_xmit. |
| 52 | * Get an sk_buff for the frame and set the length. | 52 | * Get an sk_buff for the frame and set the length. |
| 53 | */ | 53 | */ |
| 54 | struct fc_frame *__fc_frame_alloc(size_t len) | 54 | struct fc_frame *_fc_frame_alloc(size_t len) |
| 55 | { | 55 | { |
| 56 | struct fc_frame *fp; | 56 | struct fc_frame *fp; |
| 57 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
| 58 | 58 | ||
| 59 | WARN_ON((len % sizeof(u32)) != 0); | 59 | WARN_ON((len % sizeof(u32)) != 0); |
| 60 | len += sizeof(struct fc_frame_header); | 60 | len += sizeof(struct fc_frame_header); |
| 61 | skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM); | 61 | skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + |
| 62 | NET_SKB_PAD, GFP_ATOMIC); | ||
| 62 | if (!skb) | 63 | if (!skb) |
| 63 | return NULL; | 64 | return NULL; |
| 65 | skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); | ||
| 64 | fp = (struct fc_frame *) skb; | 66 | fp = (struct fc_frame *) skb; |
| 65 | fc_frame_init(fp); | 67 | fc_frame_init(fp); |
| 66 | skb_reserve(skb, FC_FRAME_HEADROOM); | ||
| 67 | skb_put(skb, len); | 68 | skb_put(skb, len); |
| 68 | return fp; | 69 | return fp; |
| 69 | } | 70 | } |
| 70 | EXPORT_SYMBOL(__fc_frame_alloc); | 71 | EXPORT_SYMBOL(_fc_frame_alloc); |
| 71 | |||
| 72 | 72 | ||
| 73 | struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) | 73 | struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) |
| 74 | { | 74 | { |
| @@ -78,7 +78,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) | |||
| 78 | fill = payload_len % 4; | 78 | fill = payload_len % 4; |
| 79 | if (fill != 0) | 79 | if (fill != 0) |
| 80 | fill = 4 - fill; | 80 | fill = 4 - fill; |
| 81 | fp = __fc_frame_alloc(payload_len + fill); | 81 | fp = _fc_frame_alloc(payload_len + fill); |
| 82 | if (fp) { | 82 | if (fp) { |
| 83 | memset((char *) fr_hdr(fp) + payload_len, 0, fill); | 83 | memset((char *) fr_hdr(fp) + payload_len, 0, fill); |
| 84 | /* trim is OK, we just allocated it so there are no fragments */ | 84 | /* trim is OK, we just allocated it so there are no fragments */ |
| @@ -87,3 +87,4 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) | |||
| 87 | } | 87 | } |
| 88 | return fp; | 88 | return fp; |
| 89 | } | 89 | } |
| 90 | EXPORT_SYMBOL(fc_frame_alloc_fill); | ||
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c new file mode 100644 index 000000000000..39f4b6ab04b4 --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.c | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 16 | * | ||
| 17 | * Maintained at www.Open-FCoE.org | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/kernel.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/scatterlist.h> | ||
| 23 | #include <linux/crc32.h> | ||
| 24 | |||
| 25 | #include <scsi/libfc.h> | ||
| 26 | |||
| 27 | #include "fc_libfc.h" | ||
| 28 | |||
| 29 | MODULE_AUTHOR("Open-FCoE.org"); | ||
| 30 | MODULE_DESCRIPTION("libfc"); | ||
| 31 | MODULE_LICENSE("GPL v2"); | ||
| 32 | |||
| 33 | unsigned int fc_debug_logging; | ||
| 34 | module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); | ||
| 35 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
| 36 | |||
| 37 | /** | ||
| 38 | * libfc_init() - Initialize libfc.ko | ||
| 39 | */ | ||
| 40 | static int __init libfc_init(void) | ||
| 41 | { | ||
| 42 | int rc = 0; | ||
| 43 | |||
| 44 | rc = fc_setup_fcp(); | ||
| 45 | if (rc) | ||
| 46 | return rc; | ||
| 47 | |||
| 48 | rc = fc_setup_exch_mgr(); | ||
| 49 | if (rc) | ||
| 50 | goto destroy_pkt_cache; | ||
| 51 | |||
| 52 | rc = fc_setup_rport(); | ||
| 53 | if (rc) | ||
| 54 | goto destroy_em; | ||
| 55 | |||
| 56 | return rc; | ||
| 57 | destroy_em: | ||
| 58 | fc_destroy_exch_mgr(); | ||
| 59 | destroy_pkt_cache: | ||
| 60 | fc_destroy_fcp(); | ||
| 61 | return rc; | ||
| 62 | } | ||
| 63 | module_init(libfc_init); | ||
| 64 | |||
| 65 | /** | ||
| 66 | * libfc_exit() - Tear down libfc.ko | ||
| 67 | */ | ||
| 68 | static void __exit libfc_exit(void) | ||
| 69 | { | ||
| 70 | fc_destroy_fcp(); | ||
| 71 | fc_destroy_exch_mgr(); | ||
| 72 | fc_destroy_rport(); | ||
| 73 | } | ||
| 74 | module_exit(libfc_exit); | ||
| 75 | |||
| 76 | /** | ||
| 77 | * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer | ||
| 78 | * into a scatter-gather list (SG list). | ||
| 79 | * | ||
| 80 | * @buf: pointer to the data buffer. | ||
| 81 | * @len: the byte-length of the data buffer. | ||
| 82 | * @sg: pointer to the pointer of the SG list. | ||
| 83 | * @nents: pointer to the remaining number of entries in the SG list. | ||
| 84 | * @offset: pointer to the current offset in the SG list. | ||
| 85 | * @km_type: dedicated page table slot type for kmap_atomic. | ||
| 86 | * @crc: pointer to the 32-bit crc value. | ||
| 87 | * If crc is NULL, CRC is not calculated. | ||
| 88 | */ | ||
| 89 | u32 fc_copy_buffer_to_sglist(void *buf, size_t len, | ||
| 90 | struct scatterlist *sg, | ||
| 91 | u32 *nents, size_t *offset, | ||
| 92 | enum km_type km_type, u32 *crc) | ||
| 93 | { | ||
| 94 | size_t remaining = len; | ||
| 95 | u32 copy_len = 0; | ||
| 96 | |||
| 97 | while (remaining > 0 && sg) { | ||
| 98 | size_t off, sg_bytes; | ||
| 99 | void *page_addr; | ||
| 100 | |||
| 101 | if (*offset >= sg->length) { | ||
| 102 | /* | ||
| 103 | * Check for end and drop resources | ||
| 104 | * from the last iteration. | ||
| 105 | */ | ||
| 106 | if (!(*nents)) | ||
| 107 | break; | ||
| 108 | --(*nents); | ||
| 109 | *offset -= sg->length; | ||
| 110 | sg = sg_next(sg); | ||
| 111 | continue; | ||
| 112 | } | ||
| 113 | sg_bytes = min(remaining, sg->length - *offset); | ||
| 114 | |||
| 115 | /* | ||
| 116 | * The scatterlist item may be bigger than PAGE_SIZE, | ||
| 117 | * but we are limited to mapping PAGE_SIZE at a time. | ||
| 118 | */ | ||
| 119 | off = *offset + sg->offset; | ||
| 120 | sg_bytes = min(sg_bytes, | ||
| 121 | (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); | ||
| 122 | page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), | ||
| 123 | km_type); | ||
| 124 | if (crc) | ||
| 125 | *crc = crc32(*crc, buf, sg_bytes); | ||
| 126 | memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); | ||
| 127 | kunmap_atomic(page_addr, km_type); | ||
| 128 | buf += sg_bytes; | ||
| 129 | *offset += sg_bytes; | ||
| 130 | remaining -= sg_bytes; | ||
| 131 | copy_len += sg_bytes; | ||
| 132 | } | ||
| 133 | return copy_len; | ||
| 134 | } | ||
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h new file mode 100644 index 000000000000..741fd5c72e13 --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.h | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 16 | * | ||
| 17 | * Maintained at www.Open-FCoE.org | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifndef _FC_LIBFC_H_ | ||
| 21 | #define _FC_LIBFC_H_ | ||
| 22 | |||
| 23 | #define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ | ||
| 24 | #define FC_LPORT_LOGGING 0x02 /* lport layer logging */ | ||
| 25 | #define FC_DISC_LOGGING 0x04 /* discovery layer logging */ | ||
| 26 | #define FC_RPORT_LOGGING 0x08 /* rport layer logging */ | ||
| 27 | #define FC_FCP_LOGGING 0x10 /* I/O path logging */ | ||
| 28 | #define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ | ||
| 29 | #define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ | ||
| 30 | #define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ | ||
| 31 | |||
| 32 | extern unsigned int fc_debug_logging; | ||
| 33 | |||
| 34 | #define FC_CHECK_LOGGING(LEVEL, CMD) \ | ||
| 35 | do { \ | ||
| 36 | if (unlikely(fc_debug_logging & LEVEL)) \ | ||
| 37 | do { \ | ||
| 38 | CMD; \ | ||
| 39 | } while (0); \ | ||
| 40 | } while (0) | ||
| 41 | |||
| 42 | #define FC_LIBFC_DBG(fmt, args...) \ | ||
| 43 | FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ | ||
| 44 | printk(KERN_INFO "libfc: " fmt, ##args)) | ||
| 45 | |||
| 46 | #define FC_LPORT_DBG(lport, fmt, args...) \ | ||
| 47 | FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ | ||
| 48 | printk(KERN_INFO "host%u: lport %6x: " fmt, \ | ||
| 49 | (lport)->host->host_no, \ | ||
| 50 | fc_host_port_id((lport)->host), ##args)) | ||
| 51 | |||
| 52 | #define FC_DISC_DBG(disc, fmt, args...) \ | ||
| 53 | FC_CHECK_LOGGING(FC_DISC_LOGGING, \ | ||
| 54 | printk(KERN_INFO "host%u: disc: " fmt, \ | ||
| 55 | (disc)->lport->host->host_no, \ | ||
| 56 | ##args)) | ||
| 57 | |||
| 58 | #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ | ||
| 59 | FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ | ||
| 60 | printk(KERN_INFO "host%u: rport %6x: " fmt, \ | ||
| 61 | (lport)->host->host_no, \ | ||
| 62 | (port_id), ##args)) | ||
| 63 | |||
| 64 | #define FC_RPORT_DBG(rdata, fmt, args...) \ | ||
| 65 | FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) | ||
| 66 | |||
| 67 | #define FC_FCP_DBG(pkt, fmt, args...) \ | ||
| 68 | FC_CHECK_LOGGING(FC_FCP_LOGGING, \ | ||
| 69 | printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ | ||
| 70 | (pkt)->lp->host->host_no, \ | ||
| 71 | pkt->rport->port_id, ##args)) | ||
| 72 | |||
| 73 | #define FC_EXCH_DBG(exch, fmt, args...) \ | ||
| 74 | FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ | ||
| 75 | printk(KERN_INFO "host%u: xid %4x: " fmt, \ | ||
| 76 | (exch)->lp->host->host_no, \ | ||
| 77 | exch->xid, ##args)) | ||
| 78 | |||
| 79 | #define FC_SCSI_DBG(lport, fmt, args...) \ | ||
| 80 | FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ | ||
| 81 | printk(KERN_INFO "host%u: scsi: " fmt, \ | ||
| 82 | (lport)->host->host_no, ##args)) | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Set up direct-data placement for this I/O request | ||
| 86 | */ | ||
| 87 | void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); | ||
| 88 | |||
| 89 | /* | ||
| 90 | * Module setup functions | ||
| 91 | */ | ||
| 92 | int fc_setup_exch_mgr(void); | ||
| 93 | void fc_destroy_exch_mgr(void); | ||
| 94 | int fc_setup_rport(void); | ||
| 95 | void fc_destroy_rport(void); | ||
| 96 | int fc_setup_fcp(void); | ||
| 97 | void fc_destroy_fcp(void); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Internal libfc functions | ||
| 101 | */ | ||
| 102 | const char *fc_els_resp_type(struct fc_frame *); | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Copies a buffer into an sg list | ||
| 106 | */ | ||
| 107 | u32 fc_copy_buffer_to_sglist(void *buf, size_t len, | ||
| 108 | struct scatterlist *sg, | ||
| 109 | u32 *nents, size_t *offset, | ||
| 110 | enum km_type km_type, u32 *crc); | ||
| 111 | |||
| 112 | #endif /* _FC_LIBFC_H_ */ | ||
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd2f77197447..74338c83ad0a 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
| @@ -56,7 +56,7 @@ | |||
| 56 | * at the same time. | 56 | * at the same time. |
| 57 | * | 57 | * |
| 58 | * When discovery succeeds or fails a callback is made to the lport as | 58 | * When discovery succeeds or fails a callback is made to the lport as |
| 59 | * notification. Currently, succesful discovery causes the lport to take no | 59 | * notification. Currently, successful discovery causes the lport to take no |
| 60 | * action. A failure will cause the lport to reset. There is likely a circular | 60 | * action. A failure will cause the lport to reset. There is likely a circular |
| 61 | * locking problem with this implementation. | 61 | * locking problem with this implementation. |
| 62 | */ | 62 | */ |
| @@ -94,6 +94,9 @@ | |||
| 94 | 94 | ||
| 95 | #include <scsi/libfc.h> | 95 | #include <scsi/libfc.h> |
| 96 | #include <scsi/fc_encode.h> | 96 | #include <scsi/fc_encode.h> |
| 97 | #include <linux/scatterlist.h> | ||
| 98 | |||
| 99 | #include "fc_libfc.h" | ||
| 97 | 100 | ||
| 98 | /* Fabric IDs to use for point-to-point mode, chosen on whims. */ | 101 | /* Fabric IDs to use for point-to-point mode, chosen on whims. */ |
| 99 | #define FC_LOCAL_PTP_FID_LO 0x010101 | 102 | #define FC_LOCAL_PTP_FID_LO 0x010101 |
| @@ -106,8 +109,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *); | |||
| 106 | static void fc_lport_enter_reset(struct fc_lport *); | 109 | static void fc_lport_enter_reset(struct fc_lport *); |
| 107 | static void fc_lport_enter_flogi(struct fc_lport *); | 110 | static void fc_lport_enter_flogi(struct fc_lport *); |
| 108 | static void fc_lport_enter_dns(struct fc_lport *); | 111 | static void fc_lport_enter_dns(struct fc_lport *); |
| 109 | static void fc_lport_enter_rpn_id(struct fc_lport *); | 112 | static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state); |
| 110 | static void fc_lport_enter_rft_id(struct fc_lport *); | ||
| 111 | static void fc_lport_enter_scr(struct fc_lport *); | 113 | static void fc_lport_enter_scr(struct fc_lport *); |
| 112 | static void fc_lport_enter_ready(struct fc_lport *); | 114 | static void fc_lport_enter_ready(struct fc_lport *); |
| 113 | static void fc_lport_enter_logo(struct fc_lport *); | 115 | static void fc_lport_enter_logo(struct fc_lport *); |
| @@ -116,14 +118,40 @@ static const char *fc_lport_state_names[] = { | |||
| 116 | [LPORT_ST_DISABLED] = "disabled", | 118 | [LPORT_ST_DISABLED] = "disabled", |
| 117 | [LPORT_ST_FLOGI] = "FLOGI", | 119 | [LPORT_ST_FLOGI] = "FLOGI", |
| 118 | [LPORT_ST_DNS] = "dNS", | 120 | [LPORT_ST_DNS] = "dNS", |
| 119 | [LPORT_ST_RPN_ID] = "RPN_ID", | 121 | [LPORT_ST_RNN_ID] = "RNN_ID", |
| 122 | [LPORT_ST_RSNN_NN] = "RSNN_NN", | ||
| 123 | [LPORT_ST_RSPN_ID] = "RSPN_ID", | ||
| 120 | [LPORT_ST_RFT_ID] = "RFT_ID", | 124 | [LPORT_ST_RFT_ID] = "RFT_ID", |
| 125 | [LPORT_ST_RFF_ID] = "RFF_ID", | ||
| 121 | [LPORT_ST_SCR] = "SCR", | 126 | [LPORT_ST_SCR] = "SCR", |
| 122 | [LPORT_ST_READY] = "Ready", | 127 | [LPORT_ST_READY] = "Ready", |
| 123 | [LPORT_ST_LOGO] = "LOGO", | 128 | [LPORT_ST_LOGO] = "LOGO", |
| 124 | [LPORT_ST_RESET] = "reset", | 129 | [LPORT_ST_RESET] = "reset", |
| 125 | }; | 130 | }; |
| 126 | 131 | ||
| 132 | /** | ||
| 133 | * struct fc_bsg_info - FC Passthrough managemet structure | ||
| 134 | * @job: The passthrough job | ||
| 135 | * @lport: The local port to pass through a command | ||
| 136 | * @rsp_code: The expected response code | ||
| 137 | * @sg: job->reply_payload.sg_list | ||
| 138 | * @nents: job->reply_payload.sg_cnt | ||
| 139 | * @offset: The offset into the response data | ||
| 140 | */ | ||
| 141 | struct fc_bsg_info { | ||
| 142 | struct fc_bsg_job *job; | ||
| 143 | struct fc_lport *lport; | ||
| 144 | u16 rsp_code; | ||
| 145 | struct scatterlist *sg; | ||
| 146 | u32 nents; | ||
| 147 | size_t offset; | ||
| 148 | }; | ||
| 149 | |||
| 150 | /** | ||
| 151 | * fc_frame_drop() - Dummy frame handler | ||
| 152 | * @lport: The local port the frame was received on | ||
| 153 | * @fp: The received frame | ||
| 154 | */ | ||
| 127 | static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) | 155 | static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) |
| 128 | { | 156 | { |
| 129 | fc_frame_free(fp); | 157 | fc_frame_free(fp); |
| @@ -150,8 +178,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
| 150 | switch (event) { | 178 | switch (event) { |
| 151 | case RPORT_EV_READY: | 179 | case RPORT_EV_READY: |
| 152 | if (lport->state == LPORT_ST_DNS) { | 180 | if (lport->state == LPORT_ST_DNS) { |
| 153 | lport->dns_rp = rdata; | 181 | lport->dns_rdata = rdata; |
| 154 | fc_lport_enter_rpn_id(lport); | 182 | fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); |
| 155 | } else { | 183 | } else { |
| 156 | FC_LPORT_DBG(lport, "Received an READY event " | 184 | FC_LPORT_DBG(lport, "Received an READY event " |
| 157 | "on port (%6x) for the directory " | 185 | "on port (%6x) for the directory " |
| @@ -165,7 +193,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
| 165 | case RPORT_EV_LOGO: | 193 | case RPORT_EV_LOGO: |
| 166 | case RPORT_EV_FAILED: | 194 | case RPORT_EV_FAILED: |
| 167 | case RPORT_EV_STOP: | 195 | case RPORT_EV_STOP: |
| 168 | lport->dns_rp = NULL; | 196 | lport->dns_rdata = NULL; |
| 169 | break; | 197 | break; |
| 170 | case RPORT_EV_NONE: | 198 | case RPORT_EV_NONE: |
| 171 | break; | 199 | break; |
| @@ -189,8 +217,8 @@ static const char *fc_lport_state(struct fc_lport *lport) | |||
| 189 | 217 | ||
| 190 | /** | 218 | /** |
| 191 | * fc_lport_ptp_setup() - Create an rport for point-to-point mode | 219 | * fc_lport_ptp_setup() - Create an rport for point-to-point mode |
| 192 | * @lport: The lport to attach the ptp rport to | 220 | * @lport: The lport to attach the ptp rport to |
| 193 | * @fid: The FID of the ptp rport | 221 | * @remote_fid: The FID of the ptp rport |
| 194 | * @remote_wwpn: The WWPN of the ptp rport | 222 | * @remote_wwpn: The WWPN of the ptp rport |
| 195 | * @remote_wwnn: The WWNN of the ptp rport | 223 | * @remote_wwnn: The WWNN of the ptp rport |
| 196 | */ | 224 | */ |
| @@ -199,18 +227,22 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, | |||
| 199 | u64 remote_wwnn) | 227 | u64 remote_wwnn) |
| 200 | { | 228 | { |
| 201 | mutex_lock(&lport->disc.disc_mutex); | 229 | mutex_lock(&lport->disc.disc_mutex); |
| 202 | if (lport->ptp_rp) | 230 | if (lport->ptp_rdata) |
| 203 | lport->tt.rport_logoff(lport->ptp_rp); | 231 | lport->tt.rport_logoff(lport->ptp_rdata); |
| 204 | lport->ptp_rp = lport->tt.rport_create(lport, remote_fid); | 232 | lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); |
| 205 | lport->ptp_rp->ids.port_name = remote_wwpn; | 233 | lport->ptp_rdata->ids.port_name = remote_wwpn; |
| 206 | lport->ptp_rp->ids.node_name = remote_wwnn; | 234 | lport->ptp_rdata->ids.node_name = remote_wwnn; |
| 207 | mutex_unlock(&lport->disc.disc_mutex); | 235 | mutex_unlock(&lport->disc.disc_mutex); |
| 208 | 236 | ||
| 209 | lport->tt.rport_login(lport->ptp_rp); | 237 | lport->tt.rport_login(lport->ptp_rdata); |
| 210 | 238 | ||
| 211 | fc_lport_enter_ready(lport); | 239 | fc_lport_enter_ready(lport); |
| 212 | } | 240 | } |
| 213 | 241 | ||
| 242 | /** | ||
| 243 | * fc_get_host_port_type() - Return the port type of the given Scsi_Host | ||
| 244 | * @shost: The SCSI host whose port type is to be determined | ||
| 245 | */ | ||
| 214 | void fc_get_host_port_type(struct Scsi_Host *shost) | 246 | void fc_get_host_port_type(struct Scsi_Host *shost) |
| 215 | { | 247 | { |
| 216 | /* TODO - currently just NPORT */ | 248 | /* TODO - currently just NPORT */ |
| @@ -218,17 +250,33 @@ void fc_get_host_port_type(struct Scsi_Host *shost) | |||
| 218 | } | 250 | } |
| 219 | EXPORT_SYMBOL(fc_get_host_port_type); | 251 | EXPORT_SYMBOL(fc_get_host_port_type); |
| 220 | 252 | ||
| 253 | /** | ||
| 254 | * fc_get_host_port_state() - Return the port state of the given Scsi_Host | ||
| 255 | * @shost: The SCSI host whose port state is to be determined | ||
| 256 | */ | ||
| 221 | void fc_get_host_port_state(struct Scsi_Host *shost) | 257 | void fc_get_host_port_state(struct Scsi_Host *shost) |
| 222 | { | 258 | { |
| 223 | struct fc_lport *lp = shost_priv(shost); | 259 | struct fc_lport *lport = shost_priv(shost); |
| 224 | 260 | ||
| 225 | if (lp->link_up) | 261 | mutex_lock(&lport->lp_mutex); |
| 226 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; | 262 | if (!lport->link_up) |
| 263 | fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; | ||
| 227 | else | 264 | else |
| 228 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; | 265 | switch (lport->state) { |
| 266 | case LPORT_ST_READY: | ||
| 267 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; | ||
| 268 | break; | ||
| 269 | default: | ||
| 270 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; | ||
| 271 | } | ||
| 272 | mutex_unlock(&lport->lp_mutex); | ||
| 229 | } | 273 | } |
| 230 | EXPORT_SYMBOL(fc_get_host_port_state); | 274 | EXPORT_SYMBOL(fc_get_host_port_state); |
| 231 | 275 | ||
| 276 | /** | ||
| 277 | * fc_get_host_speed() - Return the speed of the given Scsi_Host | ||
| 278 | * @shost: The SCSI host whose port speed is to be determined | ||
| 279 | */ | ||
| 232 | void fc_get_host_speed(struct Scsi_Host *shost) | 280 | void fc_get_host_speed(struct Scsi_Host *shost) |
| 233 | { | 281 | { |
| 234 | struct fc_lport *lport = shost_priv(shost); | 282 | struct fc_lport *lport = shost_priv(shost); |
| @@ -237,24 +285,28 @@ void fc_get_host_speed(struct Scsi_Host *shost) | |||
| 237 | } | 285 | } |
| 238 | EXPORT_SYMBOL(fc_get_host_speed); | 286 | EXPORT_SYMBOL(fc_get_host_speed); |
| 239 | 287 | ||
| 288 | /** | ||
| 289 | * fc_get_host_stats() - Return the Scsi_Host's statistics | ||
| 290 | * @shost: The SCSI host whose statistics are to be returned | ||
| 291 | */ | ||
| 240 | struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) | 292 | struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) |
| 241 | { | 293 | { |
| 242 | struct fc_host_statistics *fcoe_stats; | 294 | struct fc_host_statistics *fcoe_stats; |
| 243 | struct fc_lport *lp = shost_priv(shost); | 295 | struct fc_lport *lport = shost_priv(shost); |
| 244 | struct timespec v0, v1; | 296 | struct timespec v0, v1; |
| 245 | unsigned int cpu; | 297 | unsigned int cpu; |
| 246 | 298 | ||
| 247 | fcoe_stats = &lp->host_stats; | 299 | fcoe_stats = &lport->host_stats; |
| 248 | memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); | 300 | memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); |
| 249 | 301 | ||
| 250 | jiffies_to_timespec(jiffies, &v0); | 302 | jiffies_to_timespec(jiffies, &v0); |
| 251 | jiffies_to_timespec(lp->boot_time, &v1); | 303 | jiffies_to_timespec(lport->boot_time, &v1); |
| 252 | fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); | 304 | fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); |
| 253 | 305 | ||
| 254 | for_each_possible_cpu(cpu) { | 306 | for_each_possible_cpu(cpu) { |
| 255 | struct fcoe_dev_stats *stats; | 307 | struct fcoe_dev_stats *stats; |
| 256 | 308 | ||
| 257 | stats = per_cpu_ptr(lp->dev_stats, cpu); | 309 | stats = per_cpu_ptr(lport->dev_stats, cpu); |
| 258 | 310 | ||
| 259 | fcoe_stats->tx_frames += stats->TxFrames; | 311 | fcoe_stats->tx_frames += stats->TxFrames; |
| 260 | fcoe_stats->tx_words += stats->TxWords; | 312 | fcoe_stats->tx_words += stats->TxWords; |
| @@ -279,12 +331,15 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) | |||
| 279 | } | 331 | } |
| 280 | EXPORT_SYMBOL(fc_get_host_stats); | 332 | EXPORT_SYMBOL(fc_get_host_stats); |
| 281 | 333 | ||
| 282 | /* | 334 | /** |
| 283 | * Fill in FLOGI command for request. | 335 | * fc_lport_flogi_fill() - Fill in FLOGI command for request |
| 336 | * @lport: The local port the FLOGI is for | ||
| 337 | * @flogi: The FLOGI command | ||
| 338 | * @op: The opcode | ||
| 284 | */ | 339 | */ |
| 285 | static void | 340 | static void fc_lport_flogi_fill(struct fc_lport *lport, |
| 286 | fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, | 341 | struct fc_els_flogi *flogi, |
| 287 | unsigned int op) | 342 | unsigned int op) |
| 288 | { | 343 | { |
| 289 | struct fc_els_csp *sp; | 344 | struct fc_els_csp *sp; |
| 290 | struct fc_els_cssp *cp; | 345 | struct fc_els_cssp *cp; |
| @@ -312,8 +367,10 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, | |||
| 312 | } | 367 | } |
| 313 | } | 368 | } |
| 314 | 369 | ||
| 315 | /* | 370 | /** |
| 316 | * Add a supported FC-4 type. | 371 | * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port |
| 372 | * @lport: The local port to add a new FC-4 type to | ||
| 373 | * @type: The new FC-4 type | ||
| 317 | */ | 374 | */ |
| 318 | static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | 375 | static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) |
| 319 | { | 376 | { |
| @@ -325,11 +382,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | |||
| 325 | 382 | ||
| 326 | /** | 383 | /** |
| 327 | * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. | 384 | * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. |
| 385 | * @sp: The sequence in the RLIR exchange | ||
| 386 | * @fp: The RLIR request frame | ||
| 328 | * @lport: Fibre Channel local port recieving the RLIR | 387 | * @lport: Fibre Channel local port recieving the RLIR |
| 329 | * @sp: current sequence in the RLIR exchange | ||
| 330 | * @fp: RLIR request frame | ||
| 331 | * | 388 | * |
| 332 | * Locking Note: The lport lock is exected to be held before calling | 389 | * Locking Note: The lport lock is expected to be held before calling |
| 333 | * this function. | 390 | * this function. |
| 334 | */ | 391 | */ |
| 335 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | 392 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, |
| @@ -344,11 +401,11 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | |||
| 344 | 401 | ||
| 345 | /** | 402 | /** |
| 346 | * fc_lport_recv_echo_req() - Handle received ECHO request | 403 | * fc_lport_recv_echo_req() - Handle received ECHO request |
| 347 | * @lport: Fibre Channel local port recieving the ECHO | 404 | * @sp: The sequence in the ECHO exchange |
| 348 | * @sp: current sequence in the ECHO exchange | 405 | * @fp: ECHO request frame |
| 349 | * @fp: ECHO request frame | 406 | * @lport: The local port recieving the ECHO |
| 350 | * | 407 | * |
| 351 | * Locking Note: The lport lock is exected to be held before calling | 408 | * Locking Note: The lport lock is expected to be held before calling |
| 352 | * this function. | 409 | * this function. |
| 353 | */ | 410 | */ |
| 354 | static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | 411 | static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, |
| @@ -361,7 +418,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
| 361 | void *dp; | 418 | void *dp; |
| 362 | u32 f_ctl; | 419 | u32 f_ctl; |
| 363 | 420 | ||
| 364 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", | 421 | FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", |
| 365 | fc_lport_state(lport)); | 422 | fc_lport_state(lport)); |
| 366 | 423 | ||
| 367 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); | 424 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); |
| @@ -374,7 +431,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
| 374 | if (fp) { | 431 | if (fp) { |
| 375 | dp = fc_frame_payload_get(fp, len); | 432 | dp = fc_frame_payload_get(fp, len); |
| 376 | memcpy(dp, pp, len); | 433 | memcpy(dp, pp, len); |
| 377 | *((u32 *)dp) = htonl(ELS_LS_ACC << 24); | 434 | *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); |
| 378 | sp = lport->tt.seq_start_next(sp); | 435 | sp = lport->tt.seq_start_next(sp); |
| 379 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; | 436 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; |
| 380 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, | 437 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, |
| @@ -385,12 +442,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
| 385 | } | 442 | } |
| 386 | 443 | ||
| 387 | /** | 444 | /** |
| 388 | * fc_lport_recv_echo_req() - Handle received Request Node ID data request | 445 | * fc_lport_recv_rnid_req() - Handle received Request Node ID data request |
| 389 | * @lport: Fibre Channel local port recieving the RNID | 446 | * @sp: The sequence in the RNID exchange |
| 390 | * @sp: current sequence in the RNID exchange | 447 | * @fp: The RNID request frame |
| 391 | * @fp: RNID request frame | 448 | * @lport: The local port recieving the RNID |
| 392 | * | 449 | * |
| 393 | * Locking Note: The lport lock is exected to be held before calling | 450 | * Locking Note: The lport lock is expected to be held before calling |
| 394 | * this function. | 451 | * this function. |
| 395 | */ | 452 | */ |
| 396 | static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | 453 | static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, |
| @@ -453,9 +510,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
| 453 | 510 | ||
| 454 | /** | 511 | /** |
| 455 | * fc_lport_recv_logo_req() - Handle received fabric LOGO request | 512 | * fc_lport_recv_logo_req() - Handle received fabric LOGO request |
| 456 | * @lport: Fibre Channel local port recieving the LOGO | 513 | * @sp: The sequence in the LOGO exchange |
| 457 | * @sp: current sequence in the LOGO exchange | 514 | * @fp: The LOGO request frame |
| 458 | * @fp: LOGO request frame | 515 | * @lport: The local port recieving the LOGO |
| 459 | * | 516 | * |
| 460 | * Locking Note: The lport lock is exected to be held before calling | 517 | * Locking Note: The lport lock is exected to be held before calling |
| 461 | * this function. | 518 | * this function. |
| @@ -470,7 +527,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, | |||
| 470 | 527 | ||
| 471 | /** | 528 | /** |
| 472 | * fc_fabric_login() - Start the lport state machine | 529 | * fc_fabric_login() - Start the lport state machine |
| 473 | * @lport: The lport that should log into the fabric | 530 | * @lport: The local port that should log into the fabric |
| 474 | * | 531 | * |
| 475 | * Locking Note: This function should not be called | 532 | * Locking Note: This function should not be called |
| 476 | * with the lport lock held. | 533 | * with the lport lock held. |
| @@ -491,47 +548,69 @@ int fc_fabric_login(struct fc_lport *lport) | |||
| 491 | EXPORT_SYMBOL(fc_fabric_login); | 548 | EXPORT_SYMBOL(fc_fabric_login); |
| 492 | 549 | ||
| 493 | /** | 550 | /** |
| 494 | * fc_linkup() - Handler for transport linkup events | 551 | * __fc_linkup() - Handler for transport linkup events |
| 495 | * @lport: The lport whose link is up | 552 | * @lport: The lport whose link is up |
| 553 | * | ||
| 554 | * Locking: must be called with the lp_mutex held | ||
| 496 | */ | 555 | */ |
| 497 | void fc_linkup(struct fc_lport *lport) | 556 | void __fc_linkup(struct fc_lport *lport) |
| 498 | { | 557 | { |
| 499 | printk(KERN_INFO "libfc: Link up on port (%6x)\n", | ||
| 500 | fc_host_port_id(lport->host)); | ||
| 501 | |||
| 502 | mutex_lock(&lport->lp_mutex); | ||
| 503 | if (!lport->link_up) { | 558 | if (!lport->link_up) { |
| 504 | lport->link_up = 1; | 559 | lport->link_up = 1; |
| 505 | 560 | ||
| 506 | if (lport->state == LPORT_ST_RESET) | 561 | if (lport->state == LPORT_ST_RESET) |
| 507 | fc_lport_enter_flogi(lport); | 562 | fc_lport_enter_flogi(lport); |
| 508 | } | 563 | } |
| 564 | } | ||
| 565 | |||
| 566 | /** | ||
| 567 | * fc_linkup() - Handler for transport linkup events | ||
| 568 | * @lport: The local port whose link is up | ||
| 569 | */ | ||
| 570 | void fc_linkup(struct fc_lport *lport) | ||
| 571 | { | ||
| 572 | printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n", | ||
| 573 | lport->host->host_no, fc_host_port_id(lport->host)); | ||
| 574 | |||
| 575 | mutex_lock(&lport->lp_mutex); | ||
| 576 | __fc_linkup(lport); | ||
| 509 | mutex_unlock(&lport->lp_mutex); | 577 | mutex_unlock(&lport->lp_mutex); |
| 510 | } | 578 | } |
| 511 | EXPORT_SYMBOL(fc_linkup); | 579 | EXPORT_SYMBOL(fc_linkup); |
| 512 | 580 | ||
| 513 | /** | 581 | /** |
| 514 | * fc_linkdown() - Handler for transport linkdown events | 582 | * __fc_linkdown() - Handler for transport linkdown events |
| 515 | * @lport: The lport whose link is down | 583 | * @lport: The lport whose link is down |
| 584 | * | ||
| 585 | * Locking: must be called with the lp_mutex held | ||
| 516 | */ | 586 | */ |
| 517 | void fc_linkdown(struct fc_lport *lport) | 587 | void __fc_linkdown(struct fc_lport *lport) |
| 518 | { | 588 | { |
| 519 | mutex_lock(&lport->lp_mutex); | ||
| 520 | printk(KERN_INFO "libfc: Link down on port (%6x)\n", | ||
| 521 | fc_host_port_id(lport->host)); | ||
| 522 | |||
| 523 | if (lport->link_up) { | 589 | if (lport->link_up) { |
| 524 | lport->link_up = 0; | 590 | lport->link_up = 0; |
| 525 | fc_lport_enter_reset(lport); | 591 | fc_lport_enter_reset(lport); |
| 526 | lport->tt.fcp_cleanup(lport); | 592 | lport->tt.fcp_cleanup(lport); |
| 527 | } | 593 | } |
| 594 | } | ||
| 595 | |||
| 596 | /** | ||
| 597 | * fc_linkdown() - Handler for transport linkdown events | ||
| 598 | * @lport: The local port whose link is down | ||
| 599 | */ | ||
| 600 | void fc_linkdown(struct fc_lport *lport) | ||
| 601 | { | ||
| 602 | printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n", | ||
| 603 | lport->host->host_no, fc_host_port_id(lport->host)); | ||
| 604 | |||
| 605 | mutex_lock(&lport->lp_mutex); | ||
| 606 | __fc_linkdown(lport); | ||
| 528 | mutex_unlock(&lport->lp_mutex); | 607 | mutex_unlock(&lport->lp_mutex); |
| 529 | } | 608 | } |
| 530 | EXPORT_SYMBOL(fc_linkdown); | 609 | EXPORT_SYMBOL(fc_linkdown); |
| 531 | 610 | ||
| 532 | /** | 611 | /** |
| 533 | * fc_fabric_logoff() - Logout of the fabric | 612 | * fc_fabric_logoff() - Logout of the fabric |
| 534 | * @lport: fc_lport pointer to logoff the fabric | 613 | * @lport: The local port to logoff the fabric |
| 535 | * | 614 | * |
| 536 | * Return value: | 615 | * Return value: |
| 537 | * 0 for success, -1 for failure | 616 | * 0 for success, -1 for failure |
| @@ -540,8 +619,8 @@ int fc_fabric_logoff(struct fc_lport *lport) | |||
| 540 | { | 619 | { |
| 541 | lport->tt.disc_stop_final(lport); | 620 | lport->tt.disc_stop_final(lport); |
| 542 | mutex_lock(&lport->lp_mutex); | 621 | mutex_lock(&lport->lp_mutex); |
| 543 | if (lport->dns_rp) | 622 | if (lport->dns_rdata) |
| 544 | lport->tt.rport_logoff(lport->dns_rp); | 623 | lport->tt.rport_logoff(lport->dns_rdata); |
| 545 | mutex_unlock(&lport->lp_mutex); | 624 | mutex_unlock(&lport->lp_mutex); |
| 546 | lport->tt.rport_flush_queue(); | 625 | lport->tt.rport_flush_queue(); |
| 547 | mutex_lock(&lport->lp_mutex); | 626 | mutex_lock(&lport->lp_mutex); |
| @@ -553,11 +632,9 @@ int fc_fabric_logoff(struct fc_lport *lport) | |||
| 553 | EXPORT_SYMBOL(fc_fabric_logoff); | 632 | EXPORT_SYMBOL(fc_fabric_logoff); |
| 554 | 633 | ||
| 555 | /** | 634 | /** |
| 556 | * fc_lport_destroy() - unregister a fc_lport | 635 | * fc_lport_destroy() - Unregister a fc_lport |
| 557 | * @lport: fc_lport pointer to unregister | 636 | * @lport: The local port to unregister |
| 558 | * | 637 | * |
| 559 | * Return value: | ||
| 560 | * None | ||
| 561 | * Note: | 638 | * Note: |
| 562 | * exit routine for fc_lport instance | 639 | * exit routine for fc_lport instance |
| 563 | * clean-up all the allocated memory | 640 | * clean-up all the allocated memory |
| @@ -580,13 +657,9 @@ int fc_lport_destroy(struct fc_lport *lport) | |||
| 580 | EXPORT_SYMBOL(fc_lport_destroy); | 657 | EXPORT_SYMBOL(fc_lport_destroy); |
| 581 | 658 | ||
| 582 | /** | 659 | /** |
| 583 | * fc_set_mfs() - sets up the mfs for the corresponding fc_lport | 660 | * fc_set_mfs() - Set the maximum frame size for a local port |
| 584 | * @lport: fc_lport pointer to unregister | 661 | * @lport: The local port to set the MFS for |
| 585 | * @mfs: the new mfs for fc_lport | 662 | * @mfs: The new MFS |
| 586 | * | ||
| 587 | * Set mfs for the given fc_lport to the new mfs. | ||
| 588 | * | ||
| 589 | * Return: 0 for success | ||
| 590 | */ | 663 | */ |
| 591 | int fc_set_mfs(struct fc_lport *lport, u32 mfs) | 664 | int fc_set_mfs(struct fc_lport *lport, u32 mfs) |
| 592 | { | 665 | { |
| @@ -617,7 +690,7 @@ EXPORT_SYMBOL(fc_set_mfs); | |||
| 617 | 690 | ||
| 618 | /** | 691 | /** |
| 619 | * fc_lport_disc_callback() - Callback for discovery events | 692 | * fc_lport_disc_callback() - Callback for discovery events |
| 620 | * @lport: FC local port | 693 | * @lport: The local port receiving the event |
| 621 | * @event: The discovery event | 694 | * @event: The discovery event |
| 622 | */ | 695 | */ |
| 623 | void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | 696 | void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) |
| @@ -627,8 +700,9 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
| 627 | FC_LPORT_DBG(lport, "Discovery succeeded\n"); | 700 | FC_LPORT_DBG(lport, "Discovery succeeded\n"); |
| 628 | break; | 701 | break; |
| 629 | case DISC_EV_FAILED: | 702 | case DISC_EV_FAILED: |
| 630 | printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", | 703 | printk(KERN_ERR "host%d: libfc: " |
| 631 | fc_host_port_id(lport->host)); | 704 | "Discovery failed for port (%6x)\n", |
| 705 | lport->host->host_no, fc_host_port_id(lport->host)); | ||
| 632 | mutex_lock(&lport->lp_mutex); | 706 | mutex_lock(&lport->lp_mutex); |
| 633 | fc_lport_enter_reset(lport); | 707 | fc_lport_enter_reset(lport); |
| 634 | mutex_unlock(&lport->lp_mutex); | 708 | mutex_unlock(&lport->lp_mutex); |
| @@ -641,7 +715,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
| 641 | 715 | ||
| 642 | /** | 716 | /** |
| 643 | * fc_rport_enter_ready() - Enter the ready state and start discovery | 717 | * fc_rport_enter_ready() - Enter the ready state and start discovery |
| 644 | * @lport: Fibre Channel local port that is ready | 718 | * @lport: The local port that is ready |
| 645 | * | 719 | * |
| 646 | * Locking Note: The lport lock is expected to be held before calling | 720 | * Locking Note: The lport lock is expected to be held before calling |
| 647 | * this routine. | 721 | * this routine. |
| @@ -652,22 +726,46 @@ static void fc_lport_enter_ready(struct fc_lport *lport) | |||
| 652 | fc_lport_state(lport)); | 726 | fc_lport_state(lport)); |
| 653 | 727 | ||
| 654 | fc_lport_state_enter(lport, LPORT_ST_READY); | 728 | fc_lport_state_enter(lport, LPORT_ST_READY); |
| 729 | if (lport->vport) | ||
| 730 | fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); | ||
| 731 | fc_vports_linkchange(lport); | ||
| 655 | 732 | ||
| 656 | if (!lport->ptp_rp) | 733 | if (!lport->ptp_rdata) |
| 657 | lport->tt.disc_start(fc_lport_disc_callback, lport); | 734 | lport->tt.disc_start(fc_lport_disc_callback, lport); |
| 658 | } | 735 | } |
| 659 | 736 | ||
| 660 | /** | 737 | /** |
| 738 | * fc_lport_set_port_id() - set the local port Port ID | ||
| 739 | * @lport: The local port which will have its Port ID set. | ||
| 740 | * @port_id: The new port ID. | ||
| 741 | * @fp: The frame containing the incoming request, or NULL. | ||
| 742 | * | ||
| 743 | * Locking Note: The lport lock is expected to be held before calling | ||
| 744 | * this function. | ||
| 745 | */ | ||
| 746 | static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, | ||
| 747 | struct fc_frame *fp) | ||
| 748 | { | ||
| 749 | if (port_id) | ||
| 750 | printk(KERN_INFO "host%d: Assigned Port ID %6x\n", | ||
| 751 | lport->host->host_no, port_id); | ||
| 752 | |||
| 753 | fc_host_port_id(lport->host) = port_id; | ||
| 754 | if (lport->tt.lport_set_port_id) | ||
| 755 | lport->tt.lport_set_port_id(lport, port_id, fp); | ||
| 756 | } | ||
| 757 | |||
| 758 | /** | ||
| 661 | * fc_lport_recv_flogi_req() - Receive a FLOGI request | 759 | * fc_lport_recv_flogi_req() - Receive a FLOGI request |
| 662 | * @sp_in: The sequence the FLOGI is on | 760 | * @sp_in: The sequence the FLOGI is on |
| 663 | * @rx_fp: The frame the FLOGI is in | 761 | * @rx_fp: The FLOGI frame |
| 664 | * @lport: The lport that recieved the request | 762 | * @lport: The local port that recieved the request |
| 665 | * | 763 | * |
| 666 | * A received FLOGI request indicates a point-to-point connection. | 764 | * A received FLOGI request indicates a point-to-point connection. |
| 667 | * Accept it with the common service parameters indicating our N port. | 765 | * Accept it with the common service parameters indicating our N port. |
| 668 | * Set up to do a PLOGI if we have the higher-number WWPN. | 766 | * Set up to do a PLOGI if we have the higher-number WWPN. |
| 669 | * | 767 | * |
| 670 | * Locking Note: The lport lock is exected to be held before calling | 768 | * Locking Note: The lport lock is expected to be held before calling |
| 671 | * this function. | 769 | * this function. |
| 672 | */ | 770 | */ |
| 673 | static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | 771 | static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, |
| @@ -695,8 +793,9 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
| 695 | goto out; | 793 | goto out; |
| 696 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); | 794 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); |
| 697 | if (remote_wwpn == lport->wwpn) { | 795 | if (remote_wwpn == lport->wwpn) { |
| 698 | printk(KERN_WARNING "libfc: Received FLOGI from port " | 796 | printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " |
| 699 | "with same WWPN %llx\n", remote_wwpn); | 797 | "with same WWPN %llx\n", |
| 798 | lport->host->host_no, remote_wwpn); | ||
| 700 | goto out; | 799 | goto out; |
| 701 | } | 800 | } |
| 702 | FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); | 801 | FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); |
| @@ -715,7 +814,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
| 715 | remote_fid = FC_LOCAL_PTP_FID_HI; | 814 | remote_fid = FC_LOCAL_PTP_FID_HI; |
| 716 | } | 815 | } |
| 717 | 816 | ||
| 718 | fc_host_port_id(lport->host) = local_fid; | 817 | fc_lport_set_port_id(lport, local_fid, rx_fp); |
| 719 | 818 | ||
| 720 | fp = fc_frame_alloc(lport, sizeof(*flp)); | 819 | fp = fc_frame_alloc(lport, sizeof(*flp)); |
| 721 | if (fp) { | 820 | if (fp) { |
| @@ -747,9 +846,9 @@ out: | |||
| 747 | 846 | ||
| 748 | /** | 847 | /** |
| 749 | * fc_lport_recv_req() - The generic lport request handler | 848 | * fc_lport_recv_req() - The generic lport request handler |
| 750 | * @lport: The lport that received the request | 849 | * @lport: The local port that received the request |
| 751 | * @sp: The sequence the request is on | 850 | * @sp: The sequence the request is on |
| 752 | * @fp: The frame the request is in | 851 | * @fp: The request frame |
| 753 | * | 852 | * |
| 754 | * This function will see if the lport handles the request or | 853 | * This function will see if the lport handles the request or |
| 755 | * if an rport should handle the request. | 854 | * if an rport should handle the request. |
| @@ -817,8 +916,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, | |||
| 817 | } | 916 | } |
| 818 | 917 | ||
| 819 | /** | 918 | /** |
| 820 | * fc_lport_reset() - Reset an lport | 919 | * fc_lport_reset() - Reset a local port |
| 821 | * @lport: The lport which should be reset | 920 | * @lport: The local port which should be reset |
| 822 | * | 921 | * |
| 823 | * Locking Note: This functions should not be called with the | 922 | * Locking Note: This functions should not be called with the |
| 824 | * lport lock held. | 923 | * lport lock held. |
| @@ -834,29 +933,31 @@ int fc_lport_reset(struct fc_lport *lport) | |||
| 834 | EXPORT_SYMBOL(fc_lport_reset); | 933 | EXPORT_SYMBOL(fc_lport_reset); |
| 835 | 934 | ||
| 836 | /** | 935 | /** |
| 837 | * fc_lport_reset_locked() - Reset the local port | 936 | * fc_lport_reset_locked() - Reset the local port w/ the lport lock held |
| 838 | * @lport: Fibre Channel local port to be reset | 937 | * @lport: The local port to be reset |
| 839 | * | 938 | * |
| 840 | * Locking Note: The lport lock is expected to be held before calling | 939 | * Locking Note: The lport lock is expected to be held before calling |
| 841 | * this routine. | 940 | * this routine. |
| 842 | */ | 941 | */ |
| 843 | static void fc_lport_reset_locked(struct fc_lport *lport) | 942 | static void fc_lport_reset_locked(struct fc_lport *lport) |
| 844 | { | 943 | { |
| 845 | if (lport->dns_rp) | 944 | if (lport->dns_rdata) |
| 846 | lport->tt.rport_logoff(lport->dns_rp); | 945 | lport->tt.rport_logoff(lport->dns_rdata); |
| 847 | 946 | ||
| 848 | lport->ptp_rp = NULL; | 947 | lport->ptp_rdata = NULL; |
| 849 | 948 | ||
| 850 | lport->tt.disc_stop(lport); | 949 | lport->tt.disc_stop(lport); |
| 851 | 950 | ||
| 852 | lport->tt.exch_mgr_reset(lport, 0, 0); | 951 | lport->tt.exch_mgr_reset(lport, 0, 0); |
| 853 | fc_host_fabric_name(lport->host) = 0; | 952 | fc_host_fabric_name(lport->host) = 0; |
| 854 | fc_host_port_id(lport->host) = 0; | 953 | |
| 954 | if (fc_host_port_id(lport->host)) | ||
| 955 | fc_lport_set_port_id(lport, 0, NULL); | ||
| 855 | } | 956 | } |
| 856 | 957 | ||
| 857 | /** | 958 | /** |
| 858 | * fc_lport_enter_reset() - Reset the local port | 959 | * fc_lport_enter_reset() - Reset the local port |
| 859 | * @lport: Fibre Channel local port to be reset | 960 | * @lport: The local port to be reset |
| 860 | * | 961 | * |
| 861 | * Locking Note: The lport lock is expected to be held before calling | 962 | * Locking Note: The lport lock is expected to be held before calling |
| 862 | * this routine. | 963 | * this routine. |
| @@ -866,15 +967,22 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
| 866 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", | 967 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", |
| 867 | fc_lport_state(lport)); | 968 | fc_lport_state(lport)); |
| 868 | 969 | ||
| 970 | if (lport->vport) { | ||
| 971 | if (lport->link_up) | ||
| 972 | fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); | ||
| 973 | else | ||
| 974 | fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); | ||
| 975 | } | ||
| 869 | fc_lport_state_enter(lport, LPORT_ST_RESET); | 976 | fc_lport_state_enter(lport, LPORT_ST_RESET); |
| 977 | fc_vports_linkchange(lport); | ||
| 870 | fc_lport_reset_locked(lport); | 978 | fc_lport_reset_locked(lport); |
| 871 | if (lport->link_up) | 979 | if (lport->link_up) |
| 872 | fc_lport_enter_flogi(lport); | 980 | fc_lport_enter_flogi(lport); |
| 873 | } | 981 | } |
| 874 | 982 | ||
| 875 | /** | 983 | /** |
| 876 | * fc_lport_enter_disabled() - disable the local port | 984 | * fc_lport_enter_disabled() - Disable the local port |
| 877 | * @lport: Fibre Channel local port to be reset | 985 | * @lport: The local port to be reset |
| 878 | * | 986 | * |
| 879 | * Locking Note: The lport lock is expected to be held before calling | 987 | * Locking Note: The lport lock is expected to be held before calling |
| 880 | * this routine. | 988 | * this routine. |
| @@ -885,13 +993,14 @@ static void fc_lport_enter_disabled(struct fc_lport *lport) | |||
| 885 | fc_lport_state(lport)); | 993 | fc_lport_state(lport)); |
| 886 | 994 | ||
| 887 | fc_lport_state_enter(lport, LPORT_ST_DISABLED); | 995 | fc_lport_state_enter(lport, LPORT_ST_DISABLED); |
| 996 | fc_vports_linkchange(lport); | ||
| 888 | fc_lport_reset_locked(lport); | 997 | fc_lport_reset_locked(lport); |
| 889 | } | 998 | } |
| 890 | 999 | ||
| 891 | /** | 1000 | /** |
| 892 | * fc_lport_error() - Handler for any errors | 1001 | * fc_lport_error() - Handler for any errors |
| 893 | * @lport: The fc_lport object | 1002 | * @lport: The local port that the error was on |
| 894 | * @fp: The frame pointer | 1003 | * @fp: The error code encoded in a frame pointer |
| 895 | * | 1004 | * |
| 896 | * If the error was caused by a resource allocation failure | 1005 | * If the error was caused by a resource allocation failure |
| 897 | * then wait for half a second and retry, otherwise retry | 1006 | * then wait for half a second and retry, otherwise retry |
| @@ -922,8 +1031,11 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) | |||
| 922 | case LPORT_ST_DISABLED: | 1031 | case LPORT_ST_DISABLED: |
| 923 | case LPORT_ST_READY: | 1032 | case LPORT_ST_READY: |
| 924 | case LPORT_ST_RESET: | 1033 | case LPORT_ST_RESET: |
| 925 | case LPORT_ST_RPN_ID: | 1034 | case LPORT_ST_RNN_ID: |
| 1035 | case LPORT_ST_RSNN_NN: | ||
| 1036 | case LPORT_ST_RSPN_ID: | ||
| 926 | case LPORT_ST_RFT_ID: | 1037 | case LPORT_ST_RFT_ID: |
| 1038 | case LPORT_ST_RFF_ID: | ||
| 927 | case LPORT_ST_SCR: | 1039 | case LPORT_ST_SCR: |
| 928 | case LPORT_ST_DNS: | 1040 | case LPORT_ST_DNS: |
| 929 | case LPORT_ST_FLOGI: | 1041 | case LPORT_ST_FLOGI: |
| @@ -936,33 +1048,33 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) | |||
| 936 | } | 1048 | } |
| 937 | 1049 | ||
| 938 | /** | 1050 | /** |
| 939 | * fc_lport_rft_id_resp() - Handle response to Register Fibre | 1051 | * fc_lport_ns_resp() - Handle response to a name server |
| 940 | * Channel Types by ID (RPN_ID) request | 1052 | * registration exchange |
| 941 | * @sp: current sequence in RPN_ID exchange | 1053 | * @sp: current sequence in exchange |
| 942 | * @fp: response frame | 1054 | * @fp: response frame |
| 943 | * @lp_arg: Fibre Channel host port instance | 1055 | * @lp_arg: Fibre Channel host port instance |
| 944 | * | 1056 | * |
| 945 | * Locking Note: This function will be called without the lport lock | 1057 | * Locking Note: This function will be called without the lport lock |
| 946 | * held, but it will lock, call an _enter_* function or fc_lport_error | 1058 | * held, but it will lock, call an _enter_* function or fc_lport_error() |
| 947 | * and then unlock the lport. | 1059 | * and then unlock the lport. |
| 948 | */ | 1060 | */ |
| 949 | static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, | 1061 | static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, |
| 950 | void *lp_arg) | 1062 | void *lp_arg) |
| 951 | { | 1063 | { |
| 952 | struct fc_lport *lport = lp_arg; | 1064 | struct fc_lport *lport = lp_arg; |
| 953 | struct fc_frame_header *fh; | 1065 | struct fc_frame_header *fh; |
| 954 | struct fc_ct_hdr *ct; | 1066 | struct fc_ct_hdr *ct; |
| 955 | 1067 | ||
| 956 | FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp)); | 1068 | FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); |
| 957 | 1069 | ||
| 958 | if (fp == ERR_PTR(-FC_EX_CLOSED)) | 1070 | if (fp == ERR_PTR(-FC_EX_CLOSED)) |
| 959 | return; | 1071 | return; |
| 960 | 1072 | ||
| 961 | mutex_lock(&lport->lp_mutex); | 1073 | mutex_lock(&lport->lp_mutex); |
| 962 | 1074 | ||
| 963 | if (lport->state != LPORT_ST_RFT_ID) { | 1075 | if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { |
| 964 | FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " | 1076 | FC_LPORT_DBG(lport, "Received a name server response, " |
| 965 | "%s\n", fc_lport_state(lport)); | 1077 | "but in state %s\n", fc_lport_state(lport)); |
| 966 | if (IS_ERR(fp)) | 1078 | if (IS_ERR(fp)) |
| 967 | goto err; | 1079 | goto err; |
| 968 | goto out; | 1080 | goto out; |
| @@ -980,63 +1092,28 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 980 | ct->ct_fs_type == FC_FST_DIR && | 1092 | ct->ct_fs_type == FC_FST_DIR && |
| 981 | ct->ct_fs_subtype == FC_NS_SUBTYPE && | 1093 | ct->ct_fs_subtype == FC_NS_SUBTYPE && |
| 982 | ntohs(ct->ct_cmd) == FC_FS_ACC) | 1094 | ntohs(ct->ct_cmd) == FC_FS_ACC) |
| 983 | fc_lport_enter_scr(lport); | 1095 | switch (lport->state) { |
| 984 | else | 1096 | case LPORT_ST_RNN_ID: |
| 985 | fc_lport_error(lport, fp); | 1097 | fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); |
| 986 | out: | 1098 | break; |
| 987 | fc_frame_free(fp); | 1099 | case LPORT_ST_RSNN_NN: |
| 988 | err: | 1100 | fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); |
| 989 | mutex_unlock(&lport->lp_mutex); | 1101 | break; |
| 990 | } | 1102 | case LPORT_ST_RSPN_ID: |
| 991 | 1103 | fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); | |
| 992 | /** | 1104 | break; |
| 993 | * fc_lport_rpn_id_resp() - Handle response to Register Port | 1105 | case LPORT_ST_RFT_ID: |
| 994 | * Name by ID (RPN_ID) request | 1106 | fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); |
| 995 | * @sp: current sequence in RPN_ID exchange | 1107 | break; |
| 996 | * @fp: response frame | 1108 | case LPORT_ST_RFF_ID: |
| 997 | * @lp_arg: Fibre Channel host port instance | 1109 | fc_lport_enter_scr(lport); |
| 998 | * | 1110 | break; |
| 999 | * Locking Note: This function will be called without the lport lock | 1111 | default: |
| 1000 | * held, but it will lock, call an _enter_* function or fc_lport_error | 1112 | /* should have already been caught by state checks */ |
| 1001 | * and then unlock the lport. | 1113 | break; |
| 1002 | */ | 1114 | } |
| 1003 | static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, | ||
| 1004 | void *lp_arg) | ||
| 1005 | { | ||
| 1006 | struct fc_lport *lport = lp_arg; | ||
| 1007 | struct fc_frame_header *fh; | ||
| 1008 | struct fc_ct_hdr *ct; | ||
| 1009 | |||
| 1010 | FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp)); | ||
| 1011 | |||
| 1012 | if (fp == ERR_PTR(-FC_EX_CLOSED)) | ||
| 1013 | return; | ||
| 1014 | |||
| 1015 | mutex_lock(&lport->lp_mutex); | ||
| 1016 | |||
| 1017 | if (lport->state != LPORT_ST_RPN_ID) { | ||
| 1018 | FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " | ||
| 1019 | "%s\n", fc_lport_state(lport)); | ||
| 1020 | if (IS_ERR(fp)) | ||
| 1021 | goto err; | ||
| 1022 | goto out; | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | if (IS_ERR(fp)) { | ||
| 1026 | fc_lport_error(lport, fp); | ||
| 1027 | goto err; | ||
| 1028 | } | ||
| 1029 | |||
| 1030 | fh = fc_frame_header_get(fp); | ||
| 1031 | ct = fc_frame_payload_get(fp, sizeof(*ct)); | ||
| 1032 | if (fh && ct && fh->fh_type == FC_TYPE_CT && | ||
| 1033 | ct->ct_fs_type == FC_FST_DIR && | ||
| 1034 | ct->ct_fs_subtype == FC_NS_SUBTYPE && | ||
| 1035 | ntohs(ct->ct_cmd) == FC_FS_ACC) | ||
| 1036 | fc_lport_enter_rft_id(lport); | ||
| 1037 | else | 1115 | else |
| 1038 | fc_lport_error(lport, fp); | 1116 | fc_lport_error(lport, fp); |
| 1039 | |||
| 1040 | out: | 1117 | out: |
| 1041 | fc_frame_free(fp); | 1118 | fc_frame_free(fp); |
| 1042 | err: | 1119 | err: |
| @@ -1045,8 +1122,8 @@ err: | |||
| 1045 | 1122 | ||
| 1046 | /** | 1123 | /** |
| 1047 | * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request | 1124 | * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request |
| 1048 | * @sp: current sequence in SCR exchange | 1125 | * @sp: current sequence in SCR exchange |
| 1049 | * @fp: response frame | 1126 | * @fp: response frame |
| 1050 | * @lp_arg: Fibre Channel lport port instance that sent the registration request | 1127 | * @lp_arg: Fibre Channel lport port instance that sent the registration request |
| 1051 | * | 1128 | * |
| 1052 | * Locking Note: This function will be called without the lport lock | 1129 | * Locking Note: This function will be called without the lport lock |
| @@ -1092,8 +1169,8 @@ err: | |||
| 1092 | } | 1169 | } |
| 1093 | 1170 | ||
| 1094 | /** | 1171 | /** |
| 1095 | * fc_lport_enter_scr() - Send a State Change Register (SCR) request | 1172 | * fc_lport_enter_scr() - Send a SCR (State Change Register) request |
| 1096 | * @lport: Fibre Channel local port to register for state changes | 1173 | * @lport: The local port to register for state changes |
| 1097 | * | 1174 | * |
| 1098 | * Locking Note: The lport lock is expected to be held before calling | 1175 | * Locking Note: The lport lock is expected to be held before calling |
| 1099 | * this routine. | 1176 | * this routine. |
| @@ -1114,78 +1191,74 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | |||
| 1114 | } | 1191 | } |
| 1115 | 1192 | ||
| 1116 | if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, | 1193 | if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, |
| 1117 | fc_lport_scr_resp, lport, lport->e_d_tov)) | 1194 | fc_lport_scr_resp, lport, |
| 1118 | fc_lport_error(lport, fp); | 1195 | 2 * lport->r_a_tov)) |
| 1196 | fc_lport_error(lport, NULL); | ||
| 1119 | } | 1197 | } |
| 1120 | 1198 | ||
| 1121 | /** | 1199 | /** |
| 1122 | * fc_lport_enter_rft_id() - Register FC4-types with the name server | 1200 | * fc_lport_enter_ns() - register some object with the name server |
| 1123 | * @lport: Fibre Channel local port to register | 1201 | * @lport: Fibre Channel local port to register |
| 1124 | * | 1202 | * |
| 1125 | * Locking Note: The lport lock is expected to be held before calling | 1203 | * Locking Note: The lport lock is expected to be held before calling |
| 1126 | * this routine. | 1204 | * this routine. |
| 1127 | */ | 1205 | */ |
| 1128 | static void fc_lport_enter_rft_id(struct fc_lport *lport) | 1206 | static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) |
| 1129 | { | 1207 | { |
| 1130 | struct fc_frame *fp; | 1208 | struct fc_frame *fp; |
| 1131 | struct fc_ns_fts *lps; | 1209 | enum fc_ns_req cmd; |
| 1132 | int i; | 1210 | int size = sizeof(struct fc_ct_hdr); |
| 1211 | size_t len; | ||
| 1133 | 1212 | ||
| 1134 | FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", | 1213 | FC_LPORT_DBG(lport, "Entered %s state from %s state\n", |
| 1214 | fc_lport_state_names[state], | ||
| 1135 | fc_lport_state(lport)); | 1215 | fc_lport_state(lport)); |
| 1136 | 1216 | ||
| 1137 | fc_lport_state_enter(lport, LPORT_ST_RFT_ID); | 1217 | fc_lport_state_enter(lport, state); |
| 1138 | |||
| 1139 | lps = &lport->fcts; | ||
| 1140 | i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]); | ||
| 1141 | while (--i >= 0) | ||
| 1142 | if (ntohl(lps->ff_type_map[i]) != 0) | ||
| 1143 | break; | ||
| 1144 | if (i < 0) { | ||
| 1145 | /* nothing to register, move on to SCR */ | ||
| 1146 | fc_lport_enter_scr(lport); | ||
| 1147 | return; | ||
| 1148 | } | ||
| 1149 | 1218 | ||
| 1150 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + | 1219 | switch (state) { |
| 1151 | sizeof(struct fc_ns_rft)); | 1220 | case LPORT_ST_RNN_ID: |
| 1152 | if (!fp) { | 1221 | cmd = FC_NS_RNN_ID; |
| 1153 | fc_lport_error(lport, fp); | 1222 | size += sizeof(struct fc_ns_rn_id); |
| 1223 | break; | ||
| 1224 | case LPORT_ST_RSNN_NN: | ||
| 1225 | len = strnlen(fc_host_symbolic_name(lport->host), 255); | ||
| 1226 | /* if there is no symbolic name, skip to RFT_ID */ | ||
| 1227 | if (!len) | ||
| 1228 | return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); | ||
| 1229 | cmd = FC_NS_RSNN_NN; | ||
| 1230 | size += sizeof(struct fc_ns_rsnn) + len; | ||
| 1231 | break; | ||
| 1232 | case LPORT_ST_RSPN_ID: | ||
| 1233 | len = strnlen(fc_host_symbolic_name(lport->host), 255); | ||
| 1234 | /* if there is no symbolic name, skip to RFT_ID */ | ||
| 1235 | if (!len) | ||
| 1236 | return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); | ||
| 1237 | cmd = FC_NS_RSPN_ID; | ||
| 1238 | size += sizeof(struct fc_ns_rspn) + len; | ||
| 1239 | break; | ||
| 1240 | case LPORT_ST_RFT_ID: | ||
| 1241 | cmd = FC_NS_RFT_ID; | ||
| 1242 | size += sizeof(struct fc_ns_rft); | ||
| 1243 | break; | ||
| 1244 | case LPORT_ST_RFF_ID: | ||
| 1245 | cmd = FC_NS_RFF_ID; | ||
| 1246 | size += sizeof(struct fc_ns_rff_id); | ||
| 1247 | break; | ||
| 1248 | default: | ||
| 1249 | fc_lport_error(lport, NULL); | ||
| 1154 | return; | 1250 | return; |
| 1155 | } | 1251 | } |
| 1156 | 1252 | ||
| 1157 | if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, | 1253 | fp = fc_frame_alloc(lport, size); |
| 1158 | fc_lport_rft_id_resp, | ||
| 1159 | lport, lport->e_d_tov)) | ||
| 1160 | fc_lport_error(lport, fp); | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | /** | ||
| 1164 | * fc_rport_enter_rft_id() - Register port name with the name server | ||
| 1165 | * @lport: Fibre Channel local port to register | ||
| 1166 | * | ||
| 1167 | * Locking Note: The lport lock is expected to be held before calling | ||
| 1168 | * this routine. | ||
| 1169 | */ | ||
| 1170 | static void fc_lport_enter_rpn_id(struct fc_lport *lport) | ||
| 1171 | { | ||
| 1172 | struct fc_frame *fp; | ||
| 1173 | |||
| 1174 | FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", | ||
| 1175 | fc_lport_state(lport)); | ||
| 1176 | |||
| 1177 | fc_lport_state_enter(lport, LPORT_ST_RPN_ID); | ||
| 1178 | |||
| 1179 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + | ||
| 1180 | sizeof(struct fc_ns_rn_id)); | ||
| 1181 | if (!fp) { | 1254 | if (!fp) { |
| 1182 | fc_lport_error(lport, fp); | 1255 | fc_lport_error(lport, fp); |
| 1183 | return; | 1256 | return; |
| 1184 | } | 1257 | } |
| 1185 | 1258 | ||
| 1186 | if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, | 1259 | if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, |
| 1187 | fc_lport_rpn_id_resp, | 1260 | fc_lport_ns_resp, |
| 1188 | lport, lport->e_d_tov)) | 1261 | lport, 3 * lport->r_a_tov)) |
| 1189 | fc_lport_error(lport, fp); | 1262 | fc_lport_error(lport, fp); |
| 1190 | } | 1263 | } |
| 1191 | 1264 | ||
| @@ -1194,8 +1267,8 @@ static struct fc_rport_operations fc_lport_rport_ops = { | |||
| 1194 | }; | 1267 | }; |
| 1195 | 1268 | ||
| 1196 | /** | 1269 | /** |
| 1197 | * fc_rport_enter_dns() - Create a rport to the name server | 1270 | * fc_rport_enter_dns() - Create a fc_rport for the name server |
| 1198 | * @lport: Fibre Channel local port requesting a rport for the name server | 1271 | * @lport: The local port requesting a remote port for the name server |
| 1199 | * | 1272 | * |
| 1200 | * Locking Note: The lport lock is expected to be held before calling | 1273 | * Locking Note: The lport lock is expected to be held before calling |
| 1201 | * this routine. | 1274 | * this routine. |
| @@ -1224,8 +1297,8 @@ err: | |||
| 1224 | } | 1297 | } |
| 1225 | 1298 | ||
| 1226 | /** | 1299 | /** |
| 1227 | * fc_lport_timeout() - Handler for the retry_work timer. | 1300 | * fc_lport_timeout() - Handler for the retry_work timer |
| 1228 | * @work: The work struct of the fc_lport | 1301 | * @work: The work struct of the local port |
| 1229 | */ | 1302 | */ |
| 1230 | static void fc_lport_timeout(struct work_struct *work) | 1303 | static void fc_lport_timeout(struct work_struct *work) |
| 1231 | { | 1304 | { |
| @@ -1237,21 +1310,25 @@ static void fc_lport_timeout(struct work_struct *work) | |||
| 1237 | 1310 | ||
| 1238 | switch (lport->state) { | 1311 | switch (lport->state) { |
| 1239 | case LPORT_ST_DISABLED: | 1312 | case LPORT_ST_DISABLED: |
| 1313 | WARN_ON(1); | ||
| 1314 | break; | ||
| 1240 | case LPORT_ST_READY: | 1315 | case LPORT_ST_READY: |
| 1241 | case LPORT_ST_RESET: | ||
| 1242 | WARN_ON(1); | 1316 | WARN_ON(1); |
| 1243 | break; | 1317 | break; |
| 1318 | case LPORT_ST_RESET: | ||
| 1319 | break; | ||
| 1244 | case LPORT_ST_FLOGI: | 1320 | case LPORT_ST_FLOGI: |
| 1245 | fc_lport_enter_flogi(lport); | 1321 | fc_lport_enter_flogi(lport); |
| 1246 | break; | 1322 | break; |
| 1247 | case LPORT_ST_DNS: | 1323 | case LPORT_ST_DNS: |
| 1248 | fc_lport_enter_dns(lport); | 1324 | fc_lport_enter_dns(lport); |
| 1249 | break; | 1325 | break; |
| 1250 | case LPORT_ST_RPN_ID: | 1326 | case LPORT_ST_RNN_ID: |
| 1251 | fc_lport_enter_rpn_id(lport); | 1327 | case LPORT_ST_RSNN_NN: |
| 1252 | break; | 1328 | case LPORT_ST_RSPN_ID: |
| 1253 | case LPORT_ST_RFT_ID: | 1329 | case LPORT_ST_RFT_ID: |
| 1254 | fc_lport_enter_rft_id(lport); | 1330 | case LPORT_ST_RFF_ID: |
| 1331 | fc_lport_enter_ns(lport, lport->state); | ||
| 1255 | break; | 1332 | break; |
| 1256 | case LPORT_ST_SCR: | 1333 | case LPORT_ST_SCR: |
| 1257 | fc_lport_enter_scr(lport); | 1334 | fc_lport_enter_scr(lport); |
| @@ -1266,16 +1343,16 @@ static void fc_lport_timeout(struct work_struct *work) | |||
| 1266 | 1343 | ||
| 1267 | /** | 1344 | /** |
| 1268 | * fc_lport_logo_resp() - Handle response to LOGO request | 1345 | * fc_lport_logo_resp() - Handle response to LOGO request |
| 1269 | * @sp: current sequence in LOGO exchange | 1346 | * @sp: The sequence that the LOGO was on |
| 1270 | * @fp: response frame | 1347 | * @fp: The LOGO frame |
| 1271 | * @lp_arg: Fibre Channel lport port instance that sent the LOGO request | 1348 | * @lp_arg: The lport port that received the LOGO request |
| 1272 | * | 1349 | * |
| 1273 | * Locking Note: This function will be called without the lport lock | 1350 | * Locking Note: This function will be called without the lport lock |
| 1274 | * held, but it will lock, call an _enter_* function or fc_lport_error | 1351 | * held, but it will lock, call an _enter_* function or fc_lport_error() |
| 1275 | * and then unlock the lport. | 1352 | * and then unlock the lport. |
| 1276 | */ | 1353 | */ |
| 1277 | static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | 1354 | void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, |
| 1278 | void *lp_arg) | 1355 | void *lp_arg) |
| 1279 | { | 1356 | { |
| 1280 | struct fc_lport *lport = lp_arg; | 1357 | struct fc_lport *lport = lp_arg; |
| 1281 | u8 op; | 1358 | u8 op; |
| @@ -1311,10 +1388,11 @@ out: | |||
| 1311 | err: | 1388 | err: |
| 1312 | mutex_unlock(&lport->lp_mutex); | 1389 | mutex_unlock(&lport->lp_mutex); |
| 1313 | } | 1390 | } |
| 1391 | EXPORT_SYMBOL(fc_lport_logo_resp); | ||
| 1314 | 1392 | ||
| 1315 | /** | 1393 | /** |
| 1316 | * fc_rport_enter_logo() - Logout of the fabric | 1394 | * fc_rport_enter_logo() - Logout of the fabric |
| 1317 | * @lport: Fibre Channel local port to be logged out | 1395 | * @lport: The local port to be logged out |
| 1318 | * | 1396 | * |
| 1319 | * Locking Note: The lport lock is expected to be held before calling | 1397 | * Locking Note: The lport lock is expected to be held before calling |
| 1320 | * this routine. | 1398 | * this routine. |
| @@ -1328,6 +1406,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | |||
| 1328 | fc_lport_state(lport)); | 1406 | fc_lport_state(lport)); |
| 1329 | 1407 | ||
| 1330 | fc_lport_state_enter(lport, LPORT_ST_LOGO); | 1408 | fc_lport_state_enter(lport, LPORT_ST_LOGO); |
| 1409 | fc_vports_linkchange(lport); | ||
| 1331 | 1410 | ||
| 1332 | fp = fc_frame_alloc(lport, sizeof(*logo)); | 1411 | fp = fc_frame_alloc(lport, sizeof(*logo)); |
| 1333 | if (!fp) { | 1412 | if (!fp) { |
| @@ -1336,22 +1415,23 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | |||
| 1336 | } | 1415 | } |
| 1337 | 1416 | ||
| 1338 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, | 1417 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, |
| 1339 | fc_lport_logo_resp, lport, lport->e_d_tov)) | 1418 | fc_lport_logo_resp, lport, |
| 1340 | fc_lport_error(lport, fp); | 1419 | 2 * lport->r_a_tov)) |
| 1420 | fc_lport_error(lport, NULL); | ||
| 1341 | } | 1421 | } |
| 1342 | 1422 | ||
| 1343 | /** | 1423 | /** |
| 1344 | * fc_lport_flogi_resp() - Handle response to FLOGI request | 1424 | * fc_lport_flogi_resp() - Handle response to FLOGI request |
| 1345 | * @sp: current sequence in FLOGI exchange | 1425 | * @sp: The sequence that the FLOGI was on |
| 1346 | * @fp: response frame | 1426 | * @fp: The FLOGI response frame |
| 1347 | * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request | 1427 | * @lp_arg: The lport port that received the FLOGI response |
| 1348 | * | 1428 | * |
| 1349 | * Locking Note: This function will be called without the lport lock | 1429 | * Locking Note: This function will be called without the lport lock |
| 1350 | * held, but it will lock, call an _enter_* function or fc_lport_error | 1430 | * held, but it will lock, call an _enter_* function or fc_lport_error() |
| 1351 | * and then unlock the lport. | 1431 | * and then unlock the lport. |
| 1352 | */ | 1432 | */ |
| 1353 | static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | 1433 | void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, |
| 1354 | void *lp_arg) | 1434 | void *lp_arg) |
| 1355 | { | 1435 | { |
| 1356 | struct fc_lport *lport = lp_arg; | 1436 | struct fc_lport *lport = lp_arg; |
| 1357 | struct fc_frame_header *fh; | 1437 | struct fc_frame_header *fh; |
| @@ -1385,11 +1465,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1385 | fh = fc_frame_header_get(fp); | 1465 | fh = fc_frame_header_get(fp); |
| 1386 | did = ntoh24(fh->fh_d_id); | 1466 | did = ntoh24(fh->fh_d_id); |
| 1387 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { | 1467 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { |
| 1388 | |||
| 1389 | printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", | ||
| 1390 | did); | ||
| 1391 | fc_host_port_id(lport->host) = did; | ||
| 1392 | |||
| 1393 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | 1468 | flp = fc_frame_payload_get(fp, sizeof(*flp)); |
| 1394 | if (flp) { | 1469 | if (flp) { |
| 1395 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | 1470 | mfs = ntohs(flp->fl_csp.sp_bb_data) & |
| @@ -1402,12 +1477,18 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1402 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | 1477 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); |
| 1403 | if (csp_flags & FC_SP_FT_EDTR) | 1478 | if (csp_flags & FC_SP_FT_EDTR) |
| 1404 | e_d_tov /= 1000000; | 1479 | e_d_tov /= 1000000; |
| 1480 | |||
| 1481 | lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); | ||
| 1482 | |||
| 1405 | if ((csp_flags & FC_SP_FT_FPORT) == 0) { | 1483 | if ((csp_flags & FC_SP_FT_FPORT) == 0) { |
| 1406 | if (e_d_tov > lport->e_d_tov) | 1484 | if (e_d_tov > lport->e_d_tov) |
| 1407 | lport->e_d_tov = e_d_tov; | 1485 | lport->e_d_tov = e_d_tov; |
| 1408 | lport->r_a_tov = 2 * e_d_tov; | 1486 | lport->r_a_tov = 2 * e_d_tov; |
| 1409 | printk(KERN_INFO "libfc: Port (%6x) entered " | 1487 | fc_lport_set_port_id(lport, did, fp); |
| 1410 | "point to point mode\n", did); | 1488 | printk(KERN_INFO "host%d: libfc: " |
| 1489 | "Port (%6x) entered " | ||
| 1490 | "point-to-point mode\n", | ||
| 1491 | lport->host->host_no, did); | ||
| 1411 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), | 1492 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), |
| 1412 | get_unaligned_be64( | 1493 | get_unaligned_be64( |
| 1413 | &flp->fl_wwpn), | 1494 | &flp->fl_wwpn), |
| @@ -1418,6 +1499,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1418 | lport->r_a_tov = r_a_tov; | 1499 | lport->r_a_tov = r_a_tov; |
| 1419 | fc_host_fabric_name(lport->host) = | 1500 | fc_host_fabric_name(lport->host) = |
| 1420 | get_unaligned_be64(&flp->fl_wwnn); | 1501 | get_unaligned_be64(&flp->fl_wwnn); |
| 1502 | fc_lport_set_port_id(lport, did, fp); | ||
| 1421 | fc_lport_enter_dns(lport); | 1503 | fc_lport_enter_dns(lport); |
| 1422 | } | 1504 | } |
| 1423 | } | 1505 | } |
| @@ -1430,6 +1512,7 @@ out: | |||
| 1430 | err: | 1512 | err: |
| 1431 | mutex_unlock(&lport->lp_mutex); | 1513 | mutex_unlock(&lport->lp_mutex); |
| 1432 | } | 1514 | } |
| 1515 | EXPORT_SYMBOL(fc_lport_flogi_resp); | ||
| 1433 | 1516 | ||
| 1434 | /** | 1517 | /** |
| 1435 | * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager | 1518 | * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager |
| @@ -1451,12 +1534,18 @@ void fc_lport_enter_flogi(struct fc_lport *lport) | |||
| 1451 | if (!fp) | 1534 | if (!fp) |
| 1452 | return fc_lport_error(lport, fp); | 1535 | return fc_lport_error(lport, fp); |
| 1453 | 1536 | ||
| 1454 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, | 1537 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, |
| 1455 | fc_lport_flogi_resp, lport, lport->e_d_tov)) | 1538 | lport->vport ? ELS_FDISC : ELS_FLOGI, |
| 1456 | fc_lport_error(lport, fp); | 1539 | fc_lport_flogi_resp, lport, |
| 1540 | lport->vport ? 2 * lport->r_a_tov : | ||
| 1541 | lport->e_d_tov)) | ||
| 1542 | fc_lport_error(lport, NULL); | ||
| 1457 | } | 1543 | } |
| 1458 | 1544 | ||
| 1459 | /* Configure a fc_lport */ | 1545 | /** |
| 1546 | * fc_lport_config() - Configure a fc_lport | ||
| 1547 | * @lport: The local port to be configured | ||
| 1548 | */ | ||
| 1460 | int fc_lport_config(struct fc_lport *lport) | 1549 | int fc_lport_config(struct fc_lport *lport) |
| 1461 | { | 1550 | { |
| 1462 | INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); | 1551 | INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); |
| @@ -1471,6 +1560,10 @@ int fc_lport_config(struct fc_lport *lport) | |||
| 1471 | } | 1560 | } |
| 1472 | EXPORT_SYMBOL(fc_lport_config); | 1561 | EXPORT_SYMBOL(fc_lport_config); |
| 1473 | 1562 | ||
| 1563 | /** | ||
| 1564 | * fc_lport_init() - Initialize the lport layer for a local port | ||
| 1565 | * @lport: The local port to initialize the exchange layer for | ||
| 1566 | */ | ||
| 1474 | int fc_lport_init(struct fc_lport *lport) | 1567 | int fc_lport_init(struct fc_lport *lport) |
| 1475 | { | 1568 | { |
| 1476 | if (!lport->tt.lport_recv) | 1569 | if (!lport->tt.lport_recv) |
| @@ -1500,7 +1593,253 @@ int fc_lport_init(struct fc_lport *lport) | |||
| 1500 | if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) | 1593 | if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) |
| 1501 | fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; | 1594 | fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; |
| 1502 | 1595 | ||
| 1503 | INIT_LIST_HEAD(&lport->ema_list); | ||
| 1504 | return 0; | 1596 | return 0; |
| 1505 | } | 1597 | } |
| 1506 | EXPORT_SYMBOL(fc_lport_init); | 1598 | EXPORT_SYMBOL(fc_lport_init); |
| 1599 | |||
| 1600 | /** | ||
| 1601 | * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests | ||
| 1602 | * @sp: The sequence for the FC Passthrough response | ||
| 1603 | * @fp: The response frame | ||
| 1604 | * @info_arg: The BSG info that the response is for | ||
| 1605 | */ | ||
| 1606 | static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, | ||
| 1607 | void *info_arg) | ||
| 1608 | { | ||
| 1609 | struct fc_bsg_info *info = info_arg; | ||
| 1610 | struct fc_bsg_job *job = info->job; | ||
| 1611 | struct fc_lport *lport = info->lport; | ||
| 1612 | struct fc_frame_header *fh; | ||
| 1613 | size_t len; | ||
| 1614 | void *buf; | ||
| 1615 | |||
| 1616 | if (IS_ERR(fp)) { | ||
| 1617 | job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? | ||
| 1618 | -ECONNABORTED : -ETIMEDOUT; | ||
| 1619 | job->reply_len = sizeof(uint32_t); | ||
| 1620 | job->state_flags |= FC_RQST_STATE_DONE; | ||
| 1621 | job->job_done(job); | ||
| 1622 | kfree(info); | ||
| 1623 | return; | ||
| 1624 | } | ||
| 1625 | |||
| 1626 | mutex_lock(&lport->lp_mutex); | ||
| 1627 | fh = fc_frame_header_get(fp); | ||
| 1628 | len = fr_len(fp) - sizeof(*fh); | ||
| 1629 | buf = fc_frame_payload_get(fp, 0); | ||
| 1630 | |||
| 1631 | if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { | ||
| 1632 | /* Get the response code from the first frame payload */ | ||
| 1633 | unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? | ||
| 1634 | ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : | ||
| 1635 | (unsigned short)fc_frame_payload_op(fp); | ||
| 1636 | |||
| 1637 | /* Save the reply status of the job */ | ||
| 1638 | job->reply->reply_data.ctels_reply.status = | ||
| 1639 | (cmd == info->rsp_code) ? | ||
| 1640 | FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; | ||
| 1641 | } | ||
| 1642 | |||
| 1643 | job->reply->reply_payload_rcv_len += | ||
| 1644 | fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, | ||
| 1645 | &info->offset, KM_BIO_SRC_IRQ, NULL); | ||
| 1646 | |||
| 1647 | if (fr_eof(fp) == FC_EOF_T && | ||
| 1648 | (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == | ||
| 1649 | (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { | ||
| 1650 | if (job->reply->reply_payload_rcv_len > | ||
| 1651 | job->reply_payload.payload_len) | ||
| 1652 | job->reply->reply_payload_rcv_len = | ||
| 1653 | job->reply_payload.payload_len; | ||
| 1654 | job->reply->result = 0; | ||
| 1655 | job->state_flags |= FC_RQST_STATE_DONE; | ||
| 1656 | job->job_done(job); | ||
| 1657 | kfree(info); | ||
| 1658 | } | ||
| 1659 | fc_frame_free(fp); | ||
| 1660 | mutex_unlock(&lport->lp_mutex); | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | /** | ||
| 1664 | * fc_lport_els_request() - Send ELS passthrough request | ||
| 1665 | * @job: The BSG Passthrough job | ||
| 1666 | * @lport: The local port sending the request | ||
| 1667 | * @did: The destination port id | ||
| 1668 | * | ||
| 1669 | * Locking Note: The lport lock is expected to be held before calling | ||
| 1670 | * this routine. | ||
| 1671 | */ | ||
| 1672 | static int fc_lport_els_request(struct fc_bsg_job *job, | ||
| 1673 | struct fc_lport *lport, | ||
| 1674 | u32 did, u32 tov) | ||
| 1675 | { | ||
| 1676 | struct fc_bsg_info *info; | ||
| 1677 | struct fc_frame *fp; | ||
| 1678 | struct fc_frame_header *fh; | ||
| 1679 | char *pp; | ||
| 1680 | int len; | ||
| 1681 | |||
| 1682 | fp = fc_frame_alloc(lport, job->request_payload.payload_len); | ||
| 1683 | if (!fp) | ||
| 1684 | return -ENOMEM; | ||
| 1685 | |||
| 1686 | len = job->request_payload.payload_len; | ||
| 1687 | pp = fc_frame_payload_get(fp, len); | ||
| 1688 | |||
| 1689 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
| 1690 | job->request_payload.sg_cnt, | ||
| 1691 | pp, len); | ||
| 1692 | |||
| 1693 | fh = fc_frame_header_get(fp); | ||
| 1694 | fh->fh_r_ctl = FC_RCTL_ELS_REQ; | ||
| 1695 | hton24(fh->fh_d_id, did); | ||
| 1696 | hton24(fh->fh_s_id, fc_host_port_id(lport->host)); | ||
| 1697 | fh->fh_type = FC_TYPE_ELS; | ||
| 1698 | hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | | ||
| 1699 | FC_FC_END_SEQ | FC_FC_SEQ_INIT); | ||
| 1700 | fh->fh_cs_ctl = 0; | ||
| 1701 | fh->fh_df_ctl = 0; | ||
| 1702 | fh->fh_parm_offset = 0; | ||
| 1703 | |||
| 1704 | info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); | ||
| 1705 | if (!info) { | ||
| 1706 | fc_frame_free(fp); | ||
| 1707 | return -ENOMEM; | ||
| 1708 | } | ||
| 1709 | |||
| 1710 | info->job = job; | ||
| 1711 | info->lport = lport; | ||
| 1712 | info->rsp_code = ELS_LS_ACC; | ||
| 1713 | info->nents = job->reply_payload.sg_cnt; | ||
| 1714 | info->sg = job->reply_payload.sg_list; | ||
| 1715 | |||
| 1716 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, | ||
| 1717 | NULL, info, tov)) | ||
| 1718 | return -ECOMM; | ||
| 1719 | return 0; | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | /** | ||
| 1723 | * fc_lport_ct_request() - Send CT Passthrough request | ||
| 1724 | * @job: The BSG Passthrough job | ||
| 1725 | * @lport: The local port sending the request | ||
| 1726 | * @did: The destination FC-ID | ||
| 1727 | * @tov: The timeout period to wait for the response | ||
| 1728 | * | ||
| 1729 | * Locking Note: The lport lock is expected to be held before calling | ||
| 1730 | * this routine. | ||
| 1731 | */ | ||
| 1732 | static int fc_lport_ct_request(struct fc_bsg_job *job, | ||
| 1733 | struct fc_lport *lport, u32 did, u32 tov) | ||
| 1734 | { | ||
| 1735 | struct fc_bsg_info *info; | ||
| 1736 | struct fc_frame *fp; | ||
| 1737 | struct fc_frame_header *fh; | ||
| 1738 | struct fc_ct_req *ct; | ||
| 1739 | size_t len; | ||
| 1740 | |||
| 1741 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + | ||
| 1742 | job->request_payload.payload_len); | ||
| 1743 | if (!fp) | ||
| 1744 | return -ENOMEM; | ||
| 1745 | |||
| 1746 | len = job->request_payload.payload_len; | ||
| 1747 | ct = fc_frame_payload_get(fp, len); | ||
| 1748 | |||
| 1749 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
| 1750 | job->request_payload.sg_cnt, | ||
| 1751 | ct, len); | ||
| 1752 | |||
| 1753 | fh = fc_frame_header_get(fp); | ||
| 1754 | fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; | ||
| 1755 | hton24(fh->fh_d_id, did); | ||
| 1756 | hton24(fh->fh_s_id, fc_host_port_id(lport->host)); | ||
| 1757 | fh->fh_type = FC_TYPE_CT; | ||
| 1758 | hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | | ||
| 1759 | FC_FC_END_SEQ | FC_FC_SEQ_INIT); | ||
| 1760 | fh->fh_cs_ctl = 0; | ||
| 1761 | fh->fh_df_ctl = 0; | ||
| 1762 | fh->fh_parm_offset = 0; | ||
| 1763 | |||
| 1764 | info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); | ||
| 1765 | if (!info) { | ||
| 1766 | fc_frame_free(fp); | ||
| 1767 | return -ENOMEM; | ||
| 1768 | } | ||
| 1769 | |||
| 1770 | info->job = job; | ||
| 1771 | info->lport = lport; | ||
| 1772 | info->rsp_code = FC_FS_ACC; | ||
| 1773 | info->nents = job->reply_payload.sg_cnt; | ||
| 1774 | info->sg = job->reply_payload.sg_list; | ||
| 1775 | |||
| 1776 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, | ||
| 1777 | NULL, info, tov)) | ||
| 1778 | return -ECOMM; | ||
| 1779 | return 0; | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | /** | ||
| 1783 | * fc_lport_bsg_request() - The common entry point for sending | ||
| 1784 | * FC Passthrough requests | ||
| 1785 | * @job: The BSG passthrough job | ||
| 1786 | */ | ||
| 1787 | int fc_lport_bsg_request(struct fc_bsg_job *job) | ||
| 1788 | { | ||
| 1789 | struct request *rsp = job->req->next_rq; | ||
| 1790 | struct Scsi_Host *shost = job->shost; | ||
| 1791 | struct fc_lport *lport = shost_priv(shost); | ||
| 1792 | struct fc_rport *rport; | ||
| 1793 | struct fc_rport_priv *rdata; | ||
| 1794 | int rc = -EINVAL; | ||
| 1795 | u32 did; | ||
| 1796 | |||
| 1797 | job->reply->reply_payload_rcv_len = 0; | ||
| 1798 | rsp->resid_len = job->reply_payload.payload_len; | ||
| 1799 | |||
| 1800 | mutex_lock(&lport->lp_mutex); | ||
| 1801 | |||
| 1802 | switch (job->request->msgcode) { | ||
| 1803 | case FC_BSG_RPT_ELS: | ||
| 1804 | rport = job->rport; | ||
| 1805 | if (!rport) | ||
| 1806 | break; | ||
| 1807 | |||
| 1808 | rdata = rport->dd_data; | ||
| 1809 | rc = fc_lport_els_request(job, lport, rport->port_id, | ||
| 1810 | rdata->e_d_tov); | ||
| 1811 | break; | ||
| 1812 | |||
| 1813 | case FC_BSG_RPT_CT: | ||
| 1814 | rport = job->rport; | ||
| 1815 | if (!rport) | ||
| 1816 | break; | ||
| 1817 | |||
| 1818 | rdata = rport->dd_data; | ||
| 1819 | rc = fc_lport_ct_request(job, lport, rport->port_id, | ||
| 1820 | rdata->e_d_tov); | ||
| 1821 | break; | ||
| 1822 | |||
| 1823 | case FC_BSG_HST_CT: | ||
| 1824 | did = ntoh24(job->request->rqst_data.h_ct.port_id); | ||
| 1825 | if (did == FC_FID_DIR_SERV) | ||
| 1826 | rdata = lport->dns_rdata; | ||
| 1827 | else | ||
| 1828 | rdata = lport->tt.rport_lookup(lport, did); | ||
| 1829 | |||
| 1830 | if (!rdata) | ||
| 1831 | break; | ||
| 1832 | |||
| 1833 | rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); | ||
| 1834 | break; | ||
| 1835 | |||
| 1836 | case FC_BSG_HST_ELS_NOLOGIN: | ||
| 1837 | did = ntoh24(job->request->rqst_data.h_els.port_id); | ||
| 1838 | rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); | ||
| 1839 | break; | ||
| 1840 | } | ||
| 1841 | |||
| 1842 | mutex_unlock(&lport->lp_mutex); | ||
| 1843 | return rc; | ||
| 1844 | } | ||
| 1845 | EXPORT_SYMBOL(fc_lport_bsg_request); | ||
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c new file mode 100644 index 000000000000..c68f6c7341c2 --- /dev/null +++ b/drivers/scsi/libfc/fc_npiv.c | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 16 | * | ||
| 17 | * Maintained at www.Open-FCoE.org | ||
| 18 | */ | ||
| 19 | |||
| 20 | /* | ||
| 21 | * NPIV VN_Port helper functions for libfc | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <scsi/libfc.h> | ||
| 25 | |||
| 26 | /** | ||
| 27 | * fc_vport_create() - Create a new NPIV vport instance | ||
| 28 | * @vport: fc_vport structure from scsi_transport_fc | ||
| 29 | * @privsize: driver private data size to allocate along with the Scsi_Host | ||
| 30 | */ | ||
| 31 | |||
| 32 | struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) | ||
| 33 | { | ||
| 34 | struct Scsi_Host *shost = vport_to_shost(vport); | ||
| 35 | struct fc_lport *n_port = shost_priv(shost); | ||
| 36 | struct fc_lport *vn_port; | ||
| 37 | |||
| 38 | vn_port = libfc_host_alloc(shost->hostt, privsize); | ||
| 39 | if (!vn_port) | ||
| 40 | goto err_out; | ||
| 41 | if (fc_exch_mgr_list_clone(n_port, vn_port)) | ||
| 42 | goto err_put; | ||
| 43 | |||
| 44 | vn_port->vport = vport; | ||
| 45 | vport->dd_data = vn_port; | ||
| 46 | |||
| 47 | mutex_lock(&n_port->lp_mutex); | ||
| 48 | list_add_tail(&vn_port->list, &n_port->vports); | ||
| 49 | mutex_unlock(&n_port->lp_mutex); | ||
| 50 | |||
| 51 | return vn_port; | ||
| 52 | |||
| 53 | err_put: | ||
| 54 | scsi_host_put(vn_port->host); | ||
| 55 | err_out: | ||
| 56 | return NULL; | ||
| 57 | } | ||
| 58 | EXPORT_SYMBOL(libfc_vport_create); | ||
| 59 | |||
| 60 | /** | ||
| 61 | * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID | ||
| 62 | * @n_port: Top level N_Port which may have multiple NPIV VN_Ports | ||
| 63 | * @port_id: Fabric ID to find a match for | ||
| 64 | * | ||
| 65 | * Returns: matching lport pointer or NULL if there is no match | ||
| 66 | */ | ||
| 67 | struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) | ||
| 68 | { | ||
| 69 | struct fc_lport *lport = NULL; | ||
| 70 | struct fc_lport *vn_port; | ||
| 71 | |||
| 72 | if (fc_host_port_id(n_port->host) == port_id) | ||
| 73 | return n_port; | ||
| 74 | |||
| 75 | mutex_lock(&n_port->lp_mutex); | ||
| 76 | list_for_each_entry(vn_port, &n_port->vports, list) { | ||
| 77 | if (fc_host_port_id(vn_port->host) == port_id) { | ||
| 78 | lport = vn_port; | ||
| 79 | break; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | mutex_unlock(&n_port->lp_mutex); | ||
| 83 | |||
| 84 | return lport; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* | ||
| 88 | * When setting the link state of vports during an lport state change, it's | ||
| 89 | * necessary to hold the lp_mutex of both the N_Port and the VN_Port. | ||
| 90 | * This tells the lockdep engine to treat the nested locking of the VN_Port | ||
| 91 | * as a different lock class. | ||
| 92 | */ | ||
| 93 | enum libfc_lport_mutex_class { | ||
| 94 | LPORT_MUTEX_NORMAL = 0, | ||
| 95 | LPORT_MUTEX_VN_PORT = 1, | ||
| 96 | }; | ||
| 97 | |||
| 98 | /** | ||
| 99 | * __fc_vport_setlink() - update link and status on a VN_Port | ||
| 100 | * @n_port: parent N_Port | ||
| 101 | * @vn_port: VN_Port to update | ||
| 102 | * | ||
| 103 | * Locking: must be called with both the N_Port and VN_Port lp_mutex held | ||
| 104 | */ | ||
| 105 | static void __fc_vport_setlink(struct fc_lport *n_port, | ||
| 106 | struct fc_lport *vn_port) | ||
| 107 | { | ||
| 108 | struct fc_vport *vport = vn_port->vport; | ||
| 109 | |||
| 110 | if (vn_port->state == LPORT_ST_DISABLED) | ||
| 111 | return; | ||
| 112 | |||
| 113 | if (n_port->state == LPORT_ST_READY) { | ||
| 114 | if (n_port->npiv_enabled) { | ||
| 115 | fc_vport_set_state(vport, FC_VPORT_INITIALIZING); | ||
| 116 | __fc_linkup(vn_port); | ||
| 117 | } else { | ||
| 118 | fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); | ||
| 119 | __fc_linkdown(vn_port); | ||
| 120 | } | ||
| 121 | } else { | ||
| 122 | fc_vport_set_state(vport, FC_VPORT_LINKDOWN); | ||
| 123 | __fc_linkdown(vn_port); | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
| 127 | /** | ||
| 128 | * fc_vport_setlink() - update link and status on a VN_Port | ||
| 129 | * @vn_port: virtual port to update | ||
| 130 | */ | ||
| 131 | void fc_vport_setlink(struct fc_lport *vn_port) | ||
| 132 | { | ||
| 133 | struct fc_vport *vport = vn_port->vport; | ||
| 134 | struct Scsi_Host *shost = vport_to_shost(vport); | ||
| 135 | struct fc_lport *n_port = shost_priv(shost); | ||
| 136 | |||
| 137 | mutex_lock(&n_port->lp_mutex); | ||
| 138 | mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); | ||
| 139 | __fc_vport_setlink(n_port, vn_port); | ||
| 140 | mutex_unlock(&vn_port->lp_mutex); | ||
| 141 | mutex_unlock(&n_port->lp_mutex); | ||
| 142 | } | ||
| 143 | EXPORT_SYMBOL(fc_vport_setlink); | ||
| 144 | |||
| 145 | /** | ||
| 146 | * fc_vports_linkchange() - change the link state of all vports | ||
| 147 | * @n_port: Parent N_Port that has changed state | ||
| 148 | * | ||
| 149 | * Locking: called with the n_port lp_mutex held | ||
| 150 | */ | ||
| 151 | void fc_vports_linkchange(struct fc_lport *n_port) | ||
| 152 | { | ||
| 153 | struct fc_lport *vn_port; | ||
| 154 | |||
| 155 | list_for_each_entry(vn_port, &n_port->vports, list) { | ||
| 156 | mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); | ||
| 157 | __fc_vport_setlink(n_port, vn_port); | ||
| 158 | mutex_unlock(&vn_port->lp_mutex); | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 03ea6748e7ee..35ca0e72df46 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
| @@ -55,6 +55,8 @@ | |||
| 55 | #include <scsi/libfc.h> | 55 | #include <scsi/libfc.h> |
| 56 | #include <scsi/fc_encode.h> | 56 | #include <scsi/fc_encode.h> |
| 57 | 57 | ||
| 58 | #include "fc_libfc.h" | ||
| 59 | |||
| 58 | struct workqueue_struct *rport_event_queue; | 60 | struct workqueue_struct *rport_event_queue; |
| 59 | 61 | ||
| 60 | static void fc_rport_enter_plogi(struct fc_rport_priv *); | 62 | static void fc_rport_enter_plogi(struct fc_rport_priv *); |
| @@ -86,12 +88,13 @@ static const char *fc_rport_state_names[] = { | |||
| 86 | [RPORT_ST_LOGO] = "LOGO", | 88 | [RPORT_ST_LOGO] = "LOGO", |
| 87 | [RPORT_ST_ADISC] = "ADISC", | 89 | [RPORT_ST_ADISC] = "ADISC", |
| 88 | [RPORT_ST_DELETE] = "Delete", | 90 | [RPORT_ST_DELETE] = "Delete", |
| 91 | [RPORT_ST_RESTART] = "Restart", | ||
| 89 | }; | 92 | }; |
| 90 | 93 | ||
| 91 | /** | 94 | /** |
| 92 | * fc_rport_lookup() - lookup a remote port by port_id | 95 | * fc_rport_lookup() - Lookup a remote port by port_id |
| 93 | * @lport: Fibre Channel host port instance | 96 | * @lport: The local port to lookup the remote port on |
| 94 | * @port_id: remote port port_id to match | 97 | * @port_id: The remote port ID to look up |
| 95 | */ | 98 | */ |
| 96 | static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, | 99 | static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, |
| 97 | u32 port_id) | 100 | u32 port_id) |
| @@ -99,16 +102,17 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, | |||
| 99 | struct fc_rport_priv *rdata; | 102 | struct fc_rport_priv *rdata; |
| 100 | 103 | ||
| 101 | list_for_each_entry(rdata, &lport->disc.rports, peers) | 104 | list_for_each_entry(rdata, &lport->disc.rports, peers) |
| 102 | if (rdata->ids.port_id == port_id && | 105 | if (rdata->ids.port_id == port_id) |
| 103 | rdata->rp_state != RPORT_ST_DELETE) | ||
| 104 | return rdata; | 106 | return rdata; |
| 105 | return NULL; | 107 | return NULL; |
| 106 | } | 108 | } |
| 107 | 109 | ||
| 108 | /** | 110 | /** |
| 109 | * fc_rport_create() - Create a new remote port | 111 | * fc_rport_create() - Create a new remote port |
| 110 | * @lport: The local port that the new remote port is for | 112 | * @lport: The local port this remote port will be associated with |
| 111 | * @port_id: The port ID for the new remote port | 113 | * @ids: The identifiers for the new remote port |
| 114 | * | ||
| 115 | * The remote port will start in the INIT state. | ||
| 112 | * | 116 | * |
| 113 | * Locking note: must be called with the disc_mutex held. | 117 | * Locking note: must be called with the disc_mutex held. |
| 114 | */ | 118 | */ |
| @@ -147,8 +151,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, | |||
| 147 | } | 151 | } |
| 148 | 152 | ||
| 149 | /** | 153 | /** |
| 150 | * fc_rport_destroy() - free a remote port after last reference is released. | 154 | * fc_rport_destroy() - Free a remote port after last reference is released |
| 151 | * @kref: pointer to kref inside struct fc_rport_priv | 155 | * @kref: The remote port's kref |
| 152 | */ | 156 | */ |
| 153 | static void fc_rport_destroy(struct kref *kref) | 157 | static void fc_rport_destroy(struct kref *kref) |
| 154 | { | 158 | { |
| @@ -159,8 +163,8 @@ static void fc_rport_destroy(struct kref *kref) | |||
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | /** | 165 | /** |
| 162 | * fc_rport_state() - return a string for the state the rport is in | 166 | * fc_rport_state() - Return a string identifying the remote port's state |
| 163 | * @rdata: remote port private data | 167 | * @rdata: The remote port |
| 164 | */ | 168 | */ |
| 165 | static const char *fc_rport_state(struct fc_rport_priv *rdata) | 169 | static const char *fc_rport_state(struct fc_rport_priv *rdata) |
| 166 | { | 170 | { |
| @@ -173,9 +177,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata) | |||
| 173 | } | 177 | } |
| 174 | 178 | ||
| 175 | /** | 179 | /** |
| 176 | * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. | 180 | * fc_set_rport_loss_tmo() - Set the remote port loss timeout |
| 177 | * @rport: Pointer to Fibre Channel remote port structure | 181 | * @rport: The remote port that gets a new timeout value |
| 178 | * @timeout: timeout in seconds | 182 | * @timeout: The new timeout value (in seconds) |
| 179 | */ | 183 | */ |
| 180 | void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) | 184 | void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) |
| 181 | { | 185 | { |
| @@ -187,9 +191,11 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) | |||
| 187 | EXPORT_SYMBOL(fc_set_rport_loss_tmo); | 191 | EXPORT_SYMBOL(fc_set_rport_loss_tmo); |
| 188 | 192 | ||
| 189 | /** | 193 | /** |
| 190 | * fc_plogi_get_maxframe() - Get max payload from the common service parameters | 194 | * fc_plogi_get_maxframe() - Get the maximum payload from the common service |
| 191 | * @flp: FLOGI payload structure | 195 | * parameters in a FLOGI frame |
| 192 | * @maxval: upper limit, may be less than what is in the service parameters | 196 | * @flp: The FLOGI payload |
| 197 | * @maxval: The maximum frame size upper limit; this may be less than what | ||
| 198 | * is in the service parameters | ||
| 193 | */ | 199 | */ |
| 194 | static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, | 200 | static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, |
| 195 | unsigned int maxval) | 201 | unsigned int maxval) |
| @@ -210,9 +216,9 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, | |||
| 210 | } | 216 | } |
| 211 | 217 | ||
| 212 | /** | 218 | /** |
| 213 | * fc_rport_state_enter() - Change the rport's state | 219 | * fc_rport_state_enter() - Change the state of a remote port |
| 214 | * @rdata: The rport whose state should change | 220 | * @rdata: The remote port whose state should change |
| 215 | * @new: The new state of the rport | 221 | * @new: The new state |
| 216 | * | 222 | * |
| 217 | * Locking Note: Called with the rport lock held | 223 | * Locking Note: Called with the rport lock held |
| 218 | */ | 224 | */ |
| @@ -224,17 +230,22 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata, | |||
| 224 | rdata->rp_state = new; | 230 | rdata->rp_state = new; |
| 225 | } | 231 | } |
| 226 | 232 | ||
| 233 | /** | ||
| 234 | * fc_rport_work() - Handler for remote port events in the rport_event_queue | ||
| 235 | * @work: Handle to the remote port being dequeued | ||
| 236 | */ | ||
| 227 | static void fc_rport_work(struct work_struct *work) | 237 | static void fc_rport_work(struct work_struct *work) |
| 228 | { | 238 | { |
| 229 | u32 port_id; | 239 | u32 port_id; |
| 230 | struct fc_rport_priv *rdata = | 240 | struct fc_rport_priv *rdata = |
| 231 | container_of(work, struct fc_rport_priv, event_work); | 241 | container_of(work, struct fc_rport_priv, event_work); |
| 232 | struct fc_rport_libfc_priv *rp; | 242 | struct fc_rport_libfc_priv *rpriv; |
| 233 | enum fc_rport_event event; | 243 | enum fc_rport_event event; |
| 234 | struct fc_lport *lport = rdata->local_port; | 244 | struct fc_lport *lport = rdata->local_port; |
| 235 | struct fc_rport_operations *rport_ops; | 245 | struct fc_rport_operations *rport_ops; |
| 236 | struct fc_rport_identifiers ids; | 246 | struct fc_rport_identifiers ids; |
| 237 | struct fc_rport *rport; | 247 | struct fc_rport *rport; |
| 248 | int restart = 0; | ||
| 238 | 249 | ||
| 239 | mutex_lock(&rdata->rp_mutex); | 250 | mutex_lock(&rdata->rp_mutex); |
| 240 | event = rdata->event; | 251 | event = rdata->event; |
| @@ -265,12 +276,12 @@ static void fc_rport_work(struct work_struct *work) | |||
| 265 | rport->maxframe_size = rdata->maxframe_size; | 276 | rport->maxframe_size = rdata->maxframe_size; |
| 266 | rport->supported_classes = rdata->supported_classes; | 277 | rport->supported_classes = rdata->supported_classes; |
| 267 | 278 | ||
| 268 | rp = rport->dd_data; | 279 | rpriv = rport->dd_data; |
| 269 | rp->local_port = lport; | 280 | rpriv->local_port = lport; |
| 270 | rp->rp_state = rdata->rp_state; | 281 | rpriv->rp_state = rdata->rp_state; |
| 271 | rp->flags = rdata->flags; | 282 | rpriv->flags = rdata->flags; |
| 272 | rp->e_d_tov = rdata->e_d_tov; | 283 | rpriv->e_d_tov = rdata->e_d_tov; |
| 273 | rp->r_a_tov = rdata->r_a_tov; | 284 | rpriv->r_a_tov = rdata->r_a_tov; |
| 274 | mutex_unlock(&rdata->rp_mutex); | 285 | mutex_unlock(&rdata->rp_mutex); |
| 275 | 286 | ||
| 276 | if (rport_ops && rport_ops->event_callback) { | 287 | if (rport_ops && rport_ops->event_callback) { |
| @@ -287,8 +298,19 @@ static void fc_rport_work(struct work_struct *work) | |||
| 287 | mutex_unlock(&rdata->rp_mutex); | 298 | mutex_unlock(&rdata->rp_mutex); |
| 288 | 299 | ||
| 289 | if (port_id != FC_FID_DIR_SERV) { | 300 | if (port_id != FC_FID_DIR_SERV) { |
| 301 | /* | ||
| 302 | * We must drop rp_mutex before taking disc_mutex. | ||
| 303 | * Re-evaluate state to allow for restart. | ||
| 304 | * A transition to RESTART state must only happen | ||
| 305 | * while disc_mutex is held and rdata is on the list. | ||
| 306 | */ | ||
| 290 | mutex_lock(&lport->disc.disc_mutex); | 307 | mutex_lock(&lport->disc.disc_mutex); |
| 291 | list_del(&rdata->peers); | 308 | mutex_lock(&rdata->rp_mutex); |
| 309 | if (rdata->rp_state == RPORT_ST_RESTART) | ||
| 310 | restart = 1; | ||
| 311 | else | ||
| 312 | list_del(&rdata->peers); | ||
| 313 | mutex_unlock(&rdata->rp_mutex); | ||
| 292 | mutex_unlock(&lport->disc.disc_mutex); | 314 | mutex_unlock(&lport->disc.disc_mutex); |
| 293 | } | 315 | } |
| 294 | 316 | ||
| @@ -305,14 +327,20 @@ static void fc_rport_work(struct work_struct *work) | |||
| 305 | lport->tt.exch_mgr_reset(lport, port_id, 0); | 327 | lport->tt.exch_mgr_reset(lport, port_id, 0); |
| 306 | 328 | ||
| 307 | if (rport) { | 329 | if (rport) { |
| 308 | rp = rport->dd_data; | 330 | rpriv = rport->dd_data; |
| 309 | rp->rp_state = RPORT_ST_DELETE; | 331 | rpriv->rp_state = RPORT_ST_DELETE; |
| 310 | mutex_lock(&rdata->rp_mutex); | 332 | mutex_lock(&rdata->rp_mutex); |
| 311 | rdata->rport = NULL; | 333 | rdata->rport = NULL; |
| 312 | mutex_unlock(&rdata->rp_mutex); | 334 | mutex_unlock(&rdata->rp_mutex); |
| 313 | fc_remote_port_delete(rport); | 335 | fc_remote_port_delete(rport); |
| 314 | } | 336 | } |
| 315 | kref_put(&rdata->kref, lport->tt.rport_destroy); | 337 | if (restart) { |
| 338 | mutex_lock(&rdata->rp_mutex); | ||
| 339 | FC_RPORT_DBG(rdata, "work restart\n"); | ||
| 340 | fc_rport_enter_plogi(rdata); | ||
| 341 | mutex_unlock(&rdata->rp_mutex); | ||
| 342 | } else | ||
| 343 | kref_put(&rdata->kref, lport->tt.rport_destroy); | ||
| 316 | break; | 344 | break; |
| 317 | 345 | ||
| 318 | default: | 346 | default: |
| @@ -323,7 +351,7 @@ static void fc_rport_work(struct work_struct *work) | |||
| 323 | 351 | ||
| 324 | /** | 352 | /** |
| 325 | * fc_rport_login() - Start the remote port login state machine | 353 | * fc_rport_login() - Start the remote port login state machine |
| 326 | * @rdata: private remote port | 354 | * @rdata: The remote port to be logged in to |
| 327 | * | 355 | * |
| 328 | * Locking Note: Called without the rport lock held. This | 356 | * Locking Note: Called without the rport lock held. This |
| 329 | * function will hold the rport lock, call an _enter_* | 357 | * function will hold the rport lock, call an _enter_* |
| @@ -342,6 +370,12 @@ int fc_rport_login(struct fc_rport_priv *rdata) | |||
| 342 | FC_RPORT_DBG(rdata, "ADISC port\n"); | 370 | FC_RPORT_DBG(rdata, "ADISC port\n"); |
| 343 | fc_rport_enter_adisc(rdata); | 371 | fc_rport_enter_adisc(rdata); |
| 344 | break; | 372 | break; |
| 373 | case RPORT_ST_RESTART: | ||
| 374 | break; | ||
| 375 | case RPORT_ST_DELETE: | ||
| 376 | FC_RPORT_DBG(rdata, "Restart deleted port\n"); | ||
| 377 | fc_rport_state_enter(rdata, RPORT_ST_RESTART); | ||
| 378 | break; | ||
| 345 | default: | 379 | default: |
| 346 | FC_RPORT_DBG(rdata, "Login to port\n"); | 380 | FC_RPORT_DBG(rdata, "Login to port\n"); |
| 347 | fc_rport_enter_plogi(rdata); | 381 | fc_rport_enter_plogi(rdata); |
| @@ -353,9 +387,9 @@ int fc_rport_login(struct fc_rport_priv *rdata) | |||
| 353 | } | 387 | } |
| 354 | 388 | ||
| 355 | /** | 389 | /** |
| 356 | * fc_rport_enter_delete() - schedule a remote port to be deleted. | 390 | * fc_rport_enter_delete() - Schedule a remote port to be deleted |
| 357 | * @rdata: private remote port | 391 | * @rdata: The remote port to be deleted |
| 358 | * @event: event to report as the reason for deletion | 392 | * @event: The event to report as the reason for deletion |
| 359 | * | 393 | * |
| 360 | * Locking Note: Called with the rport lock held. | 394 | * Locking Note: Called with the rport lock held. |
| 361 | * | 395 | * |
| @@ -382,8 +416,8 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, | |||
| 382 | } | 416 | } |
| 383 | 417 | ||
| 384 | /** | 418 | /** |
| 385 | * fc_rport_logoff() - Logoff and remove an rport | 419 | * fc_rport_logoff() - Logoff and remove a remote port |
| 386 | * @rdata: private remote port | 420 | * @rdata: The remote port to be logged off of |
| 387 | * | 421 | * |
| 388 | * Locking Note: Called without the rport lock held. This | 422 | * Locking Note: Called without the rport lock held. This |
| 389 | * function will hold the rport lock, call an _enter_* | 423 | * function will hold the rport lock, call an _enter_* |
| @@ -397,26 +431,27 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) | |||
| 397 | 431 | ||
| 398 | if (rdata->rp_state == RPORT_ST_DELETE) { | 432 | if (rdata->rp_state == RPORT_ST_DELETE) { |
| 399 | FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); | 433 | FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); |
| 400 | mutex_unlock(&rdata->rp_mutex); | ||
| 401 | goto out; | 434 | goto out; |
| 402 | } | 435 | } |
| 403 | 436 | ||
| 404 | fc_rport_enter_logo(rdata); | 437 | if (rdata->rp_state == RPORT_ST_RESTART) |
| 438 | FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); | ||
| 439 | else | ||
| 440 | fc_rport_enter_logo(rdata); | ||
| 405 | 441 | ||
| 406 | /* | 442 | /* |
| 407 | * Change the state to Delete so that we discard | 443 | * Change the state to Delete so that we discard |
| 408 | * the response. | 444 | * the response. |
| 409 | */ | 445 | */ |
| 410 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); | 446 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); |
| 411 | mutex_unlock(&rdata->rp_mutex); | ||
| 412 | |||
| 413 | out: | 447 | out: |
| 448 | mutex_unlock(&rdata->rp_mutex); | ||
| 414 | return 0; | 449 | return 0; |
| 415 | } | 450 | } |
| 416 | 451 | ||
| 417 | /** | 452 | /** |
| 418 | * fc_rport_enter_ready() - The rport is ready | 453 | * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state |
| 419 | * @rdata: private remote port | 454 | * @rdata: The remote port that is ready |
| 420 | * | 455 | * |
| 421 | * Locking Note: The rport lock is expected to be held before calling | 456 | * Locking Note: The rport lock is expected to be held before calling |
| 422 | * this routine. | 457 | * this routine. |
| @@ -433,8 +468,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) | |||
| 433 | } | 468 | } |
| 434 | 469 | ||
| 435 | /** | 470 | /** |
| 436 | * fc_rport_timeout() - Handler for the retry_work timer. | 471 | * fc_rport_timeout() - Handler for the retry_work timer |
| 437 | * @work: The work struct of the fc_rport_priv | 472 | * @work: Handle to the remote port that has timed out |
| 438 | * | 473 | * |
| 439 | * Locking Note: Called without the rport lock held. This | 474 | * Locking Note: Called without the rport lock held. This |
| 440 | * function will hold the rport lock, call an _enter_* | 475 | * function will hold the rport lock, call an _enter_* |
| @@ -466,6 +501,7 @@ static void fc_rport_timeout(struct work_struct *work) | |||
| 466 | case RPORT_ST_READY: | 501 | case RPORT_ST_READY: |
| 467 | case RPORT_ST_INIT: | 502 | case RPORT_ST_INIT: |
| 468 | case RPORT_ST_DELETE: | 503 | case RPORT_ST_DELETE: |
| 504 | case RPORT_ST_RESTART: | ||
| 469 | break; | 505 | break; |
| 470 | } | 506 | } |
| 471 | 507 | ||
| @@ -474,8 +510,8 @@ static void fc_rport_timeout(struct work_struct *work) | |||
| 474 | 510 | ||
| 475 | /** | 511 | /** |
| 476 | * fc_rport_error() - Error handler, called once retries have been exhausted | 512 | * fc_rport_error() - Error handler, called once retries have been exhausted |
| 477 | * @rdata: private remote port | 513 | * @rdata: The remote port the error is happened on |
| 478 | * @fp: The frame pointer | 514 | * @fp: The error code encapsulated in a frame pointer |
| 479 | * | 515 | * |
| 480 | * Locking Note: The rport lock is expected to be held before | 516 | * Locking Note: The rport lock is expected to be held before |
| 481 | * calling this routine | 517 | * calling this routine |
| @@ -499,6 +535,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) | |||
| 499 | fc_rport_enter_logo(rdata); | 535 | fc_rport_enter_logo(rdata); |
| 500 | break; | 536 | break; |
| 501 | case RPORT_ST_DELETE: | 537 | case RPORT_ST_DELETE: |
| 538 | case RPORT_ST_RESTART: | ||
| 502 | case RPORT_ST_READY: | 539 | case RPORT_ST_READY: |
| 503 | case RPORT_ST_INIT: | 540 | case RPORT_ST_INIT: |
| 504 | break; | 541 | break; |
| @@ -506,9 +543,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) | |||
| 506 | } | 543 | } |
| 507 | 544 | ||
| 508 | /** | 545 | /** |
| 509 | * fc_rport_error_retry() - Error handler when retries are desired | 546 | * fc_rport_error_retry() - Handler for remote port state retries |
| 510 | * @rdata: private remote port data | 547 | * @rdata: The remote port whose state is to be retried |
| 511 | * @fp: The frame pointer | 548 | * @fp: The error code encapsulated in a frame pointer |
| 512 | * | 549 | * |
| 513 | * If the error was an exchange timeout retry immediately, | 550 | * If the error was an exchange timeout retry immediately, |
| 514 | * otherwise wait for E_D_TOV. | 551 | * otherwise wait for E_D_TOV. |
| @@ -540,10 +577,10 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, | |||
| 540 | } | 577 | } |
| 541 | 578 | ||
| 542 | /** | 579 | /** |
| 543 | * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response | 580 | * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses |
| 544 | * @sp: current sequence in the PLOGI exchange | 581 | * @sp: The sequence the PLOGI is on |
| 545 | * @fp: response frame | 582 | * @fp: The PLOGI response frame |
| 546 | * @rdata_arg: private remote port data | 583 | * @rdata_arg: The remote port that sent the PLOGI response |
| 547 | * | 584 | * |
| 548 | * Locking Note: This function will be called without the rport lock | 585 | * Locking Note: This function will be called without the rport lock |
| 549 | * held, but it will lock, call an _enter_* function or fc_rport_error | 586 | * held, but it will lock, call an _enter_* function or fc_rport_error |
| @@ -606,8 +643,8 @@ err: | |||
| 606 | } | 643 | } |
| 607 | 644 | ||
| 608 | /** | 645 | /** |
| 609 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer | 646 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request |
| 610 | * @rdata: private remote port data | 647 | * @rdata: The remote port to send a PLOGI to |
| 611 | * | 648 | * |
| 612 | * Locking Note: The rport lock is expected to be held before calling | 649 | * Locking Note: The rport lock is expected to be held before calling |
| 613 | * this routine. | 650 | * this routine. |
| @@ -631,17 +668,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) | |||
| 631 | rdata->e_d_tov = lport->e_d_tov; | 668 | rdata->e_d_tov = lport->e_d_tov; |
| 632 | 669 | ||
| 633 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, | 670 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, |
| 634 | fc_rport_plogi_resp, rdata, lport->e_d_tov)) | 671 | fc_rport_plogi_resp, rdata, |
| 635 | fc_rport_error_retry(rdata, fp); | 672 | 2 * lport->r_a_tov)) |
| 673 | fc_rport_error_retry(rdata, NULL); | ||
| 636 | else | 674 | else |
| 637 | kref_get(&rdata->kref); | 675 | kref_get(&rdata->kref); |
| 638 | } | 676 | } |
| 639 | 677 | ||
| 640 | /** | 678 | /** |
| 641 | * fc_rport_prli_resp() - Process Login (PRLI) response handler | 679 | * fc_rport_prli_resp() - Process Login (PRLI) response handler |
| 642 | * @sp: current sequence in the PRLI exchange | 680 | * @sp: The sequence the PRLI response was on |
| 643 | * @fp: response frame | 681 | * @fp: The PRLI response frame |
| 644 | * @rdata_arg: private remote port data | 682 | * @rdata_arg: The remote port that sent the PRLI response |
| 645 | * | 683 | * |
| 646 | * Locking Note: This function will be called without the rport lock | 684 | * Locking Note: This function will be called without the rport lock |
| 647 | * held, but it will lock, call an _enter_* function or fc_rport_error | 685 | * held, but it will lock, call an _enter_* function or fc_rport_error |
| @@ -710,10 +748,10 @@ err: | |||
| 710 | } | 748 | } |
| 711 | 749 | ||
| 712 | /** | 750 | /** |
| 713 | * fc_rport_logo_resp() - Logout (LOGO) response handler | 751 | * fc_rport_logo_resp() - Handler for logout (LOGO) responses |
| 714 | * @sp: current sequence in the LOGO exchange | 752 | * @sp: The sequence the LOGO was on |
| 715 | * @fp: response frame | 753 | * @fp: The LOGO response frame |
| 716 | * @rdata_arg: private remote port data | 754 | * @rdata_arg: The remote port that sent the LOGO response |
| 717 | * | 755 | * |
| 718 | * Locking Note: This function will be called without the rport lock | 756 | * Locking Note: This function will be called without the rport lock |
| 719 | * held, but it will lock, call an _enter_* function or fc_rport_error | 757 | * held, but it will lock, call an _enter_* function or fc_rport_error |
| @@ -756,8 +794,8 @@ err: | |||
| 756 | } | 794 | } |
| 757 | 795 | ||
| 758 | /** | 796 | /** |
| 759 | * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer | 797 | * fc_rport_enter_prli() - Send Process Login (PRLI) request |
| 760 | * @rdata: private remote port data | 798 | * @rdata: The remote port to send the PRLI request to |
| 761 | * | 799 | * |
| 762 | * Locking Note: The rport lock is expected to be held before calling | 800 | * Locking Note: The rport lock is expected to be held before calling |
| 763 | * this routine. | 801 | * this routine. |
| @@ -792,17 +830,18 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) | |||
| 792 | } | 830 | } |
| 793 | 831 | ||
| 794 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, | 832 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, |
| 795 | fc_rport_prli_resp, rdata, lport->e_d_tov)) | 833 | fc_rport_prli_resp, rdata, |
| 796 | fc_rport_error_retry(rdata, fp); | 834 | 2 * lport->r_a_tov)) |
| 835 | fc_rport_error_retry(rdata, NULL); | ||
| 797 | else | 836 | else |
| 798 | kref_get(&rdata->kref); | 837 | kref_get(&rdata->kref); |
| 799 | } | 838 | } |
| 800 | 839 | ||
| 801 | /** | 840 | /** |
| 802 | * fc_rport_els_rtv_resp() - Request Timeout Value response handler | 841 | * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses |
| 803 | * @sp: current sequence in the RTV exchange | 842 | * @sp: The sequence the RTV was on |
| 804 | * @fp: response frame | 843 | * @fp: The RTV response frame |
| 805 | * @rdata_arg: private remote port data | 844 | * @rdata_arg: The remote port that sent the RTV response |
| 806 | * | 845 | * |
| 807 | * Many targets don't seem to support this. | 846 | * Many targets don't seem to support this. |
| 808 | * | 847 | * |
| @@ -865,8 +904,8 @@ err: | |||
| 865 | } | 904 | } |
| 866 | 905 | ||
| 867 | /** | 906 | /** |
| 868 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer | 907 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request |
| 869 | * @rdata: private remote port data | 908 | * @rdata: The remote port to send the RTV request to |
| 870 | * | 909 | * |
| 871 | * Locking Note: The rport lock is expected to be held before calling | 910 | * Locking Note: The rport lock is expected to be held before calling |
| 872 | * this routine. | 911 | * this routine. |
| @@ -888,15 +927,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) | |||
| 888 | } | 927 | } |
| 889 | 928 | ||
| 890 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, | 929 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, |
| 891 | fc_rport_rtv_resp, rdata, lport->e_d_tov)) | 930 | fc_rport_rtv_resp, rdata, |
| 892 | fc_rport_error_retry(rdata, fp); | 931 | 2 * lport->r_a_tov)) |
| 932 | fc_rport_error_retry(rdata, NULL); | ||
| 893 | else | 933 | else |
| 894 | kref_get(&rdata->kref); | 934 | kref_get(&rdata->kref); |
| 895 | } | 935 | } |
| 896 | 936 | ||
| 897 | /** | 937 | /** |
| 898 | * fc_rport_enter_logo() - Send Logout (LOGO) request to peer | 938 | * fc_rport_enter_logo() - Send a logout (LOGO) request |
| 899 | * @rdata: private remote port data | 939 | * @rdata: The remote port to send the LOGO request to |
| 900 | * | 940 | * |
| 901 | * Locking Note: The rport lock is expected to be held before calling | 941 | * Locking Note: The rport lock is expected to be held before calling |
| 902 | * this routine. | 942 | * this routine. |
| @@ -918,24 +958,25 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) | |||
| 918 | } | 958 | } |
| 919 | 959 | ||
| 920 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, | 960 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, |
| 921 | fc_rport_logo_resp, rdata, lport->e_d_tov)) | 961 | fc_rport_logo_resp, rdata, |
| 922 | fc_rport_error_retry(rdata, fp); | 962 | 2 * lport->r_a_tov)) |
| 963 | fc_rport_error_retry(rdata, NULL); | ||
| 923 | else | 964 | else |
| 924 | kref_get(&rdata->kref); | 965 | kref_get(&rdata->kref); |
| 925 | } | 966 | } |
| 926 | 967 | ||
| 927 | /** | 968 | /** |
| 928 | * fc_rport_els_adisc_resp() - Address Discovery response handler | 969 | * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses |
| 929 | * @sp: current sequence in the ADISC exchange | 970 | * @sp: The sequence the ADISC response was on |
| 930 | * @fp: response frame | 971 | * @fp: The ADISC response frame |
| 931 | * @rdata_arg: remote port private. | 972 | * @rdata_arg: The remote port that sent the ADISC response |
| 932 | * | 973 | * |
| 933 | * Locking Note: This function will be called without the rport lock | 974 | * Locking Note: This function will be called without the rport lock |
| 934 | * held, but it will lock, call an _enter_* function or fc_rport_error | 975 | * held, but it will lock, call an _enter_* function or fc_rport_error |
| 935 | * and then unlock the rport. | 976 | * and then unlock the rport. |
| 936 | */ | 977 | */ |
| 937 | static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, | 978 | static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, |
| 938 | void *rdata_arg) | 979 | void *rdata_arg) |
| 939 | { | 980 | { |
| 940 | struct fc_rport_priv *rdata = rdata_arg; | 981 | struct fc_rport_priv *rdata = rdata_arg; |
| 941 | struct fc_els_adisc *adisc; | 982 | struct fc_els_adisc *adisc; |
| @@ -983,8 +1024,8 @@ err: | |||
| 983 | } | 1024 | } |
| 984 | 1025 | ||
| 985 | /** | 1026 | /** |
| 986 | * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer | 1027 | * fc_rport_enter_adisc() - Send Address Discover (ADISC) request |
| 987 | * @rdata: remote port private data | 1028 | * @rdata: The remote port to send the ADISC request to |
| 988 | * | 1029 | * |
| 989 | * Locking Note: The rport lock is expected to be held before calling | 1030 | * Locking Note: The rport lock is expected to be held before calling |
| 990 | * this routine. | 1031 | * this routine. |
| @@ -1005,17 +1046,18 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) | |||
| 1005 | return; | 1046 | return; |
| 1006 | } | 1047 | } |
| 1007 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, | 1048 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, |
| 1008 | fc_rport_adisc_resp, rdata, lport->e_d_tov)) | 1049 | fc_rport_adisc_resp, rdata, |
| 1009 | fc_rport_error_retry(rdata, fp); | 1050 | 2 * lport->r_a_tov)) |
| 1051 | fc_rport_error_retry(rdata, NULL); | ||
| 1010 | else | 1052 | else |
| 1011 | kref_get(&rdata->kref); | 1053 | kref_get(&rdata->kref); |
| 1012 | } | 1054 | } |
| 1013 | 1055 | ||
| 1014 | /** | 1056 | /** |
| 1015 | * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request | 1057 | * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests |
| 1016 | * @rdata: remote port private | 1058 | * @rdata: The remote port that sent the ADISC request |
| 1017 | * @sp: current sequence in the ADISC exchange | 1059 | * @sp: The sequence the ADISC request was on |
| 1018 | * @in_fp: ADISC request frame | 1060 | * @in_fp: The ADISC request frame |
| 1019 | * | 1061 | * |
| 1020 | * Locking Note: Called with the lport and rport locks held. | 1062 | * Locking Note: Called with the lport and rport locks held. |
| 1021 | */ | 1063 | */ |
| @@ -1056,10 +1098,82 @@ drop: | |||
| 1056 | } | 1098 | } |
| 1057 | 1099 | ||
| 1058 | /** | 1100 | /** |
| 1059 | * fc_rport_recv_els_req() - handle a validated ELS request. | 1101 | * fc_rport_recv_rls_req() - Handle received Read Link Status request |
| 1060 | * @lport: Fibre Channel local port | 1102 | * @rdata: The remote port that sent the RLS request |
| 1061 | * @sp: current sequence in the PLOGI exchange | 1103 | * @sp: The sequence that the RLS was on |
| 1062 | * @fp: response frame | 1104 | * @rx_fp: The PRLI request frame |
| 1105 | * | ||
| 1106 | * Locking Note: The rport lock is expected to be held before calling | ||
| 1107 | * this function. | ||
| 1108 | */ | ||
| 1109 | static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, | ||
| 1110 | struct fc_seq *sp, struct fc_frame *rx_fp) | ||
| 1111 | |||
| 1112 | { | ||
| 1113 | struct fc_lport *lport = rdata->local_port; | ||
| 1114 | struct fc_frame *fp; | ||
| 1115 | struct fc_exch *ep = fc_seq_exch(sp); | ||
| 1116 | struct fc_els_rls *rls; | ||
| 1117 | struct fc_els_rls_resp *rsp; | ||
| 1118 | struct fc_els_lesb *lesb; | ||
| 1119 | struct fc_seq_els_data rjt_data; | ||
| 1120 | struct fc_host_statistics *hst; | ||
| 1121 | u32 f_ctl; | ||
| 1122 | |||
| 1123 | FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", | ||
| 1124 | fc_rport_state(rdata)); | ||
| 1125 | |||
| 1126 | rls = fc_frame_payload_get(rx_fp, sizeof(*rls)); | ||
| 1127 | if (!rls) { | ||
| 1128 | rjt_data.reason = ELS_RJT_PROT; | ||
| 1129 | rjt_data.explan = ELS_EXPL_INV_LEN; | ||
| 1130 | goto out_rjt; | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | fp = fc_frame_alloc(lport, sizeof(*rsp)); | ||
| 1134 | if (!fp) { | ||
| 1135 | rjt_data.reason = ELS_RJT_UNAB; | ||
| 1136 | rjt_data.explan = ELS_EXPL_INSUF_RES; | ||
| 1137 | goto out_rjt; | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | rsp = fc_frame_payload_get(fp, sizeof(*rsp)); | ||
| 1141 | memset(rsp, 0, sizeof(*rsp)); | ||
| 1142 | rsp->rls_cmd = ELS_LS_ACC; | ||
| 1143 | lesb = &rsp->rls_lesb; | ||
| 1144 | if (lport->tt.get_lesb) { | ||
| 1145 | /* get LESB from LLD if it supports it */ | ||
| 1146 | lport->tt.get_lesb(lport, lesb); | ||
| 1147 | } else { | ||
| 1148 | fc_get_host_stats(lport->host); | ||
| 1149 | hst = &lport->host_stats; | ||
| 1150 | lesb->lesb_link_fail = htonl(hst->link_failure_count); | ||
| 1151 | lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count); | ||
| 1152 | lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count); | ||
| 1153 | lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count); | ||
| 1154 | lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count); | ||
| 1155 | lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | sp = lport->tt.seq_start_next(sp); | ||
| 1159 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; | ||
| 1160 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, | ||
| 1161 | FC_TYPE_ELS, f_ctl, 0); | ||
| 1162 | lport->tt.seq_send(lport, sp, fp); | ||
| 1163 | goto out; | ||
| 1164 | |||
| 1165 | out_rjt: | ||
| 1166 | rjt_data.fp = NULL; | ||
| 1167 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); | ||
| 1168 | out: | ||
| 1169 | fc_frame_free(rx_fp); | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | /** | ||
| 1173 | * fc_rport_recv_els_req() - Handler for validated ELS requests | ||
| 1174 | * @lport: The local port that received the ELS request | ||
| 1175 | * @sp: The sequence that the ELS request was on | ||
| 1176 | * @fp: The ELS request frame | ||
| 1063 | * | 1177 | * |
| 1064 | * Handle incoming ELS requests that require port login. | 1178 | * Handle incoming ELS requests that require port login. |
| 1065 | * The ELS opcode has already been validated by the caller. | 1179 | * The ELS opcode has already been validated by the caller. |
| @@ -1117,6 +1231,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, | |||
| 1117 | els_data.fp = fp; | 1231 | els_data.fp = fp; |
| 1118 | lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); | 1232 | lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); |
| 1119 | break; | 1233 | break; |
| 1234 | case ELS_RLS: | ||
| 1235 | fc_rport_recv_rls_req(rdata, sp, fp); | ||
| 1236 | break; | ||
| 1120 | default: | 1237 | default: |
| 1121 | fc_frame_free(fp); /* can't happen */ | 1238 | fc_frame_free(fp); /* can't happen */ |
| 1122 | break; | 1239 | break; |
| @@ -1131,10 +1248,10 @@ reject: | |||
| 1131 | } | 1248 | } |
| 1132 | 1249 | ||
| 1133 | /** | 1250 | /** |
| 1134 | * fc_rport_recv_req() - Handle a received ELS request from a rport | 1251 | * fc_rport_recv_req() - Handler for requests |
| 1135 | * @sp: current sequence in the PLOGI exchange | 1252 | * @sp: The sequence the request was on |
| 1136 | * @fp: response frame | 1253 | * @fp: The request frame |
| 1137 | * @lport: Fibre Channel local port | 1254 | * @lport: The local port that received the request |
| 1138 | * | 1255 | * |
| 1139 | * Locking Note: Called with the lport lock held. | 1256 | * Locking Note: Called with the lport lock held. |
| 1140 | */ | 1257 | */ |
| @@ -1161,6 +1278,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1161 | case ELS_ADISC: | 1278 | case ELS_ADISC: |
| 1162 | case ELS_RRQ: | 1279 | case ELS_RRQ: |
| 1163 | case ELS_REC: | 1280 | case ELS_REC: |
| 1281 | case ELS_RLS: | ||
| 1164 | fc_rport_recv_els_req(lport, sp, fp); | 1282 | fc_rport_recv_els_req(lport, sp, fp); |
| 1165 | break; | 1283 | break; |
| 1166 | default: | 1284 | default: |
| @@ -1174,10 +1292,10 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1174 | } | 1292 | } |
| 1175 | 1293 | ||
| 1176 | /** | 1294 | /** |
| 1177 | * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request | 1295 | * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests |
| 1178 | * @lport: local port | 1296 | * @lport: The local port that received the PLOGI request |
| 1179 | * @sp: current sequence in the PLOGI exchange | 1297 | * @sp: The sequence that the PLOGI request was on |
| 1180 | * @fp: PLOGI request frame | 1298 | * @rx_fp: The PLOGI request frame |
| 1181 | * | 1299 | * |
| 1182 | * Locking Note: The rport lock is held before calling this function. | 1300 | * Locking Note: The rport lock is held before calling this function. |
| 1183 | */ | 1301 | */ |
| @@ -1248,6 +1366,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, | |||
| 1248 | } | 1366 | } |
| 1249 | break; | 1367 | break; |
| 1250 | case RPORT_ST_PRLI: | 1368 | case RPORT_ST_PRLI: |
| 1369 | case RPORT_ST_RTV: | ||
| 1251 | case RPORT_ST_READY: | 1370 | case RPORT_ST_READY: |
| 1252 | case RPORT_ST_ADISC: | 1371 | case RPORT_ST_ADISC: |
| 1253 | FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " | 1372 | FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " |
| @@ -1255,11 +1374,14 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, | |||
| 1255 | /* XXX TBD - should reset */ | 1374 | /* XXX TBD - should reset */ |
| 1256 | break; | 1375 | break; |
| 1257 | case RPORT_ST_DELETE: | 1376 | case RPORT_ST_DELETE: |
| 1258 | default: | 1377 | case RPORT_ST_LOGO: |
| 1259 | FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", | 1378 | case RPORT_ST_RESTART: |
| 1260 | rdata->rp_state); | 1379 | FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", |
| 1261 | fc_frame_free(rx_fp); | 1380 | fc_rport_state(rdata)); |
| 1262 | goto out; | 1381 | mutex_unlock(&rdata->rp_mutex); |
| 1382 | rjt_data.reason = ELS_RJT_BUSY; | ||
| 1383 | rjt_data.explan = ELS_EXPL_NONE; | ||
| 1384 | goto reject; | ||
| 1263 | } | 1385 | } |
| 1264 | 1386 | ||
| 1265 | /* | 1387 | /* |
| @@ -1295,10 +1417,10 @@ reject: | |||
| 1295 | } | 1417 | } |
| 1296 | 1418 | ||
| 1297 | /** | 1419 | /** |
| 1298 | * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request | 1420 | * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests |
| 1299 | * @rdata: private remote port data | 1421 | * @rdata: The remote port that sent the PRLI request |
| 1300 | * @sp: current sequence in the PRLI exchange | 1422 | * @sp: The sequence that the PRLI was on |
| 1301 | * @fp: PRLI request frame | 1423 | * @rx_fp: The PRLI request frame |
| 1302 | * | 1424 | * |
| 1303 | * Locking Note: The rport lock is exected to be held before calling | 1425 | * Locking Note: The rport lock is exected to be held before calling |
| 1304 | * this function. | 1426 | * this function. |
| @@ -1402,7 +1524,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, | |||
| 1402 | break; | 1524 | break; |
| 1403 | case FC_TYPE_FCP: | 1525 | case FC_TYPE_FCP: |
| 1404 | fcp_parm = ntohl(rspp->spp_params); | 1526 | fcp_parm = ntohl(rspp->spp_params); |
| 1405 | if (fcp_parm * FCP_SPPF_RETRY) | 1527 | if (fcp_parm & FCP_SPPF_RETRY) |
| 1406 | rdata->flags |= FC_RP_FLAGS_RETRY; | 1528 | rdata->flags |= FC_RP_FLAGS_RETRY; |
| 1407 | rdata->supported_classes = FC_COS_CLASS3; | 1529 | rdata->supported_classes = FC_COS_CLASS3; |
| 1408 | if (fcp_parm & FCP_SPPF_INIT_FCN) | 1530 | if (fcp_parm & FCP_SPPF_INIT_FCN) |
| @@ -1452,10 +1574,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, | |||
| 1452 | } | 1574 | } |
| 1453 | 1575 | ||
| 1454 | /** | 1576 | /** |
| 1455 | * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request | 1577 | * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests |
| 1456 | * @rdata: private remote port data | 1578 | * @rdata: The remote port that sent the PRLO request |
| 1457 | * @sp: current sequence in the PRLO exchange | 1579 | * @sp: The sequence that the PRLO was on |
| 1458 | * @fp: PRLO request frame | 1580 | * @fp: The PRLO request frame |
| 1459 | * | 1581 | * |
| 1460 | * Locking Note: The rport lock is exected to be held before calling | 1582 | * Locking Note: The rport lock is exected to be held before calling |
| 1461 | * this function. | 1583 | * this function. |
| @@ -1482,10 +1604,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, | |||
| 1482 | } | 1604 | } |
| 1483 | 1605 | ||
| 1484 | /** | 1606 | /** |
| 1485 | * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request | 1607 | * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests |
| 1486 | * @lport: local port. | 1608 | * @lport: The local port that received the LOGO request |
| 1487 | * @sp: current sequence in the LOGO exchange | 1609 | * @sp: The sequence that the LOGO request was on |
| 1488 | * @fp: LOGO request frame | 1610 | * @fp: The LOGO request frame |
| 1489 | * | 1611 | * |
| 1490 | * Locking Note: The rport lock is exected to be held before calling | 1612 | * Locking Note: The rport lock is exected to be held before calling |
| 1491 | * this function. | 1613 | * this function. |
| @@ -1510,14 +1632,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, | |||
| 1510 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", | 1632 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", |
| 1511 | fc_rport_state(rdata)); | 1633 | fc_rport_state(rdata)); |
| 1512 | 1634 | ||
| 1635 | fc_rport_enter_delete(rdata, RPORT_EV_LOGO); | ||
| 1636 | |||
| 1513 | /* | 1637 | /* |
| 1514 | * If the remote port was created due to discovery, | 1638 | * If the remote port was created due to discovery, set state |
| 1515 | * log back in. It may have seen a stale RSCN about us. | 1639 | * to log back in. It may have seen a stale RSCN about us. |
| 1516 | */ | 1640 | */ |
| 1517 | if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) | 1641 | if (rdata->disc_id) |
| 1518 | fc_rport_enter_plogi(rdata); | 1642 | fc_rport_state_enter(rdata, RPORT_ST_RESTART); |
| 1519 | else | ||
| 1520 | fc_rport_enter_delete(rdata, RPORT_EV_LOGO); | ||
| 1521 | mutex_unlock(&rdata->rp_mutex); | 1643 | mutex_unlock(&rdata->rp_mutex); |
| 1522 | } else | 1644 | } else |
| 1523 | FC_RPORT_ID_DBG(lport, sid, | 1645 | FC_RPORT_ID_DBG(lport, sid, |
| @@ -1526,11 +1648,18 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, | |||
| 1526 | fc_frame_free(fp); | 1648 | fc_frame_free(fp); |
| 1527 | } | 1649 | } |
| 1528 | 1650 | ||
| 1651 | /** | ||
| 1652 | * fc_rport_flush_queue() - Flush the rport_event_queue | ||
| 1653 | */ | ||
| 1529 | static void fc_rport_flush_queue(void) | 1654 | static void fc_rport_flush_queue(void) |
| 1530 | { | 1655 | { |
| 1531 | flush_workqueue(rport_event_queue); | 1656 | flush_workqueue(rport_event_queue); |
| 1532 | } | 1657 | } |
| 1533 | 1658 | ||
| 1659 | /** | ||
| 1660 | * fc_rport_init() - Initialize the remote port layer for a local port | ||
| 1661 | * @lport: The local port to initialize the remote port layer for | ||
| 1662 | */ | ||
| 1534 | int fc_rport_init(struct fc_lport *lport) | 1663 | int fc_rport_init(struct fc_lport *lport) |
| 1535 | { | 1664 | { |
| 1536 | if (!lport->tt.rport_lookup) | 1665 | if (!lport->tt.rport_lookup) |
| @@ -1558,25 +1687,33 @@ int fc_rport_init(struct fc_lport *lport) | |||
| 1558 | } | 1687 | } |
| 1559 | EXPORT_SYMBOL(fc_rport_init); | 1688 | EXPORT_SYMBOL(fc_rport_init); |
| 1560 | 1689 | ||
| 1561 | int fc_setup_rport(void) | 1690 | /** |
| 1691 | * fc_setup_rport() - Initialize the rport_event_queue | ||
| 1692 | */ | ||
| 1693 | int fc_setup_rport() | ||
| 1562 | { | 1694 | { |
| 1563 | rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); | 1695 | rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); |
| 1564 | if (!rport_event_queue) | 1696 | if (!rport_event_queue) |
| 1565 | return -ENOMEM; | 1697 | return -ENOMEM; |
| 1566 | return 0; | 1698 | return 0; |
| 1567 | } | 1699 | } |
| 1568 | EXPORT_SYMBOL(fc_setup_rport); | ||
| 1569 | 1700 | ||
| 1570 | void fc_destroy_rport(void) | 1701 | /** |
| 1702 | * fc_destroy_rport() - Destroy the rport_event_queue | ||
| 1703 | */ | ||
| 1704 | void fc_destroy_rport() | ||
| 1571 | { | 1705 | { |
| 1572 | destroy_workqueue(rport_event_queue); | 1706 | destroy_workqueue(rport_event_queue); |
| 1573 | } | 1707 | } |
| 1574 | EXPORT_SYMBOL(fc_destroy_rport); | ||
| 1575 | 1708 | ||
| 1709 | /** | ||
| 1710 | * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port | ||
| 1711 | * @rport: The remote port whose I/O should be terminated | ||
| 1712 | */ | ||
| 1576 | void fc_rport_terminate_io(struct fc_rport *rport) | 1713 | void fc_rport_terminate_io(struct fc_rport *rport) |
| 1577 | { | 1714 | { |
| 1578 | struct fc_rport_libfc_priv *rp = rport->dd_data; | 1715 | struct fc_rport_libfc_priv *rpriv = rport->dd_data; |
| 1579 | struct fc_lport *lport = rp->local_port; | 1716 | struct fc_lport *lport = rpriv->local_port; |
| 1580 | 1717 | ||
| 1581 | lport->tt.exch_mgr_reset(lport, 0, rport->port_id); | 1718 | lport->tt.exch_mgr_reset(lport, 0, rport->port_id); |
| 1582 | lport->tt.exch_mgr_reset(lport, rport->port_id, 0); | 1719 | lport->tt.exch_mgr_reset(lport, rport->port_id, 0); |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f1a4246f890c..b7689f3d05f5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -266,6 +266,88 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task) | |||
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | /** | 268 | /** |
| 269 | * iscsi_check_tmf_restrictions - check if a task is affected by TMF | ||
| 270 | * @task: iscsi task | ||
| 271 | * @opcode: opcode to check for | ||
| 272 | * | ||
| 273 | * During TMF a task has to be checked if it's affected. | ||
| 274 | * All unrelated I/O can be passed through, but I/O to the | ||
| 275 | * affected LUN should be restricted. | ||
| 276 | * If 'fast_abort' is set we won't be sending any I/O to the | ||
| 277 | * affected LUN. | ||
| 278 | * Otherwise the target is waiting for all TTTs to be completed, | ||
| 279 | * so we have to send all outstanding Data-Out PDUs to the target. | ||
| 280 | */ | ||
| 281 | static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) | ||
| 282 | { | ||
| 283 | struct iscsi_conn *conn = task->conn; | ||
| 284 | struct iscsi_tm *tmf = &conn->tmhdr; | ||
| 285 | unsigned int hdr_lun; | ||
| 286 | |||
| 287 | if (conn->tmf_state == TMF_INITIAL) | ||
| 288 | return 0; | ||
| 289 | |||
| 290 | if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) | ||
| 291 | return 0; | ||
| 292 | |||
| 293 | switch (ISCSI_TM_FUNC_VALUE(tmf)) { | ||
| 294 | case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: | ||
| 295 | /* | ||
| 296 | * Allow PDUs for unrelated LUNs | ||
| 297 | */ | ||
| 298 | hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); | ||
| 299 | if (hdr_lun != task->sc->device->lun) | ||
| 300 | return 0; | ||
| 301 | /* fall through */ | ||
| 302 | case ISCSI_TM_FUNC_TARGET_WARM_RESET: | ||
| 303 | /* | ||
| 304 | * Fail all SCSI cmd PDUs | ||
| 305 | */ | ||
| 306 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) { | ||
| 307 | iscsi_conn_printk(KERN_INFO, conn, | ||
| 308 | "task [op %x/%x itt " | ||
| 309 | "0x%x/0x%x] " | ||
| 310 | "rejected.\n", | ||
| 311 | task->hdr->opcode, opcode, | ||
| 312 | task->itt, task->hdr_itt); | ||
| 313 | return -EACCES; | ||
| 314 | } | ||
| 315 | /* | ||
| 316 | * And also all data-out PDUs in response to R2T | ||
| 317 | * if fast_abort is set. | ||
| 318 | */ | ||
| 319 | if (conn->session->fast_abort) { | ||
| 320 | iscsi_conn_printk(KERN_INFO, conn, | ||
| 321 | "task [op %x/%x itt " | ||
| 322 | "0x%x/0x%x] fast abort.\n", | ||
| 323 | task->hdr->opcode, opcode, | ||
| 324 | task->itt, task->hdr_itt); | ||
| 325 | return -EACCES; | ||
| 326 | } | ||
| 327 | break; | ||
| 328 | case ISCSI_TM_FUNC_ABORT_TASK: | ||
| 329 | /* | ||
| 330 | * the caller has already checked if the task | ||
| 331 | * they want to abort was in the pending queue so if | ||
| 332 | * we are here the cmd pdu has gone out already, and | ||
| 333 | * we will only hit this for data-outs | ||
| 334 | */ | ||
| 335 | if (opcode == ISCSI_OP_SCSI_DATA_OUT && | ||
| 336 | task->hdr_itt == tmf->rtt) { | ||
| 337 | ISCSI_DBG_SESSION(conn->session, | ||
| 338 | "Preventing task %x/%x from sending " | ||
| 339 | "data-out due to abort task in " | ||
| 340 | "progress\n", task->itt, | ||
| 341 | task->hdr_itt); | ||
| 342 | return -EACCES; | ||
| 343 | } | ||
| 344 | break; | ||
| 345 | } | ||
| 346 | |||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | |||
| 350 | /** | ||
| 269 | * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu | 351 | * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu |
| 270 | * @task: iscsi task | 352 | * @task: iscsi task |
| 271 | * | 353 | * |
| @@ -282,6 +364,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) | |||
| 282 | itt_t itt; | 364 | itt_t itt; |
| 283 | int rc; | 365 | int rc; |
| 284 | 366 | ||
| 367 | rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); | ||
| 368 | if (rc) | ||
| 369 | return rc; | ||
| 370 | |||
| 285 | if (conn->session->tt->alloc_pdu) { | 371 | if (conn->session->tt->alloc_pdu) { |
| 286 | rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); | 372 | rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); |
| 287 | if (rc) | 373 | if (rc) |
| @@ -577,12 +663,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, | |||
| 577 | struct iscsi_session *session = conn->session; | 663 | struct iscsi_session *session = conn->session; |
| 578 | struct iscsi_hdr *hdr = task->hdr; | 664 | struct iscsi_hdr *hdr = task->hdr; |
| 579 | struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; | 665 | struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; |
| 666 | uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; | ||
| 580 | 667 | ||
| 581 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) | 668 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) |
| 582 | return -ENOTCONN; | 669 | return -ENOTCONN; |
| 583 | 670 | ||
| 584 | if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && | 671 | if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT) |
| 585 | hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) | ||
| 586 | nop->exp_statsn = cpu_to_be32(conn->exp_statsn); | 672 | nop->exp_statsn = cpu_to_be32(conn->exp_statsn); |
| 587 | /* | 673 | /* |
| 588 | * pre-format CmdSN for outgoing PDU. | 674 | * pre-format CmdSN for outgoing PDU. |
| @@ -590,9 +676,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, | |||
| 590 | nop->cmdsn = cpu_to_be32(session->cmdsn); | 676 | nop->cmdsn = cpu_to_be32(session->cmdsn); |
| 591 | if (hdr->itt != RESERVED_ITT) { | 677 | if (hdr->itt != RESERVED_ITT) { |
| 592 | /* | 678 | /* |
| 593 | * TODO: We always use immediate, so we never hit this. | 679 | * TODO: We always use immediate for normal session pdus. |
| 594 | * If we start to send tmfs or nops as non-immediate then | 680 | * If we start to send tmfs or nops as non-immediate then |
| 595 | * we should start checking the cmdsn numbers for mgmt tasks. | 681 | * we should start checking the cmdsn numbers for mgmt tasks. |
| 682 | * | ||
| 683 | * During discovery sessions iscsid sends TEXT as non immediate, | ||
| 684 | * but we always only send one PDU at a time. | ||
| 596 | */ | 685 | */ |
| 597 | if (conn->c_stage == ISCSI_CONN_STARTED && | 686 | if (conn->c_stage == ISCSI_CONN_STARTED && |
| 598 | !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { | 687 | !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { |
| @@ -620,22 +709,28 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
| 620 | { | 709 | { |
| 621 | struct iscsi_session *session = conn->session; | 710 | struct iscsi_session *session = conn->session; |
| 622 | struct iscsi_host *ihost = shost_priv(session->host); | 711 | struct iscsi_host *ihost = shost_priv(session->host); |
| 712 | uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; | ||
| 623 | struct iscsi_task *task; | 713 | struct iscsi_task *task; |
| 624 | itt_t itt; | 714 | itt_t itt; |
| 625 | 715 | ||
| 626 | if (session->state == ISCSI_STATE_TERMINATE) | 716 | if (session->state == ISCSI_STATE_TERMINATE) |
| 627 | return NULL; | 717 | return NULL; |
| 628 | 718 | ||
| 629 | if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || | 719 | if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { |
| 630 | hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) | ||
| 631 | /* | 720 | /* |
| 632 | * Login and Text are sent serially, in | 721 | * Login and Text are sent serially, in |
| 633 | * request-followed-by-response sequence. | 722 | * request-followed-by-response sequence. |
| 634 | * Same task can be used. Same ITT must be used. | 723 | * Same task can be used. Same ITT must be used. |
| 635 | * Note that login_task is preallocated at conn_create(). | 724 | * Note that login_task is preallocated at conn_create(). |
| 636 | */ | 725 | */ |
| 726 | if (conn->login_task->state != ISCSI_TASK_FREE) { | ||
| 727 | iscsi_conn_printk(KERN_ERR, conn, "Login/Text in " | ||
| 728 | "progress. Cannot start new task.\n"); | ||
| 729 | return NULL; | ||
| 730 | } | ||
| 731 | |||
| 637 | task = conn->login_task; | 732 | task = conn->login_task; |
| 638 | else { | 733 | } else { |
| 639 | if (session->state != ISCSI_STATE_LOGGED_IN) | 734 | if (session->state != ISCSI_STATE_LOGGED_IN) |
| 640 | return NULL; | 735 | return NULL; |
| 641 | 736 | ||
| @@ -1357,6 +1452,7 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task); | |||
| 1357 | **/ | 1452 | **/ |
| 1358 | static int iscsi_data_xmit(struct iscsi_conn *conn) | 1453 | static int iscsi_data_xmit(struct iscsi_conn *conn) |
| 1359 | { | 1454 | { |
| 1455 | struct iscsi_task *task; | ||
| 1360 | int rc = 0; | 1456 | int rc = 0; |
| 1361 | 1457 | ||
| 1362 | spin_lock_bh(&conn->session->lock); | 1458 | spin_lock_bh(&conn->session->lock); |
| @@ -1394,11 +1490,8 @@ check_mgmt: | |||
| 1394 | 1490 | ||
| 1395 | /* process pending command queue */ | 1491 | /* process pending command queue */ |
| 1396 | while (!list_empty(&conn->cmdqueue)) { | 1492 | while (!list_empty(&conn->cmdqueue)) { |
| 1397 | if (conn->tmf_state == TMF_QUEUED) | 1493 | conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, |
| 1398 | break; | 1494 | running); |
| 1399 | |||
| 1400 | conn->task = list_entry(conn->cmdqueue.next, | ||
| 1401 | struct iscsi_task, running); | ||
| 1402 | list_del_init(&conn->task->running); | 1495 | list_del_init(&conn->task->running); |
| 1403 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { | 1496 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { |
| 1404 | fail_scsi_task(conn->task, DID_IMM_RETRY); | 1497 | fail_scsi_task(conn->task, DID_IMM_RETRY); |
| @@ -1406,7 +1499,7 @@ check_mgmt: | |||
| 1406 | } | 1499 | } |
| 1407 | rc = iscsi_prep_scsi_cmd_pdu(conn->task); | 1500 | rc = iscsi_prep_scsi_cmd_pdu(conn->task); |
| 1408 | if (rc) { | 1501 | if (rc) { |
| 1409 | if (rc == -ENOMEM) { | 1502 | if (rc == -ENOMEM || rc == -EACCES) { |
| 1410 | list_add_tail(&conn->task->running, | 1503 | list_add_tail(&conn->task->running, |
| 1411 | &conn->cmdqueue); | 1504 | &conn->cmdqueue); |
| 1412 | conn->task = NULL; | 1505 | conn->task = NULL; |
| @@ -1428,17 +1521,18 @@ check_mgmt: | |||
| 1428 | } | 1521 | } |
| 1429 | 1522 | ||
| 1430 | while (!list_empty(&conn->requeue)) { | 1523 | while (!list_empty(&conn->requeue)) { |
| 1431 | if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL) | ||
| 1432 | break; | ||
| 1433 | |||
| 1434 | /* | 1524 | /* |
| 1435 | * we always do fastlogout - conn stop code will clean up. | 1525 | * we always do fastlogout - conn stop code will clean up. |
| 1436 | */ | 1526 | */ |
| 1437 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) | 1527 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) |
| 1438 | break; | 1528 | break; |
| 1439 | 1529 | ||
| 1440 | conn->task = list_entry(conn->requeue.next, | 1530 | task = list_entry(conn->requeue.next, struct iscsi_task, |
| 1441 | struct iscsi_task, running); | 1531 | running); |
| 1532 | if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) | ||
| 1533 | break; | ||
| 1534 | |||
| 1535 | conn->task = task; | ||
| 1442 | list_del_init(&conn->task->running); | 1536 | list_del_init(&conn->task->running); |
| 1443 | conn->task->state = ISCSI_TASK_RUNNING; | 1537 | conn->task->state = ISCSI_TASK_RUNNING; |
| 1444 | rc = iscsi_xmit_task(conn); | 1538 | rc = iscsi_xmit_task(conn); |
| @@ -1591,7 +1685,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
| 1591 | if (!ihost->workq) { | 1685 | if (!ihost->workq) { |
| 1592 | reason = iscsi_prep_scsi_cmd_pdu(task); | 1686 | reason = iscsi_prep_scsi_cmd_pdu(task); |
| 1593 | if (reason) { | 1687 | if (reason) { |
| 1594 | if (reason == -ENOMEM) { | 1688 | if (reason == -ENOMEM || reason == -EACCES) { |
| 1595 | reason = FAILURE_OOM; | 1689 | reason = FAILURE_OOM; |
| 1596 | goto prepd_reject; | 1690 | goto prepd_reject; |
| 1597 | } else { | 1691 | } else { |
| @@ -1643,9 +1737,21 @@ fault: | |||
| 1643 | } | 1737 | } |
| 1644 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); | 1738 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); |
| 1645 | 1739 | ||
| 1646 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth) | 1740 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) |
| 1647 | { | 1741 | { |
| 1648 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | 1742 | switch (reason) { |
| 1743 | case SCSI_QDEPTH_DEFAULT: | ||
| 1744 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
| 1745 | break; | ||
| 1746 | case SCSI_QDEPTH_QFULL: | ||
| 1747 | scsi_track_queue_full(sdev, depth); | ||
| 1748 | break; | ||
| 1749 | case SCSI_QDEPTH_RAMP_UP: | ||
| 1750 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
| 1751 | break; | ||
| 1752 | default: | ||
| 1753 | return -EOPNOTSUPP; | ||
| 1754 | } | ||
| 1649 | return sdev->queue_depth; | 1755 | return sdev->queue_depth; |
| 1650 | } | 1756 | } |
| 1651 | EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); | 1757 | EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); |
| @@ -1660,72 +1766,6 @@ int iscsi_target_alloc(struct scsi_target *starget) | |||
| 1660 | } | 1766 | } |
| 1661 | EXPORT_SYMBOL_GPL(iscsi_target_alloc); | 1767 | EXPORT_SYMBOL_GPL(iscsi_target_alloc); |
| 1662 | 1768 | ||
| 1663 | void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) | ||
| 1664 | { | ||
| 1665 | struct iscsi_session *session = cls_session->dd_data; | ||
| 1666 | |||
| 1667 | spin_lock_bh(&session->lock); | ||
| 1668 | if (session->state != ISCSI_STATE_LOGGED_IN) { | ||
| 1669 | session->state = ISCSI_STATE_RECOVERY_FAILED; | ||
| 1670 | if (session->leadconn) | ||
| 1671 | wake_up(&session->leadconn->ehwait); | ||
| 1672 | } | ||
| 1673 | spin_unlock_bh(&session->lock); | ||
| 1674 | } | ||
| 1675 | EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); | ||
| 1676 | |||
| 1677 | int iscsi_eh_target_reset(struct scsi_cmnd *sc) | ||
| 1678 | { | ||
| 1679 | struct iscsi_cls_session *cls_session; | ||
| 1680 | struct iscsi_session *session; | ||
| 1681 | struct iscsi_conn *conn; | ||
| 1682 | |||
| 1683 | cls_session = starget_to_session(scsi_target(sc->device)); | ||
| 1684 | session = cls_session->dd_data; | ||
| 1685 | conn = session->leadconn; | ||
| 1686 | |||
| 1687 | mutex_lock(&session->eh_mutex); | ||
| 1688 | spin_lock_bh(&session->lock); | ||
| 1689 | if (session->state == ISCSI_STATE_TERMINATE) { | ||
| 1690 | failed: | ||
| 1691 | ISCSI_DBG_EH(session, | ||
| 1692 | "failing target reset: Could not log back into " | ||
| 1693 | "target [age %d]\n", | ||
| 1694 | session->age); | ||
| 1695 | spin_unlock_bh(&session->lock); | ||
| 1696 | mutex_unlock(&session->eh_mutex); | ||
| 1697 | return FAILED; | ||
| 1698 | } | ||
| 1699 | |||
| 1700 | spin_unlock_bh(&session->lock); | ||
| 1701 | mutex_unlock(&session->eh_mutex); | ||
| 1702 | /* | ||
| 1703 | * we drop the lock here but the leadconn cannot be destoyed while | ||
| 1704 | * we are in the scsi eh | ||
| 1705 | */ | ||
| 1706 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
| 1707 | |||
| 1708 | ISCSI_DBG_EH(session, "wait for relogin\n"); | ||
| 1709 | wait_event_interruptible(conn->ehwait, | ||
| 1710 | session->state == ISCSI_STATE_TERMINATE || | ||
| 1711 | session->state == ISCSI_STATE_LOGGED_IN || | ||
| 1712 | session->state == ISCSI_STATE_RECOVERY_FAILED); | ||
| 1713 | if (signal_pending(current)) | ||
| 1714 | flush_signals(current); | ||
| 1715 | |||
| 1716 | mutex_lock(&session->eh_mutex); | ||
| 1717 | spin_lock_bh(&session->lock); | ||
| 1718 | if (session->state == ISCSI_STATE_LOGGED_IN) { | ||
| 1719 | ISCSI_DBG_EH(session, | ||
| 1720 | "target reset succeeded\n"); | ||
| 1721 | } else | ||
| 1722 | goto failed; | ||
| 1723 | spin_unlock_bh(&session->lock); | ||
| 1724 | mutex_unlock(&session->eh_mutex); | ||
| 1725 | return SUCCESS; | ||
| 1726 | } | ||
| 1727 | EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); | ||
| 1728 | |||
| 1729 | static void iscsi_tmf_timedout(unsigned long data) | 1769 | static void iscsi_tmf_timedout(unsigned long data) |
| 1730 | { | 1770 | { |
| 1731 | struct iscsi_conn *conn = (struct iscsi_conn *)data; | 1771 | struct iscsi_conn *conn = (struct iscsi_conn *)data; |
| @@ -2108,6 +2148,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
| 2108 | spin_lock_bh(&session->lock); | 2148 | spin_lock_bh(&session->lock); |
| 2109 | fail_scsi_task(task, DID_ABORT); | 2149 | fail_scsi_task(task, DID_ABORT); |
| 2110 | conn->tmf_state = TMF_INITIAL; | 2150 | conn->tmf_state = TMF_INITIAL; |
| 2151 | memset(hdr, 0, sizeof(*hdr)); | ||
| 2111 | spin_unlock_bh(&session->lock); | 2152 | spin_unlock_bh(&session->lock); |
| 2112 | iscsi_start_tx(conn); | 2153 | iscsi_start_tx(conn); |
| 2113 | goto success_unlocked; | 2154 | goto success_unlocked; |
| @@ -2118,6 +2159,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
| 2118 | case TMF_NOT_FOUND: | 2159 | case TMF_NOT_FOUND: |
| 2119 | if (!sc->SCp.ptr) { | 2160 | if (!sc->SCp.ptr) { |
| 2120 | conn->tmf_state = TMF_INITIAL; | 2161 | conn->tmf_state = TMF_INITIAL; |
| 2162 | memset(hdr, 0, sizeof(*hdr)); | ||
| 2121 | /* task completed before tmf abort response */ | 2163 | /* task completed before tmf abort response */ |
| 2122 | ISCSI_DBG_EH(session, "sc completed while abort in " | 2164 | ISCSI_DBG_EH(session, "sc completed while abort in " |
| 2123 | "progress\n"); | 2165 | "progress\n"); |
| @@ -2212,6 +2254,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
| 2212 | iscsi_suspend_tx(conn); | 2254 | iscsi_suspend_tx(conn); |
| 2213 | 2255 | ||
| 2214 | spin_lock_bh(&session->lock); | 2256 | spin_lock_bh(&session->lock); |
| 2257 | memset(hdr, 0, sizeof(*hdr)); | ||
| 2215 | fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); | 2258 | fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); |
| 2216 | conn->tmf_state = TMF_INITIAL; | 2259 | conn->tmf_state = TMF_INITIAL; |
| 2217 | spin_unlock_bh(&session->lock); | 2260 | spin_unlock_bh(&session->lock); |
| @@ -2229,6 +2272,172 @@ done: | |||
| 2229 | } | 2272 | } |
| 2230 | EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); | 2273 | EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); |
| 2231 | 2274 | ||
| 2275 | void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) | ||
| 2276 | { | ||
| 2277 | struct iscsi_session *session = cls_session->dd_data; | ||
| 2278 | |||
| 2279 | spin_lock_bh(&session->lock); | ||
| 2280 | if (session->state != ISCSI_STATE_LOGGED_IN) { | ||
| 2281 | session->state = ISCSI_STATE_RECOVERY_FAILED; | ||
| 2282 | if (session->leadconn) | ||
| 2283 | wake_up(&session->leadconn->ehwait); | ||
| 2284 | } | ||
| 2285 | spin_unlock_bh(&session->lock); | ||
| 2286 | } | ||
| 2287 | EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); | ||
| 2288 | |||
| 2289 | /** | ||
| 2290 | * iscsi_eh_session_reset - drop session and attempt relogin | ||
| 2291 | * @sc: scsi command | ||
| 2292 | * | ||
| 2293 | * This function will wait for a relogin, session termination from | ||
| 2294 | * userspace, or a recovery/replacement timeout. | ||
| 2295 | */ | ||
| 2296 | static int iscsi_eh_session_reset(struct scsi_cmnd *sc) | ||
| 2297 | { | ||
| 2298 | struct iscsi_cls_session *cls_session; | ||
| 2299 | struct iscsi_session *session; | ||
| 2300 | struct iscsi_conn *conn; | ||
| 2301 | |||
| 2302 | cls_session = starget_to_session(scsi_target(sc->device)); | ||
| 2303 | session = cls_session->dd_data; | ||
| 2304 | conn = session->leadconn; | ||
| 2305 | |||
| 2306 | mutex_lock(&session->eh_mutex); | ||
| 2307 | spin_lock_bh(&session->lock); | ||
| 2308 | if (session->state == ISCSI_STATE_TERMINATE) { | ||
| 2309 | failed: | ||
| 2310 | ISCSI_DBG_EH(session, | ||
| 2311 | "failing session reset: Could not log back into " | ||
| 2312 | "%s, %s [age %d]\n", session->targetname, | ||
| 2313 | conn->persistent_address, session->age); | ||
| 2314 | spin_unlock_bh(&session->lock); | ||
| 2315 | mutex_unlock(&session->eh_mutex); | ||
| 2316 | return FAILED; | ||
| 2317 | } | ||
| 2318 | |||
| 2319 | spin_unlock_bh(&session->lock); | ||
| 2320 | mutex_unlock(&session->eh_mutex); | ||
| 2321 | /* | ||
| 2322 | * we drop the lock here but the leadconn cannot be destoyed while | ||
| 2323 | * we are in the scsi eh | ||
| 2324 | */ | ||
| 2325 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
| 2326 | |||
| 2327 | ISCSI_DBG_EH(session, "wait for relogin\n"); | ||
| 2328 | wait_event_interruptible(conn->ehwait, | ||
| 2329 | session->state == ISCSI_STATE_TERMINATE || | ||
| 2330 | session->state == ISCSI_STATE_LOGGED_IN || | ||
| 2331 | session->state == ISCSI_STATE_RECOVERY_FAILED); | ||
| 2332 | if (signal_pending(current)) | ||
| 2333 | flush_signals(current); | ||
| 2334 | |||
| 2335 | mutex_lock(&session->eh_mutex); | ||
| 2336 | spin_lock_bh(&session->lock); | ||
| 2337 | if (session->state == ISCSI_STATE_LOGGED_IN) { | ||
| 2338 | ISCSI_DBG_EH(session, | ||
| 2339 | "session reset succeeded for %s,%s\n", | ||
| 2340 | session->targetname, conn->persistent_address); | ||
| 2341 | } else | ||
| 2342 | goto failed; | ||
| 2343 | spin_unlock_bh(&session->lock); | ||
| 2344 | mutex_unlock(&session->eh_mutex); | ||
| 2345 | return SUCCESS; | ||
| 2346 | } | ||
| 2347 | |||
| 2348 | static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) | ||
| 2349 | { | ||
| 2350 | memset(hdr, 0, sizeof(*hdr)); | ||
| 2351 | hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; | ||
| 2352 | hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; | ||
| 2353 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; | ||
| 2354 | hdr->rtt = RESERVED_ITT; | ||
| 2355 | } | ||
| 2356 | |||
| 2357 | /** | ||
| 2358 | * iscsi_eh_target_reset - reset target | ||
| 2359 | * @sc: scsi command | ||
| 2360 | * | ||
| 2361 | * This will attempt to send a warm target reset. If that fails | ||
| 2362 | * then we will drop the session and attempt ERL0 recovery. | ||
| 2363 | */ | ||
| 2364 | int iscsi_eh_target_reset(struct scsi_cmnd *sc) | ||
| 2365 | { | ||
| 2366 | struct iscsi_cls_session *cls_session; | ||
| 2367 | struct iscsi_session *session; | ||
| 2368 | struct iscsi_conn *conn; | ||
| 2369 | struct iscsi_tm *hdr; | ||
| 2370 | int rc = FAILED; | ||
| 2371 | |||
| 2372 | cls_session = starget_to_session(scsi_target(sc->device)); | ||
| 2373 | session = cls_session->dd_data; | ||
| 2374 | |||
| 2375 | ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, | ||
| 2376 | session->targetname); | ||
| 2377 | |||
| 2378 | mutex_lock(&session->eh_mutex); | ||
| 2379 | spin_lock_bh(&session->lock); | ||
| 2380 | /* | ||
| 2381 | * Just check if we are not logged in. We cannot check for | ||
| 2382 | * the phase because the reset could come from a ioctl. | ||
| 2383 | */ | ||
| 2384 | if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) | ||
| 2385 | goto unlock; | ||
| 2386 | conn = session->leadconn; | ||
| 2387 | |||
| 2388 | /* only have one tmf outstanding at a time */ | ||
| 2389 | if (conn->tmf_state != TMF_INITIAL) | ||
| 2390 | goto unlock; | ||
| 2391 | conn->tmf_state = TMF_QUEUED; | ||
| 2392 | |||
| 2393 | hdr = &conn->tmhdr; | ||
| 2394 | iscsi_prep_tgt_reset_pdu(sc, hdr); | ||
| 2395 | |||
| 2396 | if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, | ||
| 2397 | session->tgt_reset_timeout)) { | ||
| 2398 | rc = FAILED; | ||
| 2399 | goto unlock; | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | switch (conn->tmf_state) { | ||
| 2403 | case TMF_SUCCESS: | ||
| 2404 | break; | ||
| 2405 | case TMF_TIMEDOUT: | ||
| 2406 | spin_unlock_bh(&session->lock); | ||
| 2407 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
| 2408 | goto done; | ||
| 2409 | default: | ||
| 2410 | conn->tmf_state = TMF_INITIAL; | ||
| 2411 | goto unlock; | ||
| 2412 | } | ||
| 2413 | |||
| 2414 | rc = SUCCESS; | ||
| 2415 | spin_unlock_bh(&session->lock); | ||
| 2416 | |||
| 2417 | iscsi_suspend_tx(conn); | ||
| 2418 | |||
| 2419 | spin_lock_bh(&session->lock); | ||
| 2420 | memset(hdr, 0, sizeof(*hdr)); | ||
| 2421 | fail_scsi_tasks(conn, -1, DID_ERROR); | ||
| 2422 | conn->tmf_state = TMF_INITIAL; | ||
| 2423 | spin_unlock_bh(&session->lock); | ||
| 2424 | |||
| 2425 | iscsi_start_tx(conn); | ||
| 2426 | goto done; | ||
| 2427 | |||
| 2428 | unlock: | ||
| 2429 | spin_unlock_bh(&session->lock); | ||
| 2430 | done: | ||
| 2431 | ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, | ||
| 2432 | rc == SUCCESS ? "SUCCESS" : "FAILED"); | ||
| 2433 | mutex_unlock(&session->eh_mutex); | ||
| 2434 | |||
| 2435 | if (rc == FAILED) | ||
| 2436 | rc = iscsi_eh_session_reset(sc); | ||
| 2437 | return rc; | ||
| 2438 | } | ||
| 2439 | EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); | ||
| 2440 | |||
| 2232 | /* | 2441 | /* |
| 2233 | * Pre-allocate a pool of @max items of @item_size. By default, the pool | 2442 | * Pre-allocate a pool of @max items of @item_size. By default, the pool |
| 2234 | * should be accessed via kfifo_{get,put} on q->queue. | 2443 | * should be accessed via kfifo_{get,put} on q->queue. |
| @@ -2495,6 +2704,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, | |||
| 2495 | session->host = shost; | 2704 | session->host = shost; |
| 2496 | session->state = ISCSI_STATE_FREE; | 2705 | session->state = ISCSI_STATE_FREE; |
| 2497 | session->fast_abort = 1; | 2706 | session->fast_abort = 1; |
| 2707 | session->tgt_reset_timeout = 30; | ||
| 2498 | session->lu_reset_timeout = 15; | 2708 | session->lu_reset_timeout = 15; |
| 2499 | session->abort_timeout = 10; | 2709 | session->abort_timeout = 10; |
| 2500 | session->scsi_cmds_max = scsi_cmds; | 2710 | session->scsi_cmds_max = scsi_cmds; |
| @@ -2856,6 +3066,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
| 2856 | spin_lock_bh(&session->lock); | 3066 | spin_lock_bh(&session->lock); |
| 2857 | fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); | 3067 | fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); |
| 2858 | fail_mgmt_tasks(session, conn); | 3068 | fail_mgmt_tasks(session, conn); |
| 3069 | memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); | ||
| 2859 | spin_unlock_bh(&session->lock); | 3070 | spin_unlock_bh(&session->lock); |
| 2860 | mutex_unlock(&session->eh_mutex); | 3071 | mutex_unlock(&session->eh_mutex); |
| 2861 | } | 3072 | } |
| @@ -2932,6 +3143,9 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, | |||
| 2932 | case ISCSI_PARAM_LU_RESET_TMO: | 3143 | case ISCSI_PARAM_LU_RESET_TMO: |
| 2933 | sscanf(buf, "%d", &session->lu_reset_timeout); | 3144 | sscanf(buf, "%d", &session->lu_reset_timeout); |
| 2934 | break; | 3145 | break; |
| 3146 | case ISCSI_PARAM_TGT_RESET_TMO: | ||
| 3147 | sscanf(buf, "%d", &session->tgt_reset_timeout); | ||
| 3148 | break; | ||
| 2935 | case ISCSI_PARAM_PING_TMO: | 3149 | case ISCSI_PARAM_PING_TMO: |
| 2936 | sscanf(buf, "%d", &conn->ping_timeout); | 3150 | sscanf(buf, "%d", &conn->ping_timeout); |
| 2937 | break; | 3151 | break; |
| @@ -3031,6 +3245,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, | |||
| 3031 | case ISCSI_PARAM_LU_RESET_TMO: | 3245 | case ISCSI_PARAM_LU_RESET_TMO: |
| 3032 | len = sprintf(buf, "%d\n", session->lu_reset_timeout); | 3246 | len = sprintf(buf, "%d\n", session->lu_reset_timeout); |
| 3033 | break; | 3247 | break; |
| 3248 | case ISCSI_PARAM_TGT_RESET_TMO: | ||
| 3249 | len = sprintf(buf, "%d\n", session->tgt_reset_timeout); | ||
| 3250 | break; | ||
| 3034 | case ISCSI_PARAM_INITIAL_R2T_EN: | 3251 | case ISCSI_PARAM_INITIAL_R2T_EN: |
| 3035 | len = sprintf(buf, "%d\n", session->initial_r2t_en); | 3252 | len = sprintf(buf, "%d\n", session->initial_r2t_en); |
| 3036 | break; | 3253 | break; |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2e0746d70303..ca25ee5190b0 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
| @@ -1004,7 +1004,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) | |||
| 1004 | * iscsi_tcp_task_xmit - xmit normal PDU task | 1004 | * iscsi_tcp_task_xmit - xmit normal PDU task |
| 1005 | * @task: iscsi command task | 1005 | * @task: iscsi command task |
| 1006 | * | 1006 | * |
| 1007 | * We're expected to return 0 when everything was transmitted succesfully, | 1007 | * We're expected to return 0 when everything was transmitted successfully, |
| 1008 | * -EAGAIN if there's still data in the queue, or != 0 for any other kind | 1008 | * -EAGAIN if there's still data in the queue, or != 0 for any other kind |
| 1009 | * of error. | 1009 | * of error. |
| 1010 | */ | 1010 | */ |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 1c558d3bce18..14b13196b22d 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
| @@ -820,10 +820,14 @@ void sas_slave_destroy(struct scsi_device *scsi_dev) | |||
| 820 | ata_port_disable(dev->sata_dev.ap); | 820 | ata_port_disable(dev->sata_dev.ap); |
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) | 823 | int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth, |
| 824 | int reason) | ||
| 824 | { | 825 | { |
| 825 | int res = min(new_depth, SAS_MAX_QD); | 826 | int res = min(new_depth, SAS_MAX_QD); |
| 826 | 827 | ||
| 828 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 829 | return -EOPNOTSUPP; | ||
| 830 | |||
| 827 | if (scsi_dev->tagged_supported) | 831 | if (scsi_dev->tagged_supported) |
| 828 | scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), | 832 | scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), |
| 829 | res); | 833 | res); |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index aa10f7951634..1cc23a69db5e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
| @@ -109,7 +109,8 @@ struct hbq_dmabuf { | |||
| 109 | struct lpfc_dmabuf dbuf; | 109 | struct lpfc_dmabuf dbuf; |
| 110 | uint32_t size; | 110 | uint32_t size; |
| 111 | uint32_t tag; | 111 | uint32_t tag; |
| 112 | struct lpfc_rcqe rcqe; | 112 | struct lpfc_cq_event cq_event; |
| 113 | unsigned long time_stamp; | ||
| 113 | }; | 114 | }; |
| 114 | 115 | ||
| 115 | /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ | 116 | /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ |
| @@ -201,6 +202,7 @@ struct lpfc_stats { | |||
| 201 | uint32_t elsRcvLIRR; | 202 | uint32_t elsRcvLIRR; |
| 202 | uint32_t elsRcvRPS; | 203 | uint32_t elsRcvRPS; |
| 203 | uint32_t elsRcvRPL; | 204 | uint32_t elsRcvRPL; |
| 205 | uint32_t elsRcvRRQ; | ||
| 204 | uint32_t elsXmitFLOGI; | 206 | uint32_t elsXmitFLOGI; |
| 205 | uint32_t elsXmitFDISC; | 207 | uint32_t elsXmitFDISC; |
| 206 | uint32_t elsXmitPLOGI; | 208 | uint32_t elsXmitPLOGI; |
| @@ -289,8 +291,8 @@ struct lpfc_vport { | |||
| 289 | 291 | ||
| 290 | uint16_t vpi; | 292 | uint16_t vpi; |
| 291 | uint16_t vfi; | 293 | uint16_t vfi; |
| 292 | uint8_t vfi_state; | 294 | uint8_t vpi_state; |
| 293 | #define LPFC_VFI_REGISTERED 0x1 | 295 | #define LPFC_VPI_REGISTERED 0x1 |
| 294 | 296 | ||
| 295 | uint32_t fc_flag; /* FC flags */ | 297 | uint32_t fc_flag; /* FC flags */ |
| 296 | /* Several of these flags are HBA centric and should be moved to | 298 | /* Several of these flags are HBA centric and should be moved to |
| @@ -405,6 +407,7 @@ struct lpfc_vport { | |||
| 405 | uint8_t stat_data_enabled; | 407 | uint8_t stat_data_enabled; |
| 406 | uint8_t stat_data_blocked; | 408 | uint8_t stat_data_blocked; |
| 407 | struct list_head rcv_buffer_list; | 409 | struct list_head rcv_buffer_list; |
| 410 | unsigned long rcv_buffer_time_stamp; | ||
| 408 | uint32_t vport_flag; | 411 | uint32_t vport_flag; |
| 409 | #define STATIC_VPORT 1 | 412 | #define STATIC_VPORT 1 |
| 410 | }; | 413 | }; |
| @@ -527,13 +530,16 @@ struct lpfc_hba { | |||
| 527 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 530 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
| 528 | #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ | 531 | #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ |
| 529 | #define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ | 532 | #define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ |
| 530 | #define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ | 533 | #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ |
| 531 | #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ | 534 | #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ |
| 532 | #define FCP_XRI_ABORT_EVENT 0x20 | 535 | #define FCP_XRI_ABORT_EVENT 0x20 |
| 533 | #define ELS_XRI_ABORT_EVENT 0x40 | 536 | #define ELS_XRI_ABORT_EVENT 0x40 |
| 534 | #define ASYNC_EVENT 0x80 | 537 | #define ASYNC_EVENT 0x80 |
| 535 | #define LINK_DISABLED 0x100 /* Link disabled by user */ | 538 | #define LINK_DISABLED 0x100 /* Link disabled by user */ |
| 536 | #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ | 539 | #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ |
| 540 | #define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ | ||
| 541 | #define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ | ||
| 542 | uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ | ||
| 537 | struct lpfc_dmabuf slim2p; | 543 | struct lpfc_dmabuf slim2p; |
| 538 | 544 | ||
| 539 | MAILBOX_t *mbox; | 545 | MAILBOX_t *mbox; |
| @@ -551,6 +557,7 @@ struct lpfc_hba { | |||
| 551 | uint8_t fc_linkspeed; /* Link speed after last READ_LA */ | 557 | uint8_t fc_linkspeed; /* Link speed after last READ_LA */ |
| 552 | 558 | ||
| 553 | uint32_t fc_eventTag; /* event tag for link attention */ | 559 | uint32_t fc_eventTag; /* event tag for link attention */ |
| 560 | uint32_t link_events; | ||
| 554 | 561 | ||
| 555 | /* These fields used to be binfo */ | 562 | /* These fields used to be binfo */ |
| 556 | uint32_t fc_pref_DID; /* preferred D_ID */ | 563 | uint32_t fc_pref_DID; /* preferred D_ID */ |
| @@ -604,8 +611,8 @@ struct lpfc_hba { | |||
| 604 | uint32_t cfg_enable_hba_reset; | 611 | uint32_t cfg_enable_hba_reset; |
| 605 | uint32_t cfg_enable_hba_heartbeat; | 612 | uint32_t cfg_enable_hba_heartbeat; |
| 606 | uint32_t cfg_enable_bg; | 613 | uint32_t cfg_enable_bg; |
| 607 | uint32_t cfg_enable_fip; | ||
| 608 | uint32_t cfg_log_verbose; | 614 | uint32_t cfg_log_verbose; |
| 615 | uint32_t cfg_aer_support; | ||
| 609 | 616 | ||
| 610 | lpfc_vpd_t vpd; /* vital product data */ | 617 | lpfc_vpd_t vpd; /* vital product data */ |
| 611 | 618 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e1a30a16a9fa..91542f786edf 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -23,12 +23,14 @@ | |||
| 23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
| 24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
| 25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 26 | #include <linux/aer.h> | ||
| 26 | 27 | ||
| 27 | #include <scsi/scsi.h> | 28 | #include <scsi/scsi.h> |
| 28 | #include <scsi/scsi_device.h> | 29 | #include <scsi/scsi_device.h> |
| 29 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
| 30 | #include <scsi/scsi_tcq.h> | 31 | #include <scsi/scsi_tcq.h> |
| 31 | #include <scsi/scsi_transport_fc.h> | 32 | #include <scsi/scsi_transport_fc.h> |
| 33 | #include <scsi/fc/fc_fs.h> | ||
| 32 | 34 | ||
| 33 | #include "lpfc_hw4.h" | 35 | #include "lpfc_hw4.h" |
| 34 | #include "lpfc_hw.h" | 36 | #include "lpfc_hw.h" |
| @@ -98,6 +100,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, | |||
| 98 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); | 100 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); |
| 99 | } | 101 | } |
| 100 | 102 | ||
| 103 | /** | ||
| 104 | * lpfc_enable_fip_show - Return the fip mode of the HBA | ||
| 105 | * @dev: class unused variable. | ||
| 106 | * @attr: device attribute, not used. | ||
| 107 | * @buf: on return contains the module description text. | ||
| 108 | * | ||
| 109 | * Returns: size of formatted string. | ||
| 110 | **/ | ||
| 111 | static ssize_t | ||
| 112 | lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, | ||
| 113 | char *buf) | ||
| 114 | { | ||
| 115 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 116 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
| 117 | struct lpfc_hba *phba = vport->phba; | ||
| 118 | |||
| 119 | if (phba->hba_flag & HBA_FIP_SUPPORT) | ||
| 120 | return snprintf(buf, PAGE_SIZE, "1\n"); | ||
| 121 | else | ||
| 122 | return snprintf(buf, PAGE_SIZE, "0\n"); | ||
| 123 | } | ||
| 124 | |||
| 101 | static ssize_t | 125 | static ssize_t |
| 102 | lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, | 126 | lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, |
| 103 | char *buf) | 127 | char *buf) |
| @@ -654,7 +678,7 @@ lpfc_selective_reset(struct lpfc_hba *phba) | |||
| 654 | * Notes: | 678 | * Notes: |
| 655 | * Assumes any error from lpfc_selective_reset() will be negative. | 679 | * Assumes any error from lpfc_selective_reset() will be negative. |
| 656 | * If lpfc_selective_reset() returns zero then the length of the buffer | 680 | * If lpfc_selective_reset() returns zero then the length of the buffer |
| 657 | * is returned which indicates succcess | 681 | * is returned which indicates success |
| 658 | * | 682 | * |
| 659 | * Returns: | 683 | * Returns: |
| 660 | * -EINVAL if the buffer does not contain the string "selective" | 684 | * -EINVAL if the buffer does not contain the string "selective" |
| @@ -762,9 +786,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, | |||
| 762 | } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) | 786 | } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) |
| 763 | status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); | 787 | status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); |
| 764 | else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) | 788 | else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) |
| 765 | status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); | 789 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 790 | return -EINVAL; | ||
| 791 | else | ||
| 792 | status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); | ||
| 766 | else if (strncmp(buf, "error", sizeof("error") - 1) == 0) | 793 | else if (strncmp(buf, "error", sizeof("error") - 1) == 0) |
| 767 | status = lpfc_do_offline(phba, LPFC_EVT_KILL); | 794 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 795 | return -EINVAL; | ||
| 796 | else | ||
| 797 | status = lpfc_do_offline(phba, LPFC_EVT_KILL); | ||
| 768 | else | 798 | else |
| 769 | return -EINVAL; | 799 | return -EINVAL; |
| 770 | 800 | ||
| @@ -1126,6 +1156,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
| 1126 | if ((val & 0x3) != val) | 1156 | if ((val & 0x3) != val) |
| 1127 | return -EINVAL; | 1157 | return -EINVAL; |
| 1128 | 1158 | ||
| 1159 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
| 1160 | val = 0; | ||
| 1161 | |||
| 1129 | spin_lock_irq(&phba->hbalock); | 1162 | spin_lock_irq(&phba->hbalock); |
| 1130 | 1163 | ||
| 1131 | old_val = phba->cfg_poll; | 1164 | old_val = phba->cfg_poll; |
| @@ -1589,6 +1622,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO, | |||
| 1589 | static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); | 1622 | static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); |
| 1590 | static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); | 1623 | static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); |
| 1591 | static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); | 1624 | static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); |
| 1625 | static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL); | ||
| 1592 | static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, | 1626 | static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, |
| 1593 | lpfc_board_mode_show, lpfc_board_mode_store); | 1627 | lpfc_board_mode_show, lpfc_board_mode_store); |
| 1594 | static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); | 1628 | static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); |
| @@ -2759,6 +2793,196 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, | |||
| 2759 | lpfc_link_speed_show, lpfc_link_speed_store); | 2793 | lpfc_link_speed_show, lpfc_link_speed_store); |
| 2760 | 2794 | ||
| 2761 | /* | 2795 | /* |
| 2796 | # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) | ||
| 2797 | # 0 = aer disabled or not supported | ||
| 2798 | # 1 = aer supported and enabled (default) | ||
| 2799 | # Value range is [0,1]. Default value is 1. | ||
| 2800 | */ | ||
| 2801 | |||
| 2802 | /** | ||
| 2803 | * lpfc_aer_support_store - Set the adapter for aer support | ||
| 2804 | * | ||
| 2805 | * @dev: class device that is converted into a Scsi_host. | ||
| 2806 | * @attr: device attribute, not used. | ||
| 2807 | * @buf: containing the string "selective". | ||
| 2808 | * @count: unused variable. | ||
| 2809 | * | ||
| 2810 | * Description: | ||
| 2811 | * If the val is 1 and currently the device's AER capability was not | ||
| 2812 | * enabled, invoke the kernel's enable AER helper routine, trying to | ||
| 2813 | * enable the device's AER capability. If the helper routine enabling | ||
| 2814 | * AER returns success, update the device's cfg_aer_support flag to | ||
| 2815 | * indicate AER is supported by the device; otherwise, if the device | ||
| 2816 | * AER capability is already enabled to support AER, then do nothing. | ||
| 2817 | * | ||
| 2818 | * If the val is 0 and currently the device's AER support was enabled, | ||
| 2819 | * invoke the kernel's disable AER helper routine. After that, update | ||
| 2820 | * the device's cfg_aer_support flag to indicate AER is not supported | ||
| 2821 | * by the device; otherwise, if the device AER capability is already | ||
| 2822 | * disabled from supporting AER, then do nothing. | ||
| 2823 | * | ||
| 2824 | * Returns: | ||
| 2825 | * length of the buf on success if val is in range the intended mode | ||
| 2826 | * is supported. | ||
| 2827 | * -EINVAL if val out of range or intended mode is not supported. | ||
| 2828 | **/ | ||
| 2829 | static ssize_t | ||
| 2830 | lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, | ||
| 2831 | const char *buf, size_t count) | ||
| 2832 | { | ||
| 2833 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 2834 | struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; | ||
| 2835 | struct lpfc_hba *phba = vport->phba; | ||
| 2836 | int val = 0, rc = -EINVAL; | ||
| 2837 | |||
| 2838 | /* AER not supported on OC devices yet */ | ||
| 2839 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) | ||
| 2840 | return -EPERM; | ||
| 2841 | if (!isdigit(buf[0])) | ||
| 2842 | return -EINVAL; | ||
| 2843 | if (sscanf(buf, "%i", &val) != 1) | ||
| 2844 | return -EINVAL; | ||
| 2845 | |||
| 2846 | switch (val) { | ||
| 2847 | case 0: | ||
| 2848 | if (phba->hba_flag & HBA_AER_ENABLED) { | ||
| 2849 | rc = pci_disable_pcie_error_reporting(phba->pcidev); | ||
| 2850 | if (!rc) { | ||
| 2851 | spin_lock_irq(&phba->hbalock); | ||
| 2852 | phba->hba_flag &= ~HBA_AER_ENABLED; | ||
| 2853 | spin_unlock_irq(&phba->hbalock); | ||
| 2854 | phba->cfg_aer_support = 0; | ||
| 2855 | rc = strlen(buf); | ||
| 2856 | } else | ||
| 2857 | rc = -EPERM; | ||
| 2858 | } else { | ||
| 2859 | phba->cfg_aer_support = 0; | ||
| 2860 | rc = strlen(buf); | ||
| 2861 | } | ||
| 2862 | break; | ||
| 2863 | case 1: | ||
| 2864 | if (!(phba->hba_flag & HBA_AER_ENABLED)) { | ||
| 2865 | rc = pci_enable_pcie_error_reporting(phba->pcidev); | ||
| 2866 | if (!rc) { | ||
| 2867 | spin_lock_irq(&phba->hbalock); | ||
| 2868 | phba->hba_flag |= HBA_AER_ENABLED; | ||
| 2869 | spin_unlock_irq(&phba->hbalock); | ||
| 2870 | phba->cfg_aer_support = 1; | ||
| 2871 | rc = strlen(buf); | ||
| 2872 | } else | ||
| 2873 | rc = -EPERM; | ||
| 2874 | } else { | ||
| 2875 | phba->cfg_aer_support = 1; | ||
| 2876 | rc = strlen(buf); | ||
| 2877 | } | ||
| 2878 | break; | ||
| 2879 | default: | ||
| 2880 | rc = -EINVAL; | ||
| 2881 | break; | ||
| 2882 | } | ||
| 2883 | return rc; | ||
| 2884 | } | ||
| 2885 | |||
| 2886 | static int lpfc_aer_support = 1; | ||
| 2887 | module_param(lpfc_aer_support, int, 1); | ||
| 2888 | MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); | ||
| 2889 | lpfc_param_show(aer_support) | ||
| 2890 | |||
| 2891 | /** | ||
| 2892 | * lpfc_aer_support_init - Set the initial adapters aer support flag | ||
| 2893 | * @phba: lpfc_hba pointer. | ||
| 2894 | * @val: link speed value. | ||
| 2895 | * | ||
| 2896 | * Description: | ||
| 2897 | * If val is in a valid range [0,1], then set the adapter's initial | ||
| 2898 | * cfg_aer_support field. It will be up to the driver's probe_one | ||
| 2899 | * routine to determine whether the device's AER support can be set | ||
| 2900 | * or not. | ||
| 2901 | * | ||
| 2902 | * Notes: | ||
| 2903 | * If the value is not in range log a kernel error message, and | ||
| 2904 | * choose the default value of setting AER support and return. | ||
| 2905 | * | ||
| 2906 | * Returns: | ||
| 2907 | * zero if val saved. | ||
| 2908 | * -EINVAL val out of range | ||
| 2909 | **/ | ||
| 2910 | static int | ||
| 2911 | lpfc_aer_support_init(struct lpfc_hba *phba, int val) | ||
| 2912 | { | ||
| 2913 | /* AER not supported on OC devices yet */ | ||
| 2914 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { | ||
| 2915 | phba->cfg_aer_support = 0; | ||
| 2916 | return -EPERM; | ||
| 2917 | } | ||
| 2918 | |||
| 2919 | if (val == 0 || val == 1) { | ||
| 2920 | phba->cfg_aer_support = val; | ||
| 2921 | return 0; | ||
| 2922 | } | ||
| 2923 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 2924 | "2712 lpfc_aer_support attribute value %d out " | ||
| 2925 | "of range, allowed values are 0|1, setting it " | ||
| 2926 | "to default value of 1\n", val); | ||
| 2927 | /* By default, try to enable AER on a device */ | ||
| 2928 | phba->cfg_aer_support = 1; | ||
| 2929 | return -EINVAL; | ||
| 2930 | } | ||
| 2931 | |||
| 2932 | static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR, | ||
| 2933 | lpfc_aer_support_show, lpfc_aer_support_store); | ||
| 2934 | |||
| 2935 | /** | ||
| 2936 | * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device | ||
| 2937 | * @dev: class device that is converted into a Scsi_host. | ||
| 2938 | * @attr: device attribute, not used. | ||
| 2939 | * @buf: containing the string "selective". | ||
| 2940 | * @count: unused variable. | ||
| 2941 | * | ||
| 2942 | * Description: | ||
| 2943 | * If the @buf contains 1 and the device currently has the AER support | ||
| 2944 | * enabled, then invokes the kernel AER helper routine | ||
| 2945 | * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable | ||
| 2946 | * error status register. | ||
| 2947 | * | ||
| 2948 | * Notes: | ||
| 2949 | * | ||
| 2950 | * Returns: | ||
| 2951 | * -EINVAL if the buf does not contain the 1 or the device is not currently | ||
| 2952 | * enabled with the AER support. | ||
| 2953 | **/ | ||
| 2954 | static ssize_t | ||
| 2955 | lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, | ||
| 2956 | const char *buf, size_t count) | ||
| 2957 | { | ||
| 2958 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 2959 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
| 2960 | struct lpfc_hba *phba = vport->phba; | ||
| 2961 | int val, rc = -1; | ||
| 2962 | |||
| 2963 | /* AER not supported on OC devices yet */ | ||
| 2964 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) | ||
| 2965 | return -EPERM; | ||
| 2966 | if (!isdigit(buf[0])) | ||
| 2967 | return -EINVAL; | ||
| 2968 | if (sscanf(buf, "%i", &val) != 1) | ||
| 2969 | return -EINVAL; | ||
| 2970 | if (val != 1) | ||
| 2971 | return -EINVAL; | ||
| 2972 | |||
| 2973 | if (phba->hba_flag & HBA_AER_ENABLED) | ||
| 2974 | rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev); | ||
| 2975 | |||
| 2976 | if (rc == 0) | ||
| 2977 | return strlen(buf); | ||
| 2978 | else | ||
| 2979 | return -EPERM; | ||
| 2980 | } | ||
| 2981 | |||
| 2982 | static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, | ||
| 2983 | lpfc_aer_cleanup_state); | ||
| 2984 | |||
| 2985 | /* | ||
| 2762 | # lpfc_fcp_class: Determines FC class to use for the FCP protocol. | 2986 | # lpfc_fcp_class: Determines FC class to use for the FCP protocol. |
| 2763 | # Value range is [2,3]. Default value is 3. | 2987 | # Value range is [2,3]. Default value is 3. |
| 2764 | */ | 2988 | */ |
| @@ -2846,7 +3070,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " | |||
| 2846 | # identifies what rctl value to configure the additional ring for. | 3070 | # identifies what rctl value to configure the additional ring for. |
| 2847 | # Value range is [1,0xff]. Default value is 4 (Unsolicated Data). | 3071 | # Value range is [1,0xff]. Default value is 4 (Unsolicated Data). |
| 2848 | */ | 3072 | */ |
| 2849 | LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, | 3073 | LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1, |
| 2850 | 255, "Identifies RCTL for additional ring configuration"); | 3074 | 255, "Identifies RCTL for additional ring configuration"); |
| 2851 | 3075 | ||
| 2852 | /* | 3076 | /* |
| @@ -2854,7 +3078,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, | |||
| 2854 | # identifies what type value to configure the additional ring for. | 3078 | # identifies what type value to configure the additional ring for. |
| 2855 | # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). | 3079 | # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). |
| 2856 | */ | 3080 | */ |
| 2857 | LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1, | 3081 | LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, |
| 2858 | 255, "Identifies TYPE for additional ring configuration"); | 3082 | 255, "Identifies TYPE for additional ring configuration"); |
| 2859 | 3083 | ||
| 2860 | /* | 3084 | /* |
| @@ -2947,15 +3171,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); | |||
| 2947 | LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); | 3171 | LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); |
| 2948 | 3172 | ||
| 2949 | /* | 3173 | /* |
| 2950 | # lpfc_enable_fip: When set, FIP is required to start discovery. If not | ||
| 2951 | # set, the driver will add an FCF record manually if the port has no | ||
| 2952 | # FCF records available and start discovery. | ||
| 2953 | # Value range is [0,1]. Default value is 1 (enabled) | ||
| 2954 | */ | ||
| 2955 | LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery"); | ||
| 2956 | |||
| 2957 | |||
| 2958 | /* | ||
| 2959 | # lpfc_prot_mask: i | 3174 | # lpfc_prot_mask: i |
| 2960 | # - Bit mask of host protection capabilities used to register with the | 3175 | # - Bit mask of host protection capabilities used to register with the |
| 2961 | # SCSI mid-layer | 3176 | # SCSI mid-layer |
| @@ -3013,6 +3228,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
| 3013 | &dev_attr_num_discovered_ports, | 3228 | &dev_attr_num_discovered_ports, |
| 3014 | &dev_attr_menlo_mgmt_mode, | 3229 | &dev_attr_menlo_mgmt_mode, |
| 3015 | &dev_attr_lpfc_drvr_version, | 3230 | &dev_attr_lpfc_drvr_version, |
| 3231 | &dev_attr_lpfc_enable_fip, | ||
| 3016 | &dev_attr_lpfc_temp_sensor, | 3232 | &dev_attr_lpfc_temp_sensor, |
| 3017 | &dev_attr_lpfc_log_verbose, | 3233 | &dev_attr_lpfc_log_verbose, |
| 3018 | &dev_attr_lpfc_lun_queue_depth, | 3234 | &dev_attr_lpfc_lun_queue_depth, |
| @@ -3020,7 +3236,6 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
| 3020 | &dev_attr_lpfc_peer_port_login, | 3236 | &dev_attr_lpfc_peer_port_login, |
| 3021 | &dev_attr_lpfc_nodev_tmo, | 3237 | &dev_attr_lpfc_nodev_tmo, |
| 3022 | &dev_attr_lpfc_devloss_tmo, | 3238 | &dev_attr_lpfc_devloss_tmo, |
| 3023 | &dev_attr_lpfc_enable_fip, | ||
| 3024 | &dev_attr_lpfc_fcp_class, | 3239 | &dev_attr_lpfc_fcp_class, |
| 3025 | &dev_attr_lpfc_use_adisc, | 3240 | &dev_attr_lpfc_use_adisc, |
| 3026 | &dev_attr_lpfc_ack0, | 3241 | &dev_attr_lpfc_ack0, |
| @@ -3061,6 +3276,8 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
| 3061 | &dev_attr_lpfc_max_scsicmpl_time, | 3276 | &dev_attr_lpfc_max_scsicmpl_time, |
| 3062 | &dev_attr_lpfc_stat_data_ctrl, | 3277 | &dev_attr_lpfc_stat_data_ctrl, |
| 3063 | &dev_attr_lpfc_prot_sg_seg_cnt, | 3278 | &dev_attr_lpfc_prot_sg_seg_cnt, |
| 3279 | &dev_attr_lpfc_aer_support, | ||
| 3280 | &dev_attr_lpfc_aer_state_cleanup, | ||
| 3064 | NULL, | 3281 | NULL, |
| 3065 | }; | 3282 | }; |
| 3066 | 3283 | ||
| @@ -3073,7 +3290,6 @@ struct device_attribute *lpfc_vport_attrs[] = { | |||
| 3073 | &dev_attr_lpfc_lun_queue_depth, | 3290 | &dev_attr_lpfc_lun_queue_depth, |
| 3074 | &dev_attr_lpfc_nodev_tmo, | 3291 | &dev_attr_lpfc_nodev_tmo, |
| 3075 | &dev_attr_lpfc_devloss_tmo, | 3292 | &dev_attr_lpfc_devloss_tmo, |
| 3076 | &dev_attr_lpfc_enable_fip, | ||
| 3077 | &dev_attr_lpfc_hba_queue_depth, | 3293 | &dev_attr_lpfc_hba_queue_depth, |
| 3078 | &dev_attr_lpfc_peer_port_login, | 3294 | &dev_attr_lpfc_peer_port_login, |
| 3079 | &dev_attr_lpfc_restrict_login, | 3295 | &dev_attr_lpfc_restrict_login, |
| @@ -3147,7 +3363,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
| 3147 | * sysfs_ctlreg_read - Read method for reading from ctlreg | 3363 | * sysfs_ctlreg_read - Read method for reading from ctlreg |
| 3148 | * @kobj: kernel kobject that contains the kernel class device. | 3364 | * @kobj: kernel kobject that contains the kernel class device. |
| 3149 | * @bin_attr: kernel attributes passed to us. | 3365 | * @bin_attr: kernel attributes passed to us. |
| 3150 | * @buf: if succesful contains the data from the adapter IOREG space. | 3366 | * @buf: if successful contains the data from the adapter IOREG space. |
| 3151 | * @off: offset into buffer to beginning of data. | 3367 | * @off: offset into buffer to beginning of data. |
| 3152 | * @count: bytes to transfer. | 3368 | * @count: bytes to transfer. |
| 3153 | * | 3369 | * |
| @@ -3815,7 +4031,11 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
| 3815 | hs->invalid_crc_count -= lso->invalid_crc_count; | 4031 | hs->invalid_crc_count -= lso->invalid_crc_count; |
| 3816 | hs->error_frames -= lso->error_frames; | 4032 | hs->error_frames -= lso->error_frames; |
| 3817 | 4033 | ||
| 3818 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 4034 | if (phba->hba_flag & HBA_FCOE_SUPPORT) { |
| 4035 | hs->lip_count = -1; | ||
| 4036 | hs->nos_count = (phba->link_events >> 1); | ||
| 4037 | hs->nos_count -= lso->link_events; | ||
| 4038 | } else if (phba->fc_topology == TOPOLOGY_LOOP) { | ||
| 3819 | hs->lip_count = (phba->fc_eventTag >> 1); | 4039 | hs->lip_count = (phba->fc_eventTag >> 1); |
| 3820 | hs->lip_count -= lso->link_events; | 4040 | hs->lip_count -= lso->link_events; |
| 3821 | hs->nos_count = -1; | 4041 | hs->nos_count = -1; |
| @@ -3906,7 +4126,10 @@ lpfc_reset_stats(struct Scsi_Host *shost) | |||
| 3906 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; | 4126 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; |
| 3907 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | 4127 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; |
| 3908 | lso->error_frames = pmb->un.varRdLnk.crcCnt; | 4128 | lso->error_frames = pmb->un.varRdLnk.crcCnt; |
| 3909 | lso->link_events = (phba->fc_eventTag >> 1); | 4129 | if (phba->hba_flag & HBA_FCOE_SUPPORT) |
| 4130 | lso->link_events = (phba->link_events >> 1); | ||
| 4131 | else | ||
| 4132 | lso->link_events = (phba->fc_eventTag >> 1); | ||
| 3910 | 4133 | ||
| 3911 | psli->stats_start = get_seconds(); | 4134 | psli->stats_start = get_seconds(); |
| 3912 | 4135 | ||
| @@ -4222,14 +4445,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
| 4222 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); | 4445 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); |
| 4223 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); | 4446 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); |
| 4224 | lpfc_enable_bg_init(phba, lpfc_enable_bg); | 4447 | lpfc_enable_bg_init(phba, lpfc_enable_bg); |
| 4448 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
| 4449 | phba->cfg_poll = 0; | ||
| 4450 | else | ||
| 4225 | phba->cfg_poll = lpfc_poll; | 4451 | phba->cfg_poll = lpfc_poll; |
| 4226 | phba->cfg_soft_wwnn = 0L; | 4452 | phba->cfg_soft_wwnn = 0L; |
| 4227 | phba->cfg_soft_wwpn = 0L; | 4453 | phba->cfg_soft_wwpn = 0L; |
| 4228 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); | 4454 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); |
| 4229 | lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); | 4455 | lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); |
| 4230 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); | 4456 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); |
| 4231 | lpfc_enable_fip_init(phba, lpfc_enable_fip); | ||
| 4232 | lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); | 4457 | lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); |
| 4458 | lpfc_aer_support_init(phba, lpfc_aer_support); | ||
| 4233 | 4459 | ||
| 4234 | return; | 4460 | return; |
| 4235 | } | 4461 | } |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index da6bf5aac9dd..a5d9048235d9 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <scsi/scsi_host.h> | 26 | #include <scsi/scsi_host.h> |
| 27 | #include <scsi/scsi_transport_fc.h> | 27 | #include <scsi/scsi_transport_fc.h> |
| 28 | #include <scsi/scsi_bsg_fc.h> | 28 | #include <scsi/scsi_bsg_fc.h> |
| 29 | #include <scsi/fc/fc_fs.h> | ||
| 29 | 30 | ||
| 30 | #include "lpfc_hw4.h" | 31 | #include "lpfc_hw4.h" |
| 31 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
| @@ -148,8 +149,8 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) | |||
| 148 | cmd->ulpCommand = CMD_GEN_REQUEST64_CR; | 149 | cmd->ulpCommand = CMD_GEN_REQUEST64_CR; |
| 149 | cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); | 150 | cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); |
| 150 | cmd->un.genreq64.w5.hcsw.Dfctl = 0; | 151 | cmd->un.genreq64.w5.hcsw.Dfctl = 0; |
| 151 | cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; | 152 | cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; |
| 152 | cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; | 153 | cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; |
| 153 | cmd->ulpBdeCount = 1; | 154 | cmd->ulpBdeCount = 1; |
| 154 | cmd->ulpLe = 1; | 155 | cmd->ulpLe = 1; |
| 155 | cmd->ulpClass = CLASS3; | 156 | cmd->ulpClass = CLASS3; |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 0830f37409a3..650494d622c1 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
| @@ -49,6 +49,8 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); | |||
| 49 | void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); | 49 | void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); |
| 50 | 50 | ||
| 51 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); | 51 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); |
| 52 | void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); | ||
| 53 | void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); | ||
| 52 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); | 54 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); |
| 53 | int lpfc_linkdown(struct lpfc_hba *); | 55 | int lpfc_linkdown(struct lpfc_hba *); |
| 54 | void lpfc_linkdown_port(struct lpfc_vport *); | 56 | void lpfc_linkdown_port(struct lpfc_vport *); |
| @@ -144,6 +146,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); | |||
| 144 | 146 | ||
| 145 | void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, | 147 | void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, |
| 146 | struct lpfc_iocbq *); | 148 | struct lpfc_iocbq *); |
| 149 | void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, | ||
| 150 | struct lpfc_iocbq *); | ||
| 147 | int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); | 151 | int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); |
| 148 | int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); | 152 | int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); |
| 149 | void lpfc_fdmi_tmo(unsigned long); | 153 | void lpfc_fdmi_tmo(unsigned long); |
| @@ -188,7 +192,7 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int); | |||
| 188 | void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); | 192 | void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); |
| 189 | void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); | 193 | void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); |
| 190 | void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); | 194 | void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); |
| 191 | void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); | 195 | void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *); |
| 192 | void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); | 196 | void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); |
| 193 | void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); | 197 | void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); |
| 194 | void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); | 198 | void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); |
| @@ -212,7 +216,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *); | |||
| 212 | void lpfc_poll_timeout(unsigned long ptr); | 216 | void lpfc_poll_timeout(unsigned long ptr); |
| 213 | void lpfc_poll_start_timer(struct lpfc_hba *); | 217 | void lpfc_poll_start_timer(struct lpfc_hba *); |
| 214 | void lpfc_poll_eratt(unsigned long); | 218 | void lpfc_poll_eratt(unsigned long); |
| 215 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); | 219 | int |
| 220 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, | ||
| 221 | struct lpfc_sli_ring *, uint32_t); | ||
| 222 | |||
| 216 | struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); | 223 | struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); |
| 217 | void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); | 224 | void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); |
| 218 | uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); | 225 | uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); |
| @@ -235,7 +242,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); | |||
| 235 | int lpfc_sli_check_eratt(struct lpfc_hba *); | 242 | int lpfc_sli_check_eratt(struct lpfc_hba *); |
| 236 | void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, | 243 | void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, |
| 237 | struct lpfc_sli_ring *, uint32_t); | 244 | struct lpfc_sli_ring *, uint32_t); |
| 238 | int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); | 245 | void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); |
| 239 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 246 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
| 240 | int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, | 247 | int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, |
| 241 | struct lpfc_iocbq *, uint32_t); | 248 | struct lpfc_iocbq *, uint32_t); |
| @@ -361,6 +368,7 @@ void lpfc_stop_port(struct lpfc_hba *); | |||
| 361 | void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); | 368 | void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); |
| 362 | int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | 369 | int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); |
| 363 | void lpfc_start_fdiscs(struct lpfc_hba *phba); | 370 | void lpfc_start_fdiscs(struct lpfc_hba *phba); |
| 371 | struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); | ||
| 364 | 372 | ||
| 365 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) | 373 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) |
| 366 | #define HBA_EVENT_RSCN 5 | 374 | #define HBA_EVENT_RSCN 5 |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9a1bd9534d74..0ebcd9baca79 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <scsi/scsi_device.h> | 31 | #include <scsi/scsi_device.h> |
| 32 | #include <scsi/scsi_host.h> | 32 | #include <scsi/scsi_host.h> |
| 33 | #include <scsi/scsi_transport_fc.h> | 33 | #include <scsi/scsi_transport_fc.h> |
| 34 | #include <scsi/fc/fc_fs.h> | ||
| 34 | 35 | ||
| 35 | #include "lpfc_hw4.h" | 36 | #include "lpfc_hw4.h" |
| 36 | #include "lpfc_hw.h" | 37 | #include "lpfc_hw.h" |
| @@ -87,7 +88,6 @@ void | |||
| 87 | lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 88 | lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 88 | struct lpfc_iocbq *piocbq) | 89 | struct lpfc_iocbq *piocbq) |
| 89 | { | 90 | { |
| 90 | |||
| 91 | struct lpfc_dmabuf *mp = NULL; | 91 | struct lpfc_dmabuf *mp = NULL; |
| 92 | IOCB_t *icmd = &piocbq->iocb; | 92 | IOCB_t *icmd = &piocbq->iocb; |
| 93 | int i; | 93 | int i; |
| @@ -160,6 +160,39 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 160 | } | 160 | } |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | /** | ||
| 164 | * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort | ||
| 165 | * @phba: Pointer to HBA context object. | ||
| 166 | * @pring: Pointer to the driver internal I/O ring. | ||
| 167 | * @piocbq: Pointer to the IOCBQ. | ||
| 168 | * | ||
| 169 | * This function serves as the default handler for the sli4 unsolicited | ||
| 170 | * abort event. It shall be invoked when there is no application interface | ||
| 171 | * registered unsolicited abort handler. This handler does nothing but | ||
| 172 | * just simply releases the dma buffer used by the unsol abort event. | ||
| 173 | **/ | ||
| 174 | void | ||
| 175 | lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba, | ||
| 176 | struct lpfc_sli_ring *pring, | ||
| 177 | struct lpfc_iocbq *piocbq) | ||
| 178 | { | ||
| 179 | IOCB_t *icmd = &piocbq->iocb; | ||
| 180 | struct lpfc_dmabuf *bdeBuf; | ||
| 181 | uint32_t size; | ||
| 182 | |||
| 183 | /* Forward abort event to any process registered to receive ct event */ | ||
| 184 | lpfc_bsg_ct_unsol_event(phba, pring, piocbq); | ||
| 185 | |||
| 186 | /* If there is no BDE associated with IOCB, there is nothing to do */ | ||
| 187 | if (icmd->ulpBdeCount == 0) | ||
| 188 | return; | ||
| 189 | bdeBuf = piocbq->context2; | ||
| 190 | piocbq->context2 = NULL; | ||
| 191 | size = icmd->un.cont64[0].tus.f.bdeSize; | ||
| 192 | lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); | ||
| 193 | lpfc_in_buf_free(phba, bdeBuf); | ||
| 194 | } | ||
| 195 | |||
| 163 | static void | 196 | static void |
| 164 | lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) | 197 | lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) |
| 165 | { | 198 | { |
| @@ -304,8 +337,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, | |||
| 304 | /* Fill in rest of iocb */ | 337 | /* Fill in rest of iocb */ |
| 305 | icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); | 338 | icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); |
| 306 | icmd->un.genreq64.w5.hcsw.Dfctl = 0; | 339 | icmd->un.genreq64.w5.hcsw.Dfctl = 0; |
| 307 | icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; | 340 | icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; |
| 308 | icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; | 341 | icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; |
| 309 | 342 | ||
| 310 | if (!tmo) { | 343 | if (!tmo) { |
| 311 | /* FC spec states we need 3 * ratov for CT requests */ | 344 | /* FC spec states we need 3 * ratov for CT requests */ |
| @@ -363,9 +396,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, | |||
| 363 | outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); | 396 | outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); |
| 364 | if (!outmp) | 397 | if (!outmp) |
| 365 | return -ENOMEM; | 398 | return -ENOMEM; |
| 366 | 399 | /* | |
| 400 | * Form the CT IOCB. The total number of BDEs in this IOCB | ||
| 401 | * is the single command plus response count from | ||
| 402 | * lpfc_alloc_ct_rsp. | ||
| 403 | */ | ||
| 404 | cnt += 1; | ||
| 367 | status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, | 405 | status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, |
| 368 | cnt+1, 0, retry); | 406 | cnt, 0, retry); |
| 369 | if (status) { | 407 | if (status) { |
| 370 | lpfc_free_ct_rsp(phba, outmp); | 408 | lpfc_free_ct_rsp(phba, outmp); |
| 371 | return -ENOMEM; | 409 | return -ENOMEM; |
| @@ -501,6 +539,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) | |||
| 501 | SLI_CTNS_GFF_ID, | 539 | SLI_CTNS_GFF_ID, |
| 502 | 0, Did) == 0) | 540 | 0, Did) == 0) |
| 503 | vport->num_disc_nodes++; | 541 | vport->num_disc_nodes++; |
| 542 | else | ||
| 543 | lpfc_setup_disc_node | ||
| 544 | (vport, Did); | ||
| 504 | } | 545 | } |
| 505 | else { | 546 | else { |
| 506 | lpfc_debugfs_disc_trc(vport, | 547 | lpfc_debugfs_disc_trc(vport, |
| @@ -1209,7 +1250,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
| 1209 | be16_to_cpu(SLI_CTNS_RFF_ID); | 1250 | be16_to_cpu(SLI_CTNS_RFF_ID); |
| 1210 | CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); | 1251 | CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); |
| 1211 | CtReq->un.rff.fbits = FC4_FEATURE_INIT; | 1252 | CtReq->un.rff.fbits = FC4_FEATURE_INIT; |
| 1212 | CtReq->un.rff.type_code = FC_FCP_DATA; | 1253 | CtReq->un.rff.type_code = FC_TYPE_FCP; |
| 1213 | cmpl = lpfc_cmpl_ct_cmd_rff_id; | 1254 | cmpl = lpfc_cmpl_ct_cmd_rff_id; |
| 1214 | break; | 1255 | break; |
| 1215 | } | 1256 | } |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 8d0f0de76b63..391584183d81 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
| @@ -926,7 +926,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) | |||
| 926 | goto out; | 926 | goto out; |
| 927 | 927 | ||
| 928 | /* Round to page boundry */ | 928 | /* Round to page boundry */ |
| 929 | printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", | 929 | printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", |
| 930 | __func__, _dump_buf_data); | 930 | __func__, _dump_buf_data); |
| 931 | debug->buffer = _dump_buf_data; | 931 | debug->buffer = _dump_buf_data; |
| 932 | if (!debug->buffer) { | 932 | if (!debug->buffer) { |
| @@ -956,8 +956,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) | |||
| 956 | goto out; | 956 | goto out; |
| 957 | 957 | ||
| 958 | /* Round to page boundry */ | 958 | /* Round to page boundry */ |
| 959 | printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, | 959 | printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n", |
| 960 | _dump_buf_dif, file->f_dentry->d_name.name); | 960 | __func__, _dump_buf_dif, file->f_dentry->d_name.name); |
| 961 | debug->buffer = _dump_buf_dif; | 961 | debug->buffer = _dump_buf_dif; |
| 962 | if (!debug->buffer) { | 962 | if (!debug->buffer) { |
| 963 | kfree(debug); | 963 | kfree(debug); |
| @@ -1377,7 +1377,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
| 1377 | debugfs_create_dir(name, phba->hba_debugfs_root); | 1377 | debugfs_create_dir(name, phba->hba_debugfs_root); |
| 1378 | if (!vport->vport_debugfs_root) { | 1378 | if (!vport->vport_debugfs_root) { |
| 1379 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1379 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
| 1380 | "0417 Cant create debugfs"); | 1380 | "0417 Cant create debugfs\n"); |
| 1381 | goto debug_failed; | 1381 | goto debug_failed; |
| 1382 | } | 1382 | } |
| 1383 | atomic_inc(&phba->debugfs_vport_count); | 1383 | atomic_inc(&phba->debugfs_vport_count); |
| @@ -1430,7 +1430,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
| 1430 | vport, &lpfc_debugfs_op_nodelist); | 1430 | vport, &lpfc_debugfs_op_nodelist); |
| 1431 | if (!vport->debug_nodelist) { | 1431 | if (!vport->debug_nodelist) { |
| 1432 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1432 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
| 1433 | "0409 Cant create debugfs nodelist"); | 1433 | "0409 Cant create debugfs nodelist\n"); |
| 1434 | goto debug_failed; | 1434 | goto debug_failed; |
| 1435 | } | 1435 | } |
| 1436 | debug_failed: | 1436 | debug_failed: |
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 1142070e9484..2851d75ffc6f 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | *******************************************************************/ | 19 | *******************************************************************/ |
| 20 | 20 | ||
| 21 | #define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ | 21 | #define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ |
| 22 | #define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ | 22 | #define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */ |
| 23 | #define FC_MAXLOOP 126 /* max devices supported on a fc loop */ | 23 | #define FC_MAXLOOP 126 /* max devices supported on a fc loop */ |
| 24 | #define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ | 24 | #define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ |
| 25 | 25 | ||
| @@ -105,8 +105,6 @@ struct lpfc_nodelist { | |||
| 105 | struct lpfc_vport *vport; | 105 | struct lpfc_vport *vport; |
| 106 | struct lpfc_work_evt els_retry_evt; | 106 | struct lpfc_work_evt els_retry_evt; |
| 107 | struct lpfc_work_evt dev_loss_evt; | 107 | struct lpfc_work_evt dev_loss_evt; |
| 108 | unsigned long last_ramp_up_time; /* jiffy of last ramp up */ | ||
| 109 | unsigned long last_q_full_time; /* jiffy of last queue full */ | ||
| 110 | struct kref kref; | 108 | struct kref kref; |
| 111 | atomic_t cmd_pending; | 109 | atomic_t cmd_pending; |
| 112 | uint32_t cmd_qdepth; | 110 | uint32_t cmd_qdepth; |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 45337cd23feb..ce522702a6c1 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
| @@ -173,13 +173,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
| 173 | * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. | 173 | * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. |
| 174 | */ | 174 | */ |
| 175 | if ((did == Fabric_DID) && | 175 | if ((did == Fabric_DID) && |
| 176 | bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && | 176 | (phba->hba_flag & HBA_FIP_SUPPORT) && |
| 177 | ((elscmd == ELS_CMD_FLOGI) || | 177 | ((elscmd == ELS_CMD_FLOGI) || |
| 178 | (elscmd == ELS_CMD_FDISC) || | 178 | (elscmd == ELS_CMD_FDISC) || |
| 179 | (elscmd == ELS_CMD_LOGO))) | 179 | (elscmd == ELS_CMD_LOGO))) |
| 180 | elsiocb->iocb_flag |= LPFC_FIP_ELS; | 180 | switch (elscmd) { |
| 181 | case ELS_CMD_FLOGI: | ||
| 182 | elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) | ||
| 183 | & LPFC_FIP_ELS_ID_MASK); | ||
| 184 | break; | ||
| 185 | case ELS_CMD_FDISC: | ||
| 186 | elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) | ||
| 187 | & LPFC_FIP_ELS_ID_MASK); | ||
| 188 | break; | ||
| 189 | case ELS_CMD_LOGO: | ||
| 190 | elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) | ||
| 191 | & LPFC_FIP_ELS_ID_MASK); | ||
| 192 | break; | ||
| 193 | } | ||
| 181 | else | 194 | else |
| 182 | elsiocb->iocb_flag &= ~LPFC_FIP_ELS; | 195 | elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; |
| 183 | 196 | ||
| 184 | icmd = &elsiocb->iocb; | 197 | icmd = &elsiocb->iocb; |
| 185 | 198 | ||
| @@ -591,7 +604,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
| 591 | } else { | 604 | } else { |
| 592 | ndlp->nlp_type |= NLP_FABRIC; | 605 | ndlp->nlp_type |= NLP_FABRIC; |
| 593 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 606 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
| 594 | if (vport->vfi_state & LPFC_VFI_REGISTERED) { | 607 | if (vport->vpi_state & LPFC_VPI_REGISTERED) { |
| 595 | lpfc_start_fdiscs(phba); | 608 | lpfc_start_fdiscs(phba); |
| 596 | lpfc_do_scr_ns_plogi(phba, vport); | 609 | lpfc_do_scr_ns_plogi(phba, vport); |
| 597 | } else | 610 | } else |
| @@ -802,7 +815,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 802 | 815 | ||
| 803 | /* FLOGI completes successfully */ | 816 | /* FLOGI completes successfully */ |
| 804 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, | 817 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| 805 | "0101 FLOGI completes sucessfully " | 818 | "0101 FLOGI completes successfully " |
| 806 | "Data: x%x x%x x%x x%x\n", | 819 | "Data: x%x x%x x%x x%x\n", |
| 807 | irsp->un.ulpWord[4], sp->cmn.e_d_tov, | 820 | irsp->un.ulpWord[4], sp->cmn.e_d_tov, |
| 808 | sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); | 821 | sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); |
| @@ -2452,6 +2465,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) | |||
| 2452 | */ | 2465 | */ |
| 2453 | del_timer_sync(&ndlp->nlp_delayfunc); | 2466 | del_timer_sync(&ndlp->nlp_delayfunc); |
| 2454 | retry = ndlp->nlp_retry; | 2467 | retry = ndlp->nlp_retry; |
| 2468 | ndlp->nlp_retry = 0; | ||
| 2455 | 2469 | ||
| 2456 | switch (cmd) { | 2470 | switch (cmd) { |
| 2457 | case ELS_CMD_FLOGI: | 2471 | case ELS_CMD_FLOGI: |
| @@ -2711,12 +2725,16 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 2711 | !lpfc_error_lost_link(irsp)) { | 2725 | !lpfc_error_lost_link(irsp)) { |
| 2712 | /* FLOGI retry policy */ | 2726 | /* FLOGI retry policy */ |
| 2713 | retry = 1; | 2727 | retry = 1; |
| 2714 | maxretry = 48; | 2728 | /* retry forever */ |
| 2715 | if (cmdiocb->retry >= 32) | 2729 | maxretry = 0; |
| 2730 | if (cmdiocb->retry >= 100) | ||
| 2731 | delay = 5000; | ||
| 2732 | else if (cmdiocb->retry >= 32) | ||
| 2716 | delay = 1000; | 2733 | delay = 1000; |
| 2717 | } | 2734 | } |
| 2718 | 2735 | ||
| 2719 | if ((++cmdiocb->retry) >= maxretry) { | 2736 | cmdiocb->retry++; |
| 2737 | if (maxretry && (cmdiocb->retry >= maxretry)) { | ||
| 2720 | phba->fc_stat.elsRetryExceeded++; | 2738 | phba->fc_stat.elsRetryExceeded++; |
| 2721 | retry = 0; | 2739 | retry = 0; |
| 2722 | } | 2740 | } |
| @@ -4133,7 +4151,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
| 4133 | /* Indicate we are walking fc_rscn_id_list on this vport */ | 4151 | /* Indicate we are walking fc_rscn_id_list on this vport */ |
| 4134 | vport->fc_rscn_flush = 1; | 4152 | vport->fc_rscn_flush = 1; |
| 4135 | spin_unlock_irq(shost->host_lock); | 4153 | spin_unlock_irq(shost->host_lock); |
| 4136 | /* Get the array count after sucessfully have the token */ | 4154 | /* Get the array count after successfully have the token */ |
| 4137 | rscn_cnt = vport->fc_rscn_id_cnt; | 4155 | rscn_cnt = vport->fc_rscn_id_cnt; |
| 4138 | /* If we are already processing an RSCN, save the received | 4156 | /* If we are already processing an RSCN, save the received |
| 4139 | * RSCN payload buffer, cmdiocb->context2 to process later. | 4157 | * RSCN payload buffer, cmdiocb->context2 to process later. |
| @@ -4503,6 +4521,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
| 4503 | } | 4521 | } |
| 4504 | 4522 | ||
| 4505 | /** | 4523 | /** |
| 4524 | * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb | ||
| 4525 | * @vport: pointer to a host virtual N_Port data structure. | ||
| 4526 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
| 4527 | * @ndlp: pointer to a node-list data structure. | ||
| 4528 | * | ||
| 4529 | * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB | ||
| 4530 | * received as an ELS unsolicited event. A request to RRQ shall only | ||
| 4531 | * be accepted if the Originator Nx_Port N_Port_ID or the Responder | ||
| 4532 | * Nx_Port N_Port_ID of the target Exchange is the same as the | ||
| 4533 | * N_Port_ID of the Nx_Port that makes the request. If the RRQ is | ||
| 4534 | * not accepted, an LS_RJT with reason code "Unable to perform | ||
| 4535 | * command request" and reason code explanation "Invalid Originator | ||
| 4536 | * S_ID" shall be returned. For now, we just unconditionally accept | ||
| 4537 | * RRQ from the target. | ||
| 4538 | **/ | ||
| 4539 | static void | ||
| 4540 | lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | ||
| 4541 | struct lpfc_nodelist *ndlp) | ||
| 4542 | { | ||
| 4543 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | ||
| 4544 | } | ||
| 4545 | |||
| 4546 | /** | ||
| 4506 | * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd | 4547 | * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd |
| 4507 | * @phba: pointer to lpfc hba data structure. | 4548 | * @phba: pointer to lpfc hba data structure. |
| 4508 | * @pmb: pointer to the driver internal queue element for mailbox command. | 4549 | * @pmb: pointer to the driver internal queue element for mailbox command. |
| @@ -5396,7 +5437,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 5396 | if (lpfc_els_chk_latt(vport)) | 5437 | if (lpfc_els_chk_latt(vport)) |
| 5397 | goto dropit; | 5438 | goto dropit; |
| 5398 | 5439 | ||
| 5399 | /* Ignore traffic recevied during vport shutdown. */ | 5440 | /* Ignore traffic received during vport shutdown. */ |
| 5400 | if (vport->load_flag & FC_UNLOADING) | 5441 | if (vport->load_flag & FC_UNLOADING) |
| 5401 | goto dropit; | 5442 | goto dropit; |
| 5402 | 5443 | ||
| @@ -5618,6 +5659,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 5618 | if (newnode) | 5659 | if (newnode) |
| 5619 | lpfc_nlp_put(ndlp); | 5660 | lpfc_nlp_put(ndlp); |
| 5620 | break; | 5661 | break; |
| 5662 | case ELS_CMD_RRQ: | ||
| 5663 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | ||
| 5664 | "RCV RRQ: did:x%x/ste:x%x flg:x%x", | ||
| 5665 | did, vport->port_state, ndlp->nlp_flag); | ||
| 5666 | |||
| 5667 | phba->fc_stat.elsRcvRRQ++; | ||
| 5668 | lpfc_els_rcv_rrq(vport, elsiocb, ndlp); | ||
| 5669 | if (newnode) | ||
| 5670 | lpfc_nlp_put(ndlp); | ||
| 5671 | break; | ||
| 5621 | default: | 5672 | default: |
| 5622 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 5673 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
| 5623 | "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", | 5674 | "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", |
| @@ -5670,7 +5721,7 @@ dropit: | |||
| 5670 | * NULL - No vport with the matching @vpi found | 5721 | * NULL - No vport with the matching @vpi found |
| 5671 | * Otherwise - Address to the vport with the matching @vpi. | 5722 | * Otherwise - Address to the vport with the matching @vpi. |
| 5672 | **/ | 5723 | **/ |
| 5673 | static struct lpfc_vport * | 5724 | struct lpfc_vport * |
| 5674 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) | 5725 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) |
| 5675 | { | 5726 | { |
| 5676 | struct lpfc_vport *vport; | 5727 | struct lpfc_vport *vport; |
| @@ -6024,11 +6075,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 6024 | irsp->ulpStatus, irsp->un.ulpWord[4]); | 6075 | irsp->ulpStatus, irsp->un.ulpWord[4]); |
| 6025 | goto fdisc_failed; | 6076 | goto fdisc_failed; |
| 6026 | } | 6077 | } |
| 6027 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) | ||
| 6028 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
| 6029 | lpfc_nlp_put(ndlp); | ||
| 6030 | /* giving up on FDISC. Cancel discovery timer */ | ||
| 6031 | lpfc_can_disctmo(vport); | ||
| 6032 | spin_lock_irq(shost->host_lock); | 6078 | spin_lock_irq(shost->host_lock); |
| 6033 | vport->fc_flag |= FC_FABRIC; | 6079 | vport->fc_flag |= FC_FABRIC; |
| 6034 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) | 6080 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) |
| @@ -6107,6 +6153,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
| 6107 | int did = ndlp->nlp_DID; | 6153 | int did = ndlp->nlp_DID; |
| 6108 | int rc; | 6154 | int rc; |
| 6109 | 6155 | ||
| 6156 | vport->port_state = LPFC_FDISC; | ||
| 6110 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); | 6157 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); |
| 6111 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, | 6158 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, |
| 6112 | ELS_CMD_FDISC); | 6159 | ELS_CMD_FDISC); |
| @@ -6172,7 +6219,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
| 6172 | return 1; | 6219 | return 1; |
| 6173 | } | 6220 | } |
| 6174 | lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); | 6221 | lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); |
| 6175 | vport->port_state = LPFC_FDISC; | ||
| 6176 | return 0; | 6222 | return 0; |
| 6177 | } | 6223 | } |
| 6178 | 6224 | ||
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e6a47e25b218..3b9424427652 100644..100755 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
| @@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
| 525 | spin_unlock_irq(&phba->hbalock); | 525 | spin_unlock_irq(&phba->hbalock); |
| 526 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); | 526 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); |
| 527 | } | 527 | } |
| 528 | if (phba->hba_flag & HBA_RECEIVE_BUFFER) | ||
| 529 | lpfc_sli4_handle_received_buffer(phba); | ||
| 530 | } | 528 | } |
| 531 | 529 | ||
| 532 | vports = lpfc_create_vport_work_array(phba); | 530 | vports = lpfc_create_vport_work_array(phba); |
| @@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
| 568 | pring = &phba->sli.ring[LPFC_ELS_RING]; | 566 | pring = &phba->sli.ring[LPFC_ELS_RING]; |
| 569 | status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); | 567 | status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); |
| 570 | status >>= (4*LPFC_ELS_RING); | 568 | status >>= (4*LPFC_ELS_RING); |
| 571 | if ((status & HA_RXMASK) | 569 | if ((status & HA_RXMASK) || |
| 572 | || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { | 570 | (pring->flag & LPFC_DEFERRED_RING_EVENT) || |
| 571 | (phba->hba_flag & HBA_SP_QUEUE_EVT)) { | ||
| 573 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { | 572 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { |
| 574 | pring->flag |= LPFC_DEFERRED_RING_EVENT; | 573 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
| 575 | /* Set the lpfc data pending flag */ | 574 | /* Set the lpfc data pending flag */ |
| @@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) | |||
| 688 | lpfc_unreg_rpi(vport, ndlp); | 687 | lpfc_unreg_rpi(vport, ndlp); |
| 689 | 688 | ||
| 690 | /* Leave Fabric nodes alone on link down */ | 689 | /* Leave Fabric nodes alone on link down */ |
| 691 | if (!remove && ndlp->nlp_type & NLP_FABRIC) | 690 | if ((phba->sli_rev < LPFC_SLI_REV4) && |
| 691 | (!remove && ndlp->nlp_type & NLP_FABRIC)) | ||
| 692 | continue; | 692 | continue; |
| 693 | rc = lpfc_disc_state_machine(vport, ndlp, NULL, | 693 | rc = lpfc_disc_state_machine(vport, ndlp, NULL, |
| 694 | remove | 694 | remove |
| @@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) | |||
| 706 | void | 706 | void |
| 707 | lpfc_port_link_failure(struct lpfc_vport *vport) | 707 | lpfc_port_link_failure(struct lpfc_vport *vport) |
| 708 | { | 708 | { |
| 709 | /* Cleanup any outstanding received buffers */ | ||
| 710 | lpfc_cleanup_rcv_buffers(vport); | ||
| 711 | |||
| 709 | /* Cleanup any outstanding RSCN activity */ | 712 | /* Cleanup any outstanding RSCN activity */ |
| 710 | lpfc_els_flush_rscn(vport); | 713 | lpfc_els_flush_rscn(vport); |
| 711 | 714 | ||
| @@ -1015,13 +1018,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 1015 | mempool_free(mboxq, phba->mbox_mem_pool); | 1018 | mempool_free(mboxq, phba->mbox_mem_pool); |
| 1016 | return; | 1019 | return; |
| 1017 | } | 1020 | } |
| 1018 | if (vport->port_state != LPFC_FLOGI) { | 1021 | spin_lock_irqsave(&phba->hbalock, flags); |
| 1019 | spin_lock_irqsave(&phba->hbalock, flags); | 1022 | phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); |
| 1020 | phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); | 1023 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; |
| 1021 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1024 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 1022 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1025 | if (vport->port_state != LPFC_FLOGI) |
| 1023 | lpfc_initial_flogi(vport); | 1026 | lpfc_initial_flogi(vport); |
| 1024 | } | ||
| 1025 | 1027 | ||
| 1026 | mempool_free(mboxq, phba->mbox_mem_pool); | 1028 | mempool_free(mboxq, phba->mbox_mem_pool); |
| 1027 | return; | 1029 | return; |
| @@ -1199,6 +1201,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
| 1199 | 1201 | ||
| 1200 | /* If the FCF is not availabe do nothing. */ | 1202 | /* If the FCF is not availabe do nothing. */ |
| 1201 | if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { | 1203 | if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { |
| 1204 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
| 1202 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1205 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 1203 | return; | 1206 | return; |
| 1204 | } | 1207 | } |
| @@ -1216,15 +1219,23 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
| 1216 | 1219 | ||
| 1217 | fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, | 1220 | fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, |
| 1218 | GFP_KERNEL); | 1221 | GFP_KERNEL); |
| 1219 | if (!fcf_mbxq) | 1222 | if (!fcf_mbxq) { |
| 1223 | spin_lock_irqsave(&phba->hbalock, flags); | ||
| 1224 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
| 1225 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
| 1220 | return; | 1226 | return; |
| 1227 | } | ||
| 1221 | 1228 | ||
| 1222 | lpfc_reg_fcfi(phba, fcf_mbxq); | 1229 | lpfc_reg_fcfi(phba, fcf_mbxq); |
| 1223 | fcf_mbxq->vport = phba->pport; | 1230 | fcf_mbxq->vport = phba->pport; |
| 1224 | fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; | 1231 | fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; |
| 1225 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); | 1232 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); |
| 1226 | if (rc == MBX_NOT_FINISHED) | 1233 | if (rc == MBX_NOT_FINISHED) { |
| 1234 | spin_lock_irqsave(&phba->hbalock, flags); | ||
| 1235 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
| 1236 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
| 1227 | mempool_free(fcf_mbxq, phba->mbox_mem_pool); | 1237 | mempool_free(fcf_mbxq, phba->mbox_mem_pool); |
| 1238 | } | ||
| 1228 | 1239 | ||
| 1229 | return; | 1240 | return; |
| 1230 | } | 1241 | } |
| @@ -1253,13 +1264,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, | |||
| 1253 | uint16_t *vlan_id) | 1264 | uint16_t *vlan_id) |
| 1254 | { | 1265 | { |
| 1255 | struct lpfc_fcf_conn_entry *conn_entry; | 1266 | struct lpfc_fcf_conn_entry *conn_entry; |
| 1267 | int i, j, fcf_vlan_id = 0; | ||
| 1268 | |||
| 1269 | /* Find the lowest VLAN id in the FCF record */ | ||
| 1270 | for (i = 0; i < 512; i++) { | ||
| 1271 | if (new_fcf_record->vlan_bitmap[i]) { | ||
| 1272 | fcf_vlan_id = i * 8; | ||
| 1273 | j = 0; | ||
| 1274 | while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { | ||
| 1275 | j++; | ||
| 1276 | fcf_vlan_id++; | ||
| 1277 | } | ||
| 1278 | break; | ||
| 1279 | } | ||
| 1280 | } | ||
| 1256 | 1281 | ||
| 1257 | /* If FCF not available return 0 */ | 1282 | /* If FCF not available return 0 */ |
| 1258 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || | 1283 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || |
| 1259 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) | 1284 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) |
| 1260 | return 0; | 1285 | return 0; |
| 1261 | 1286 | ||
| 1262 | if (!phba->cfg_enable_fip) { | 1287 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { |
| 1263 | *boot_flag = 0; | 1288 | *boot_flag = 0; |
| 1264 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, | 1289 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
| 1265 | new_fcf_record); | 1290 | new_fcf_record); |
| @@ -1286,7 +1311,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, | |||
| 1286 | if (*addr_mode & LPFC_FCF_FPMA) | 1311 | if (*addr_mode & LPFC_FCF_FPMA) |
| 1287 | *addr_mode = LPFC_FCF_FPMA; | 1312 | *addr_mode = LPFC_FCF_FPMA; |
| 1288 | 1313 | ||
| 1289 | *vlan_id = 0xFFFF; | 1314 | /* If FCF record report a vlan id use that vlan id */ |
| 1315 | if (fcf_vlan_id) | ||
| 1316 | *vlan_id = fcf_vlan_id; | ||
| 1317 | else | ||
| 1318 | *vlan_id = 0xFFFF; | ||
| 1290 | return 1; | 1319 | return 1; |
| 1291 | } | 1320 | } |
| 1292 | 1321 | ||
| @@ -1384,8 +1413,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, | |||
| 1384 | (*addr_mode & LPFC_FCF_FPMA)) | 1413 | (*addr_mode & LPFC_FCF_FPMA)) |
| 1385 | *addr_mode = LPFC_FCF_FPMA; | 1414 | *addr_mode = LPFC_FCF_FPMA; |
| 1386 | 1415 | ||
| 1416 | /* If matching connect list has a vlan id, use it */ | ||
| 1387 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) | 1417 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) |
| 1388 | *vlan_id = conn_entry->conn_rec.vlan_tag; | 1418 | *vlan_id = conn_entry->conn_rec.vlan_tag; |
| 1419 | /* | ||
| 1420 | * If no vlan id is specified in connect list, use the vlan id | ||
| 1421 | * in the FCF record | ||
| 1422 | */ | ||
| 1423 | else if (fcf_vlan_id) | ||
| 1424 | *vlan_id = fcf_vlan_id; | ||
| 1389 | else | 1425 | else |
| 1390 | *vlan_id = 0xFFFF; | 1426 | *vlan_id = 0xFFFF; |
| 1391 | 1427 | ||
| @@ -1423,6 +1459,15 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) | |||
| 1423 | 1459 | ||
| 1424 | if (phba->link_state >= LPFC_LINK_UP) | 1460 | if (phba->link_state >= LPFC_LINK_UP) |
| 1425 | lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); | 1461 | lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); |
| 1462 | else { | ||
| 1463 | /* | ||
| 1464 | * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS | ||
| 1465 | * flag | ||
| 1466 | */ | ||
| 1467 | spin_lock_irq(&phba->hbalock); | ||
| 1468 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
| 1469 | spin_unlock_irq(&phba->hbalock); | ||
| 1470 | } | ||
| 1426 | 1471 | ||
| 1427 | if (unreg_fcf) { | 1472 | if (unreg_fcf) { |
| 1428 | spin_lock_irq(&phba->hbalock); | 1473 | spin_lock_irq(&phba->hbalock); |
| @@ -1659,9 +1704,8 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 1659 | lpfc_initial_fdisc(vport); | 1704 | lpfc_initial_fdisc(vport); |
| 1660 | else { | 1705 | else { |
| 1661 | lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); | 1706 | lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); |
| 1662 | lpfc_printf_vlog(vport, KERN_ERR, | 1707 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| 1663 | LOG_ELS, | 1708 | "2606 No NPIV Fabric support\n"); |
| 1664 | "2606 No NPIV Fabric support\n"); | ||
| 1665 | } | 1709 | } |
| 1666 | return; | 1710 | return; |
| 1667 | } | 1711 | } |
| @@ -1756,8 +1800,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 1756 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 1800 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
| 1757 | goto fail_free_mem; | 1801 | goto fail_free_mem; |
| 1758 | } | 1802 | } |
| 1759 | /* Mark the vport has registered with its VFI */ | 1803 | /* The VPI is implicitly registered when the VFI is registered */ |
| 1760 | vport->vfi_state |= LPFC_VFI_REGISTERED; | 1804 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
| 1761 | 1805 | ||
| 1762 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { | 1806 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
| 1763 | lpfc_start_fdiscs(phba); | 1807 | lpfc_start_fdiscs(phba); |
| @@ -1861,7 +1905,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
| 1861 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 1905 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
| 1862 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; | 1906 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
| 1863 | 1907 | ||
| 1864 | if (phba->cfg_enable_npiv) | 1908 | /* if npiv is enabled and this adapter supports npiv log |
| 1909 | * a message that npiv is not supported in this topology | ||
| 1910 | */ | ||
| 1911 | if (phba->cfg_enable_npiv && phba->max_vpi) | ||
| 1865 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1912 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
| 1866 | "1309 Link Up Event npiv not supported in loop " | 1913 | "1309 Link Up Event npiv not supported in loop " |
| 1867 | "topology\n"); | 1914 | "topology\n"); |
| @@ -1955,7 +2002,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
| 1955 | * is phase 1 implementation that support FCF index 0 and driver | 2002 | * is phase 1 implementation that support FCF index 0 and driver |
| 1956 | * defaults. | 2003 | * defaults. |
| 1957 | */ | 2004 | */ |
| 1958 | if (phba->cfg_enable_fip == 0) { | 2005 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { |
| 1959 | fcf_record = kzalloc(sizeof(struct fcf_record), | 2006 | fcf_record = kzalloc(sizeof(struct fcf_record), |
| 1960 | GFP_KERNEL); | 2007 | GFP_KERNEL); |
| 1961 | if (unlikely(!fcf_record)) { | 2008 | if (unlikely(!fcf_record)) { |
| @@ -2085,6 +2132,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
| 2085 | else | 2132 | else |
| 2086 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; | 2133 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; |
| 2087 | 2134 | ||
| 2135 | phba->link_events++; | ||
| 2088 | if (la->attType == AT_LINK_UP && (!la->mm)) { | 2136 | if (la->attType == AT_LINK_UP && (!la->mm)) { |
| 2089 | phba->fc_stat.LinkUp++; | 2137 | phba->fc_stat.LinkUp++; |
| 2090 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 2138 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
| @@ -2211,13 +2259,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
| 2211 | mb->mbxStatus); | 2259 | mb->mbxStatus); |
| 2212 | break; | 2260 | break; |
| 2213 | } | 2261 | } |
| 2262 | vport->vpi_state &= ~LPFC_VPI_REGISTERED; | ||
| 2214 | vport->unreg_vpi_cmpl = VPORT_OK; | 2263 | vport->unreg_vpi_cmpl = VPORT_OK; |
| 2215 | mempool_free(pmb, phba->mbox_mem_pool); | 2264 | mempool_free(pmb, phba->mbox_mem_pool); |
| 2216 | /* | 2265 | /* |
| 2217 | * This shost reference might have been taken at the beginning of | 2266 | * This shost reference might have been taken at the beginning of |
| 2218 | * lpfc_vport_delete() | 2267 | * lpfc_vport_delete() |
| 2219 | */ | 2268 | */ |
| 2220 | if (vport->load_flag & FC_UNLOADING) | 2269 | if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) |
| 2221 | scsi_host_put(shost); | 2270 | scsi_host_put(shost); |
| 2222 | } | 2271 | } |
| 2223 | 2272 | ||
| @@ -2268,6 +2317,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
| 2268 | goto out; | 2317 | goto out; |
| 2269 | } | 2318 | } |
| 2270 | 2319 | ||
| 2320 | vport->vpi_state |= LPFC_VPI_REGISTERED; | ||
| 2271 | vport->num_disc_nodes = 0; | 2321 | vport->num_disc_nodes = 0; |
| 2272 | /* go thru NPR list and issue ELS PLOGIs */ | 2322 | /* go thru NPR list and issue ELS PLOGIs */ |
| 2273 | if (vport->fc_npr_cnt) | 2323 | if (vport->fc_npr_cnt) |
| @@ -3077,7 +3127,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
| 3077 | struct lpfc_sli *psli; | 3127 | struct lpfc_sli *psli; |
| 3078 | struct lpfc_sli_ring *pring; | 3128 | struct lpfc_sli_ring *pring; |
| 3079 | struct lpfc_iocbq *iocb, *next_iocb; | 3129 | struct lpfc_iocbq *iocb, *next_iocb; |
| 3080 | uint32_t rpi, i; | 3130 | uint32_t i; |
| 3081 | 3131 | ||
| 3082 | lpfc_fabric_abort_nport(ndlp); | 3132 | lpfc_fabric_abort_nport(ndlp); |
| 3083 | 3133 | ||
| @@ -3086,7 +3136,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
| 3086 | * by firmware with a no rpi error. | 3136 | * by firmware with a no rpi error. |
| 3087 | */ | 3137 | */ |
| 3088 | psli = &phba->sli; | 3138 | psli = &phba->sli; |
| 3089 | rpi = ndlp->nlp_rpi; | ||
| 3090 | if (ndlp->nlp_flag & NLP_RPI_VALID) { | 3139 | if (ndlp->nlp_flag & NLP_RPI_VALID) { |
| 3091 | /* Now process each ring */ | 3140 | /* Now process each ring */ |
| 3092 | for (i = 0; i < psli->num_rings; i++) { | 3141 | for (i = 0; i < psli->num_rings; i++) { |
| @@ -4322,6 +4371,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) | |||
| 4322 | ret = 1; | 4371 | ret = 1; |
| 4323 | spin_unlock_irq(shost->host_lock); | 4372 | spin_unlock_irq(shost->host_lock); |
| 4324 | goto out; | 4373 | goto out; |
| 4374 | } else { | ||
| 4375 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
| 4376 | "2624 RPI %x DID %x flg %x still " | ||
| 4377 | "logged in\n", | ||
| 4378 | ndlp->nlp_rpi, ndlp->nlp_DID, | ||
| 4379 | ndlp->nlp_flag); | ||
| 4380 | if (ndlp->nlp_flag & NLP_RPI_VALID) | ||
| 4381 | ret = 1; | ||
| 4325 | } | 4382 | } |
| 4326 | } | 4383 | } |
| 4327 | spin_unlock_irq(shost->host_lock); | 4384 | spin_unlock_irq(shost->host_lock); |
| @@ -4400,7 +4457,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
| 4400 | */ | 4457 | */ |
| 4401 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || | 4458 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || |
| 4402 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || | 4459 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || |
| 4403 | (phba->cfg_enable_fip == 0)) { | 4460 | (!(phba->hba_flag & HBA_FIP_SUPPORT))) { |
| 4404 | spin_unlock_irq(&phba->hbalock); | 4461 | spin_unlock_irq(&phba->hbalock); |
| 4405 | return; | 4462 | return; |
| 4406 | } | 4463 | } |
| @@ -4409,6 +4466,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
| 4409 | if (lpfc_fcf_inuse(phba)) | 4466 | if (lpfc_fcf_inuse(phba)) |
| 4410 | return; | 4467 | return; |
| 4411 | 4468 | ||
| 4469 | /* At this point, all discovery is aborted */ | ||
| 4470 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | ||
| 4412 | 4471 | ||
| 4413 | /* Unregister VPIs */ | 4472 | /* Unregister VPIs */ |
| 4414 | vports = lpfc_create_vport_work_array(phba); | 4473 | vports = lpfc_create_vport_work_array(phba); |
| @@ -4416,8 +4475,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
| 4416 | (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) | 4475 | (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) |
| 4417 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 4476 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
| 4418 | lpfc_mbx_unreg_vpi(vports[i]); | 4477 | lpfc_mbx_unreg_vpi(vports[i]); |
| 4419 | vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 4478 | vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; |
| 4420 | vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; | 4479 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; |
| 4421 | } | 4480 | } |
| 4422 | lpfc_destroy_vport_work_array(phba, vports); | 4481 | lpfc_destroy_vport_work_array(phba, vports); |
| 4423 | 4482 | ||
| @@ -4431,7 +4490,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
| 4431 | return; | 4490 | return; |
| 4432 | } | 4491 | } |
| 4433 | 4492 | ||
| 4434 | lpfc_unreg_vfi(mbox, phba->pport->vfi); | 4493 | lpfc_unreg_vfi(mbox, phba->pport); |
| 4435 | mbox->vport = phba->pport; | 4494 | mbox->vport = phba->pport; |
| 4436 | mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; | 4495 | mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; |
| 4437 | 4496 | ||
| @@ -4512,8 +4571,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, | |||
| 4512 | 4571 | ||
| 4513 | /* Free the current connect table */ | 4572 | /* Free the current connect table */ |
| 4514 | list_for_each_entry_safe(conn_entry, next_conn_entry, | 4573 | list_for_each_entry_safe(conn_entry, next_conn_entry, |
| 4515 | &phba->fcf_conn_rec_list, list) | 4574 | &phba->fcf_conn_rec_list, list) { |
| 4575 | list_del_init(&conn_entry->list); | ||
| 4516 | kfree(conn_entry); | 4576 | kfree(conn_entry); |
| 4577 | } | ||
| 4517 | 4578 | ||
| 4518 | conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; | 4579 | conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; |
| 4519 | record_count = conn_hdr->length * sizeof(uint32_t)/ | 4580 | record_count = conn_hdr->length * sizeof(uint32_t)/ |
| @@ -4569,14 +4630,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba, | |||
| 4569 | (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) | 4630 | (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) |
| 4570 | return; | 4631 | return; |
| 4571 | 4632 | ||
| 4572 | if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == | ||
| 4573 | FIPP_MODE_ON) | ||
| 4574 | phba->cfg_enable_fip = 1; | ||
| 4575 | |||
| 4576 | if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == | ||
| 4577 | FIPP_MODE_OFF) | ||
| 4578 | phba->cfg_enable_fip = 0; | ||
| 4579 | |||
| 4580 | if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { | 4633 | if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { |
| 4581 | phba->valid_vlan = 1; | 4634 | phba->valid_vlan = 1; |
| 4582 | phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & | 4635 | phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index ccb26724dc53..c9faa1d8c3c8 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
| @@ -1124,21 +1124,6 @@ typedef struct { | |||
| 1124 | /* Number of 4-byte words in an IOCB. */ | 1124 | /* Number of 4-byte words in an IOCB. */ |
| 1125 | #define IOCB_WORD_SZ 8 | 1125 | #define IOCB_WORD_SZ 8 |
| 1126 | 1126 | ||
| 1127 | /* defines for type field in fc header */ | ||
| 1128 | #define FC_ELS_DATA 0x1 | ||
| 1129 | #define FC_LLC_SNAP 0x5 | ||
| 1130 | #define FC_FCP_DATA 0x8 | ||
| 1131 | #define FC_COMMON_TRANSPORT_ULP 0x20 | ||
| 1132 | |||
| 1133 | /* defines for rctl field in fc header */ | ||
| 1134 | #define FC_DEV_DATA 0x0 | ||
| 1135 | #define FC_UNSOL_CTL 0x2 | ||
| 1136 | #define FC_SOL_CTL 0x3 | ||
| 1137 | #define FC_UNSOL_DATA 0x4 | ||
| 1138 | #define FC_FCP_CMND 0x6 | ||
| 1139 | #define FC_ELS_REQ 0x22 | ||
| 1140 | #define FC_ELS_RSP 0x23 | ||
| 1141 | |||
| 1142 | /* network headers for Dfctl field */ | 1127 | /* network headers for Dfctl field */ |
| 1143 | #define FC_NET_HDR 0x20 | 1128 | #define FC_NET_HDR 0x20 |
| 1144 | 1129 | ||
| @@ -1183,6 +1168,8 @@ typedef struct { | |||
| 1183 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 | 1168 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 |
| 1184 | #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 | 1169 | #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 |
| 1185 | #define PCI_DEVICE_ID_TIGERSHARK 0x0704 | 1170 | #define PCI_DEVICE_ID_TIGERSHARK 0x0704 |
| 1171 | #define PCI_DEVICE_ID_TOMCAT 0x0714 | ||
| 1172 | #define PCI_DEVICE_ID_FALCON 0xf180 | ||
| 1186 | 1173 | ||
| 1187 | #define JEDEC_ID_ADDRESS 0x0080001c | 1174 | #define JEDEC_ID_ADDRESS 0x0080001c |
| 1188 | #define FIREFLY_JEDEC_ID 0x1ACC | 1175 | #define FIREFLY_JEDEC_ID 0x1ACC |
| @@ -1444,6 +1431,7 @@ typedef struct { /* FireFly BIU registers */ | |||
| 1444 | #define CMD_ABORT_MXRI64_CN 0x8C | 1431 | #define CMD_ABORT_MXRI64_CN 0x8C |
| 1445 | #define CMD_RCV_ELS_REQ64_CX 0x8D | 1432 | #define CMD_RCV_ELS_REQ64_CX 0x8D |
| 1446 | #define CMD_XMIT_ELS_RSP64_CX 0x95 | 1433 | #define CMD_XMIT_ELS_RSP64_CX 0x95 |
| 1434 | #define CMD_XMIT_BLS_RSP64_CX 0x97 | ||
| 1447 | #define CMD_FCP_IWRITE64_CR 0x98 | 1435 | #define CMD_FCP_IWRITE64_CR 0x98 |
| 1448 | #define CMD_FCP_IWRITE64_CX 0x99 | 1436 | #define CMD_FCP_IWRITE64_CX 0x99 |
| 1449 | #define CMD_FCP_IREAD64_CR 0x9A | 1437 | #define CMD_FCP_IREAD64_CR 0x9A |
| @@ -2306,8 +2294,7 @@ typedef struct { | |||
| 2306 | uint32_t rsvd1; | 2294 | uint32_t rsvd1; |
| 2307 | uint32_t rsvd2:8; | 2295 | uint32_t rsvd2:8; |
| 2308 | uint32_t sid:24; | 2296 | uint32_t sid:24; |
| 2309 | uint32_t rsvd3; | 2297 | uint32_t wwn[2]; |
| 2310 | uint32_t rsvd4; | ||
| 2311 | uint32_t rsvd5; | 2298 | uint32_t rsvd5; |
| 2312 | uint16_t vfi; | 2299 | uint16_t vfi; |
| 2313 | uint16_t vpi; | 2300 | uint16_t vpi; |
| @@ -2315,8 +2302,7 @@ typedef struct { | |||
| 2315 | uint32_t rsvd1; | 2302 | uint32_t rsvd1; |
| 2316 | uint32_t sid:24; | 2303 | uint32_t sid:24; |
| 2317 | uint32_t rsvd2:8; | 2304 | uint32_t rsvd2:8; |
| 2318 | uint32_t rsvd3; | 2305 | uint32_t wwn[2]; |
| 2319 | uint32_t rsvd4; | ||
| 2320 | uint32_t rsvd5; | 2306 | uint32_t rsvd5; |
| 2321 | uint16_t vpi; | 2307 | uint16_t vpi; |
| 2322 | uint16_t vfi; | 2308 | uint16_t vfi; |
| @@ -2326,7 +2312,13 @@ typedef struct { | |||
| 2326 | /* Structure for MB Command UNREG_VPI (0x97) */ | 2312 | /* Structure for MB Command UNREG_VPI (0x97) */ |
| 2327 | typedef struct { | 2313 | typedef struct { |
| 2328 | uint32_t rsvd1; | 2314 | uint32_t rsvd1; |
| 2329 | uint32_t rsvd2; | 2315 | #ifdef __BIG_ENDIAN_BITFIELD |
| 2316 | uint16_t rsvd2; | ||
| 2317 | uint16_t sli4_vpi; | ||
| 2318 | #else /* __LITTLE_ENDIAN */ | ||
| 2319 | uint16_t sli4_vpi; | ||
| 2320 | uint16_t rsvd2; | ||
| 2321 | #endif | ||
| 2330 | uint32_t rsvd3; | 2322 | uint32_t rsvd3; |
| 2331 | uint32_t rsvd4; | 2323 | uint32_t rsvd4; |
| 2332 | uint32_t rsvd5; | 2324 | uint32_t rsvd5; |
| @@ -3547,7 +3539,7 @@ typedef struct _IOCB { /* IOCB structure */ | |||
| 3547 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ | 3539 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ |
| 3548 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ | 3540 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ |
| 3549 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ | 3541 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ |
| 3550 | 3542 | struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ | |
| 3551 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ | 3543 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ |
| 3552 | } un; | 3544 | } un; |
| 3553 | union { | 3545 | union { |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 3689eee04535..1585148a17e5 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
| @@ -194,6 +194,26 @@ struct lpfc_sli4_flags { | |||
| 194 | #define lpfc_fip_flag_WORD word0 | 194 | #define lpfc_fip_flag_WORD word0 |
| 195 | }; | 195 | }; |
| 196 | 196 | ||
| 197 | struct sli4_bls_acc { | ||
| 198 | uint32_t word0_rsvd; /* Word0 must be reserved */ | ||
| 199 | uint32_t word1; | ||
| 200 | #define lpfc_abts_orig_SHIFT 0 | ||
| 201 | #define lpfc_abts_orig_MASK 0x00000001 | ||
| 202 | #define lpfc_abts_orig_WORD word1 | ||
| 203 | #define LPFC_ABTS_UNSOL_RSP 1 | ||
| 204 | #define LPFC_ABTS_UNSOL_INT 0 | ||
| 205 | uint32_t word2; | ||
| 206 | #define lpfc_abts_rxid_SHIFT 0 | ||
| 207 | #define lpfc_abts_rxid_MASK 0x0000FFFF | ||
| 208 | #define lpfc_abts_rxid_WORD word2 | ||
| 209 | #define lpfc_abts_oxid_SHIFT 16 | ||
| 210 | #define lpfc_abts_oxid_MASK 0x0000FFFF | ||
| 211 | #define lpfc_abts_oxid_WORD word2 | ||
| 212 | uint32_t word3; | ||
| 213 | uint32_t word4; | ||
| 214 | uint32_t word5_rsvd; /* Word5 must be reserved */ | ||
| 215 | }; | ||
| 216 | |||
| 197 | /* event queue entry structure */ | 217 | /* event queue entry structure */ |
| 198 | struct lpfc_eqe { | 218 | struct lpfc_eqe { |
| 199 | uint32_t word0; | 219 | uint32_t word0; |
| @@ -425,7 +445,7 @@ struct lpfc_wqe_generic{ | |||
| 425 | #define lpfc_wqe_gen_status_MASK 0x0000000F | 445 | #define lpfc_wqe_gen_status_MASK 0x0000000F |
| 426 | #define lpfc_wqe_gen_status_WORD word7 | 446 | #define lpfc_wqe_gen_status_WORD word7 |
| 427 | #define lpfc_wqe_gen_ct_SHIFT 2 | 447 | #define lpfc_wqe_gen_ct_SHIFT 2 |
| 428 | #define lpfc_wqe_gen_ct_MASK 0x00000007 | 448 | #define lpfc_wqe_gen_ct_MASK 0x00000003 |
| 429 | #define lpfc_wqe_gen_ct_WORD word7 | 449 | #define lpfc_wqe_gen_ct_WORD word7 |
| 430 | uint32_t abort_tag; | 450 | uint32_t abort_tag; |
| 431 | uint32_t word9; | 451 | uint32_t word9; |
| @@ -453,6 +473,13 @@ struct lpfc_wqe_generic{ | |||
| 453 | #define lpfc_wqe_gen_wqec_SHIFT 7 | 473 | #define lpfc_wqe_gen_wqec_SHIFT 7 |
| 454 | #define lpfc_wqe_gen_wqec_MASK 0x00000001 | 474 | #define lpfc_wqe_gen_wqec_MASK 0x00000001 |
| 455 | #define lpfc_wqe_gen_wqec_WORD word11 | 475 | #define lpfc_wqe_gen_wqec_WORD word11 |
| 476 | #define ELS_ID_FLOGI 3 | ||
| 477 | #define ELS_ID_FDISC 2 | ||
| 478 | #define ELS_ID_LOGO 1 | ||
| 479 | #define ELS_ID_DEFAULT 0 | ||
| 480 | #define lpfc_wqe_gen_els_id_SHIFT 4 | ||
| 481 | #define lpfc_wqe_gen_els_id_MASK 0x00000003 | ||
| 482 | #define lpfc_wqe_gen_els_id_WORD word11 | ||
| 456 | #define lpfc_wqe_gen_cmd_type_SHIFT 0 | 483 | #define lpfc_wqe_gen_cmd_type_SHIFT 0 |
| 457 | #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F | 484 | #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F |
| 458 | #define lpfc_wqe_gen_cmd_type_WORD word11 | 485 | #define lpfc_wqe_gen_cmd_type_WORD word11 |
| @@ -487,8 +514,8 @@ struct lpfc_register { | |||
| 487 | 514 | ||
| 488 | #define LPFC_UERR_STATUS_HI 0x00A4 | 515 | #define LPFC_UERR_STATUS_HI 0x00A4 |
| 489 | #define LPFC_UERR_STATUS_LO 0x00A0 | 516 | #define LPFC_UERR_STATUS_LO 0x00A0 |
| 490 | #define LPFC_ONLINE0 0x00B0 | 517 | #define LPFC_UE_MASK_HI 0x00AC |
| 491 | #define LPFC_ONLINE1 0x00B4 | 518 | #define LPFC_UE_MASK_LO 0x00A8 |
| 492 | #define LPFC_SCRATCHPAD 0x0058 | 519 | #define LPFC_SCRATCHPAD 0x0058 |
| 493 | 520 | ||
| 494 | /* BAR0 Registers */ | 521 | /* BAR0 Registers */ |
| @@ -760,6 +787,7 @@ struct mbox_header { | |||
| 760 | #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 | 787 | #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 |
| 761 | #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 | 788 | #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 |
| 762 | #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 | 789 | #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 |
| 790 | #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A | ||
| 763 | #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D | 791 | #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D |
| 764 | 792 | ||
| 765 | /* FCoE Opcodes */ | 793 | /* FCoE Opcodes */ |
| @@ -1273,6 +1301,51 @@ struct lpfc_mbx_del_fcf_tbl_entry { | |||
| 1273 | #define lpfc_mbx_del_fcf_tbl_index_WORD word10 | 1301 | #define lpfc_mbx_del_fcf_tbl_index_WORD word10 |
| 1274 | }; | 1302 | }; |
| 1275 | 1303 | ||
| 1304 | struct lpfc_mbx_query_fw_cfg { | ||
| 1305 | struct mbox_header header; | ||
| 1306 | uint32_t config_number; | ||
| 1307 | uint32_t asic_rev; | ||
| 1308 | uint32_t phys_port; | ||
| 1309 | uint32_t function_mode; | ||
| 1310 | /* firmware Function Mode */ | ||
| 1311 | #define lpfc_function_mode_toe_SHIFT 0 | ||
| 1312 | #define lpfc_function_mode_toe_MASK 0x00000001 | ||
| 1313 | #define lpfc_function_mode_toe_WORD function_mode | ||
| 1314 | #define lpfc_function_mode_nic_SHIFT 1 | ||
| 1315 | #define lpfc_function_mode_nic_MASK 0x00000001 | ||
| 1316 | #define lpfc_function_mode_nic_WORD function_mode | ||
| 1317 | #define lpfc_function_mode_rdma_SHIFT 2 | ||
| 1318 | #define lpfc_function_mode_rdma_MASK 0x00000001 | ||
| 1319 | #define lpfc_function_mode_rdma_WORD function_mode | ||
| 1320 | #define lpfc_function_mode_vm_SHIFT 3 | ||
| 1321 | #define lpfc_function_mode_vm_MASK 0x00000001 | ||
| 1322 | #define lpfc_function_mode_vm_WORD function_mode | ||
| 1323 | #define lpfc_function_mode_iscsi_i_SHIFT 4 | ||
| 1324 | #define lpfc_function_mode_iscsi_i_MASK 0x00000001 | ||
| 1325 | #define lpfc_function_mode_iscsi_i_WORD function_mode | ||
| 1326 | #define lpfc_function_mode_iscsi_t_SHIFT 5 | ||
| 1327 | #define lpfc_function_mode_iscsi_t_MASK 0x00000001 | ||
| 1328 | #define lpfc_function_mode_iscsi_t_WORD function_mode | ||
| 1329 | #define lpfc_function_mode_fcoe_i_SHIFT 6 | ||
| 1330 | #define lpfc_function_mode_fcoe_i_MASK 0x00000001 | ||
| 1331 | #define lpfc_function_mode_fcoe_i_WORD function_mode | ||
| 1332 | #define lpfc_function_mode_fcoe_t_SHIFT 7 | ||
| 1333 | #define lpfc_function_mode_fcoe_t_MASK 0x00000001 | ||
| 1334 | #define lpfc_function_mode_fcoe_t_WORD function_mode | ||
| 1335 | #define lpfc_function_mode_dal_SHIFT 8 | ||
| 1336 | #define lpfc_function_mode_dal_MASK 0x00000001 | ||
| 1337 | #define lpfc_function_mode_dal_WORD function_mode | ||
| 1338 | #define lpfc_function_mode_lro_SHIFT 9 | ||
| 1339 | #define lpfc_function_mode_lro_MASK 0x00000001 | ||
| 1340 | #define lpfc_function_mode_lro_WORD function_mode9 | ||
| 1341 | #define lpfc_function_mode_flex10_SHIFT 10 | ||
| 1342 | #define lpfc_function_mode_flex10_MASK 0x00000001 | ||
| 1343 | #define lpfc_function_mode_flex10_WORD function_mode | ||
| 1344 | #define lpfc_function_mode_ncsi_SHIFT 11 | ||
| 1345 | #define lpfc_function_mode_ncsi_MASK 0x00000001 | ||
| 1346 | #define lpfc_function_mode_ncsi_WORD function_mode | ||
| 1347 | }; | ||
| 1348 | |||
| 1276 | /* Status field for embedded SLI_CONFIG mailbox command */ | 1349 | /* Status field for embedded SLI_CONFIG mailbox command */ |
| 1277 | #define STATUS_SUCCESS 0x0 | 1350 | #define STATUS_SUCCESS 0x0 |
| 1278 | #define STATUS_FAILED 0x1 | 1351 | #define STATUS_FAILED 0x1 |
| @@ -1349,8 +1422,7 @@ struct lpfc_mbx_reg_vfi { | |||
| 1349 | #define lpfc_reg_vfi_fcfi_SHIFT 0 | 1422 | #define lpfc_reg_vfi_fcfi_SHIFT 0 |
| 1350 | #define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF | 1423 | #define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF |
| 1351 | #define lpfc_reg_vfi_fcfi_WORD word2 | 1424 | #define lpfc_reg_vfi_fcfi_WORD word2 |
| 1352 | uint32_t word3_rsvd; | 1425 | uint32_t wwn[2]; |
| 1353 | uint32_t word4_rsvd; | ||
| 1354 | struct ulp_bde64 bde; | 1426 | struct ulp_bde64 bde; |
| 1355 | uint32_t word8_rsvd; | 1427 | uint32_t word8_rsvd; |
| 1356 | uint32_t word9_rsvd; | 1428 | uint32_t word9_rsvd; |
| @@ -1555,6 +1627,11 @@ struct lpfc_mbx_read_rev { | |||
| 1555 | #define lpfc_mbx_rd_rev_fcoe_SHIFT 20 | 1627 | #define lpfc_mbx_rd_rev_fcoe_SHIFT 20 |
| 1556 | #define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 | 1628 | #define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 |
| 1557 | #define lpfc_mbx_rd_rev_fcoe_WORD word1 | 1629 | #define lpfc_mbx_rd_rev_fcoe_WORD word1 |
| 1630 | #define lpfc_mbx_rd_rev_cee_ver_SHIFT 21 | ||
| 1631 | #define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003 | ||
| 1632 | #define lpfc_mbx_rd_rev_cee_ver_WORD word1 | ||
| 1633 | #define LPFC_PREDCBX_CEE_MODE 0 | ||
| 1634 | #define LPFC_DCBX_CEE_MODE 1 | ||
| 1558 | #define lpfc_mbx_rd_rev_vpd_SHIFT 29 | 1635 | #define lpfc_mbx_rd_rev_vpd_SHIFT 29 |
| 1559 | #define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 | 1636 | #define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 |
| 1560 | #define lpfc_mbx_rd_rev_vpd_WORD word1 | 1637 | #define lpfc_mbx_rd_rev_vpd_WORD word1 |
| @@ -1804,6 +1881,7 @@ struct lpfc_mqe { | |||
| 1804 | struct lpfc_mbx_read_config rd_config; | 1881 | struct lpfc_mbx_read_config rd_config; |
| 1805 | struct lpfc_mbx_request_features req_ftrs; | 1882 | struct lpfc_mbx_request_features req_ftrs; |
| 1806 | struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; | 1883 | struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; |
| 1884 | struct lpfc_mbx_query_fw_cfg query_fw_cfg; | ||
| 1807 | struct lpfc_mbx_nop nop; | 1885 | struct lpfc_mbx_nop nop; |
| 1808 | } un; | 1886 | } un; |
| 1809 | }; | 1887 | }; |
| @@ -1885,7 +1963,7 @@ struct lpfc_acqe_link { | |||
| 1885 | }; | 1963 | }; |
| 1886 | 1964 | ||
| 1887 | struct lpfc_acqe_fcoe { | 1965 | struct lpfc_acqe_fcoe { |
| 1888 | uint32_t fcf_index; | 1966 | uint32_t index; |
| 1889 | uint32_t word1; | 1967 | uint32_t word1; |
| 1890 | #define lpfc_acqe_fcoe_fcf_count_SHIFT 0 | 1968 | #define lpfc_acqe_fcoe_fcf_count_SHIFT 0 |
| 1891 | #define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF | 1969 | #define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF |
| @@ -1896,6 +1974,7 @@ struct lpfc_acqe_fcoe { | |||
| 1896 | #define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 | 1974 | #define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 |
| 1897 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 | 1975 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 |
| 1898 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 | 1976 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 |
| 1977 | #define LPFC_FCOE_EVENT_TYPE_CVL 0x4 | ||
| 1899 | uint32_t event_tag; | 1978 | uint32_t event_tag; |
| 1900 | uint32_t trailer; | 1979 | uint32_t trailer; |
| 1901 | }; | 1980 | }; |
| @@ -1921,12 +2000,13 @@ struct lpfc_bmbx_create { | |||
| 1921 | #define SGL_ALIGN_SZ 64 | 2000 | #define SGL_ALIGN_SZ 64 |
| 1922 | #define SGL_PAGE_SIZE 4096 | 2001 | #define SGL_PAGE_SIZE 4096 |
| 1923 | /* align SGL addr on a size boundary - adjust address up */ | 2002 | /* align SGL addr on a size boundary - adjust address up */ |
| 1924 | #define NO_XRI ((uint16_t)-1) | 2003 | #define NO_XRI ((uint16_t)-1) |
| 2004 | |||
| 1925 | struct wqe_common { | 2005 | struct wqe_common { |
| 1926 | uint32_t word6; | 2006 | uint32_t word6; |
| 1927 | #define wqe_xri_SHIFT 0 | 2007 | #define wqe_xri_tag_SHIFT 0 |
| 1928 | #define wqe_xri_MASK 0x0000FFFF | 2008 | #define wqe_xri_tag_MASK 0x0000FFFF |
| 1929 | #define wqe_xri_WORD word6 | 2009 | #define wqe_xri_tag_WORD word6 |
| 1930 | #define wqe_ctxt_tag_SHIFT 16 | 2010 | #define wqe_ctxt_tag_SHIFT 16 |
| 1931 | #define wqe_ctxt_tag_MASK 0x0000FFFF | 2011 | #define wqe_ctxt_tag_MASK 0x0000FFFF |
| 1932 | #define wqe_ctxt_tag_WORD word6 | 2012 | #define wqe_ctxt_tag_WORD word6 |
| @@ -1987,7 +2067,7 @@ struct wqe_common { | |||
| 1987 | #define wqe_wqec_MASK 0x00000001 | 2067 | #define wqe_wqec_MASK 0x00000001 |
| 1988 | #define wqe_wqec_WORD word11 | 2068 | #define wqe_wqec_WORD word11 |
| 1989 | #define wqe_cqid_SHIFT 16 | 2069 | #define wqe_cqid_SHIFT 16 |
| 1990 | #define wqe_cqid_MASK 0x000003ff | 2070 | #define wqe_cqid_MASK 0x0000ffff |
| 1991 | #define wqe_cqid_WORD word11 | 2071 | #define wqe_cqid_WORD word11 |
| 1992 | }; | 2072 | }; |
| 1993 | 2073 | ||
| @@ -1996,6 +2076,9 @@ struct wqe_did { | |||
| 1996 | #define wqe_els_did_SHIFT 0 | 2076 | #define wqe_els_did_SHIFT 0 |
| 1997 | #define wqe_els_did_MASK 0x00FFFFFF | 2077 | #define wqe_els_did_MASK 0x00FFFFFF |
| 1998 | #define wqe_els_did_WORD word5 | 2078 | #define wqe_els_did_WORD word5 |
| 2079 | #define wqe_xmit_bls_pt_SHIFT 28 | ||
| 2080 | #define wqe_xmit_bls_pt_MASK 0x00000003 | ||
| 2081 | #define wqe_xmit_bls_pt_WORD word5 | ||
| 1999 | #define wqe_xmit_bls_ar_SHIFT 30 | 2082 | #define wqe_xmit_bls_ar_SHIFT 30 |
| 2000 | #define wqe_xmit_bls_ar_MASK 0x00000001 | 2083 | #define wqe_xmit_bls_ar_MASK 0x00000001 |
| 2001 | #define wqe_xmit_bls_ar_WORD word5 | 2084 | #define wqe_xmit_bls_ar_WORD word5 |
| @@ -2044,6 +2127,23 @@ struct xmit_els_rsp64_wqe { | |||
| 2044 | 2127 | ||
| 2045 | struct xmit_bls_rsp64_wqe { | 2128 | struct xmit_bls_rsp64_wqe { |
| 2046 | uint32_t payload0; | 2129 | uint32_t payload0; |
| 2130 | /* Payload0 for BA_ACC */ | ||
| 2131 | #define xmit_bls_rsp64_acc_seq_id_SHIFT 16 | ||
| 2132 | #define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff | ||
| 2133 | #define xmit_bls_rsp64_acc_seq_id_WORD payload0 | ||
| 2134 | #define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24 | ||
| 2135 | #define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff | ||
| 2136 | #define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0 | ||
| 2137 | /* Payload0 for BA_RJT */ | ||
| 2138 | #define xmit_bls_rsp64_rjt_vspec_SHIFT 0 | ||
| 2139 | #define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff | ||
| 2140 | #define xmit_bls_rsp64_rjt_vspec_WORD payload0 | ||
| 2141 | #define xmit_bls_rsp64_rjt_expc_SHIFT 8 | ||
| 2142 | #define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff | ||
| 2143 | #define xmit_bls_rsp64_rjt_expc_WORD payload0 | ||
| 2144 | #define xmit_bls_rsp64_rjt_rsnc_SHIFT 16 | ||
| 2145 | #define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff | ||
| 2146 | #define xmit_bls_rsp64_rjt_rsnc_WORD payload0 | ||
| 2047 | uint32_t word1; | 2147 | uint32_t word1; |
| 2048 | #define xmit_bls_rsp64_rxid_SHIFT 0 | 2148 | #define xmit_bls_rsp64_rxid_SHIFT 0 |
| 2049 | #define xmit_bls_rsp64_rxid_MASK 0x0000ffff | 2149 | #define xmit_bls_rsp64_rxid_MASK 0x0000ffff |
| @@ -2052,18 +2152,19 @@ struct xmit_bls_rsp64_wqe { | |||
| 2052 | #define xmit_bls_rsp64_oxid_MASK 0x0000ffff | 2152 | #define xmit_bls_rsp64_oxid_MASK 0x0000ffff |
| 2053 | #define xmit_bls_rsp64_oxid_WORD word1 | 2153 | #define xmit_bls_rsp64_oxid_WORD word1 |
| 2054 | uint32_t word2; | 2154 | uint32_t word2; |
| 2055 | #define xmit_bls_rsp64_seqcntlo_SHIFT 0 | 2155 | #define xmit_bls_rsp64_seqcnthi_SHIFT 0 |
| 2056 | #define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff | ||
| 2057 | #define xmit_bls_rsp64_seqcntlo_WORD word2 | ||
| 2058 | #define xmit_bls_rsp64_seqcnthi_SHIFT 16 | ||
| 2059 | #define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff | 2156 | #define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff |
| 2060 | #define xmit_bls_rsp64_seqcnthi_WORD word2 | 2157 | #define xmit_bls_rsp64_seqcnthi_WORD word2 |
| 2158 | #define xmit_bls_rsp64_seqcntlo_SHIFT 16 | ||
| 2159 | #define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff | ||
| 2160 | #define xmit_bls_rsp64_seqcntlo_WORD word2 | ||
| 2061 | uint32_t rsrvd3; | 2161 | uint32_t rsrvd3; |
| 2062 | uint32_t rsrvd4; | 2162 | uint32_t rsrvd4; |
| 2063 | struct wqe_did wqe_dest; | 2163 | struct wqe_did wqe_dest; |
| 2064 | struct wqe_common wqe_com; /* words 6-11 */ | 2164 | struct wqe_common wqe_com; /* words 6-11 */ |
| 2065 | uint32_t rsvd_12_15[4]; | 2165 | uint32_t rsvd_12_15[4]; |
| 2066 | }; | 2166 | }; |
| 2167 | |||
| 2067 | struct wqe_rctl_dfctl { | 2168 | struct wqe_rctl_dfctl { |
| 2068 | uint32_t word5; | 2169 | uint32_t word5; |
| 2069 | #define wqe_si_SHIFT 2 | 2170 | #define wqe_si_SHIFT 2 |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 562d8cee874b..226920d15ea1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
| 29 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
| 30 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
| 31 | #include <linux/aer.h> | ||
| 31 | 32 | ||
| 32 | #include <scsi/scsi.h> | 33 | #include <scsi/scsi.h> |
| 33 | #include <scsi/scsi_device.h> | 34 | #include <scsi/scsi_device.h> |
| @@ -645,7 +646,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) | |||
| 645 | * down the SLI Layer. | 646 | * down the SLI Layer. |
| 646 | * | 647 | * |
| 647 | * Return codes | 648 | * Return codes |
| 648 | * 0 - sucess. | 649 | * 0 - success. |
| 649 | * Any other value - error. | 650 | * Any other value - error. |
| 650 | **/ | 651 | **/ |
| 651 | static int | 652 | static int |
| @@ -700,7 +701,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) | |||
| 700 | * down the SLI Layer. | 701 | * down the SLI Layer. |
| 701 | * | 702 | * |
| 702 | * Return codes | 703 | * Return codes |
| 703 | * 0 - sucess. | 704 | * 0 - success. |
| 704 | * Any other value - error. | 705 | * Any other value - error. |
| 705 | **/ | 706 | **/ |
| 706 | static int | 707 | static int |
| @@ -755,7 +756,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) | |||
| 755 | * uninitialization after the HBA is reset when bring down the SLI Layer. | 756 | * uninitialization after the HBA is reset when bring down the SLI Layer. |
| 756 | * | 757 | * |
| 757 | * Return codes | 758 | * Return codes |
| 758 | * 0 - sucess. | 759 | * 0 - success. |
| 759 | * Any other value - error. | 760 | * Any other value - error. |
| 760 | **/ | 761 | **/ |
| 761 | int | 762 | int |
| @@ -852,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | |||
| 852 | void | 853 | void |
| 853 | lpfc_hb_timeout_handler(struct lpfc_hba *phba) | 854 | lpfc_hb_timeout_handler(struct lpfc_hba *phba) |
| 854 | { | 855 | { |
| 856 | struct lpfc_vport **vports; | ||
| 855 | LPFC_MBOXQ_t *pmboxq; | 857 | LPFC_MBOXQ_t *pmboxq; |
| 856 | struct lpfc_dmabuf *buf_ptr; | 858 | struct lpfc_dmabuf *buf_ptr; |
| 857 | int retval; | 859 | int retval, i; |
| 858 | struct lpfc_sli *psli = &phba->sli; | 860 | struct lpfc_sli *psli = &phba->sli; |
| 859 | LIST_HEAD(completions); | 861 | LIST_HEAD(completions); |
| 860 | 862 | ||
| 863 | vports = lpfc_create_vport_work_array(phba); | ||
| 864 | if (vports != NULL) | ||
| 865 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) | ||
| 866 | lpfc_rcv_seq_check_edtov(vports[i]); | ||
| 867 | lpfc_destroy_vport_work_array(phba, vports); | ||
| 868 | |||
| 861 | if ((phba->link_state == LPFC_HBA_ERROR) || | 869 | if ((phba->link_state == LPFC_HBA_ERROR) || |
| 862 | (phba->pport->load_flag & FC_UNLOADING) || | 870 | (phba->pport->load_flag & FC_UNLOADING) || |
| 863 | (phba->pport->fc_flag & FC_OFFLINE_MODE)) | 871 | (phba->pport->fc_flag & FC_OFFLINE_MODE)) |
| @@ -1254,7 +1262,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) | |||
| 1254 | * routine from the API jump table function pointer from the lpfc_hba struct. | 1262 | * routine from the API jump table function pointer from the lpfc_hba struct. |
| 1255 | * | 1263 | * |
| 1256 | * Return codes | 1264 | * Return codes |
| 1257 | * 0 - sucess. | 1265 | * 0 - success. |
| 1258 | * Any other value - error. | 1266 | * Any other value - error. |
| 1259 | **/ | 1267 | **/ |
| 1260 | void | 1268 | void |
| @@ -1521,10 +1529,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
| 1521 | int GE = 0; | 1529 | int GE = 0; |
| 1522 | int oneConnect = 0; /* default is not a oneConnect */ | 1530 | int oneConnect = 0; /* default is not a oneConnect */ |
| 1523 | struct { | 1531 | struct { |
| 1524 | char * name; | 1532 | char *name; |
| 1525 | int max_speed; | 1533 | char *bus; |
| 1526 | char * bus; | 1534 | char *function; |
| 1527 | } m = {"<Unknown>", 0, ""}; | 1535 | } m = {"<Unknown>", "", ""}; |
| 1528 | 1536 | ||
| 1529 | if (mdp && mdp[0] != '\0' | 1537 | if (mdp && mdp[0] != '\0' |
| 1530 | && descp && descp[0] != '\0') | 1538 | && descp && descp[0] != '\0') |
| @@ -1545,132 +1553,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
| 1545 | 1553 | ||
| 1546 | switch (dev_id) { | 1554 | switch (dev_id) { |
| 1547 | case PCI_DEVICE_ID_FIREFLY: | 1555 | case PCI_DEVICE_ID_FIREFLY: |
| 1548 | m = (typeof(m)){"LP6000", max_speed, "PCI"}; | 1556 | m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; |
| 1549 | break; | 1557 | break; |
| 1550 | case PCI_DEVICE_ID_SUPERFLY: | 1558 | case PCI_DEVICE_ID_SUPERFLY: |
| 1551 | if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) | 1559 | if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) |
| 1552 | m = (typeof(m)){"LP7000", max_speed, "PCI"}; | 1560 | m = (typeof(m)){"LP7000", "PCI", |
| 1561 | "Fibre Channel Adapter"}; | ||
| 1553 | else | 1562 | else |
| 1554 | m = (typeof(m)){"LP7000E", max_speed, "PCI"}; | 1563 | m = (typeof(m)){"LP7000E", "PCI", |
| 1564 | "Fibre Channel Adapter"}; | ||
| 1555 | break; | 1565 | break; |
| 1556 | case PCI_DEVICE_ID_DRAGONFLY: | 1566 | case PCI_DEVICE_ID_DRAGONFLY: |
| 1557 | m = (typeof(m)){"LP8000", max_speed, "PCI"}; | 1567 | m = (typeof(m)){"LP8000", "PCI", |
| 1568 | "Fibre Channel Adapter"}; | ||
| 1558 | break; | 1569 | break; |
| 1559 | case PCI_DEVICE_ID_CENTAUR: | 1570 | case PCI_DEVICE_ID_CENTAUR: |
| 1560 | if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) | 1571 | if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) |
| 1561 | m = (typeof(m)){"LP9002", max_speed, "PCI"}; | 1572 | m = (typeof(m)){"LP9002", "PCI", |
| 1573 | "Fibre Channel Adapter"}; | ||
| 1562 | else | 1574 | else |
| 1563 | m = (typeof(m)){"LP9000", max_speed, "PCI"}; | 1575 | m = (typeof(m)){"LP9000", "PCI", |
| 1576 | "Fibre Channel Adapter"}; | ||
| 1564 | break; | 1577 | break; |
| 1565 | case PCI_DEVICE_ID_RFLY: | 1578 | case PCI_DEVICE_ID_RFLY: |
| 1566 | m = (typeof(m)){"LP952", max_speed, "PCI"}; | 1579 | m = (typeof(m)){"LP952", "PCI", |
| 1580 | "Fibre Channel Adapter"}; | ||
| 1567 | break; | 1581 | break; |
| 1568 | case PCI_DEVICE_ID_PEGASUS: | 1582 | case PCI_DEVICE_ID_PEGASUS: |
| 1569 | m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; | 1583 | m = (typeof(m)){"LP9802", "PCI-X", |
| 1584 | "Fibre Channel Adapter"}; | ||
| 1570 | break; | 1585 | break; |
| 1571 | case PCI_DEVICE_ID_THOR: | 1586 | case PCI_DEVICE_ID_THOR: |
| 1572 | m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; | 1587 | m = (typeof(m)){"LP10000", "PCI-X", |
| 1588 | "Fibre Channel Adapter"}; | ||
| 1573 | break; | 1589 | break; |
| 1574 | case PCI_DEVICE_ID_VIPER: | 1590 | case PCI_DEVICE_ID_VIPER: |
| 1575 | m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; | 1591 | m = (typeof(m)){"LPX1000", "PCI-X", |
| 1592 | "Fibre Channel Adapter"}; | ||
| 1576 | break; | 1593 | break; |
| 1577 | case PCI_DEVICE_ID_PFLY: | 1594 | case PCI_DEVICE_ID_PFLY: |
| 1578 | m = (typeof(m)){"LP982", max_speed, "PCI-X"}; | 1595 | m = (typeof(m)){"LP982", "PCI-X", |
| 1596 | "Fibre Channel Adapter"}; | ||
| 1579 | break; | 1597 | break; |
| 1580 | case PCI_DEVICE_ID_TFLY: | 1598 | case PCI_DEVICE_ID_TFLY: |
| 1581 | m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; | 1599 | m = (typeof(m)){"LP1050", "PCI-X", |
| 1600 | "Fibre Channel Adapter"}; | ||
| 1582 | break; | 1601 | break; |
| 1583 | case PCI_DEVICE_ID_HELIOS: | 1602 | case PCI_DEVICE_ID_HELIOS: |
| 1584 | m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; | 1603 | m = (typeof(m)){"LP11000", "PCI-X2", |
| 1604 | "Fibre Channel Adapter"}; | ||
| 1585 | break; | 1605 | break; |
| 1586 | case PCI_DEVICE_ID_HELIOS_SCSP: | 1606 | case PCI_DEVICE_ID_HELIOS_SCSP: |
| 1587 | m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; | 1607 | m = (typeof(m)){"LP11000-SP", "PCI-X2", |
| 1608 | "Fibre Channel Adapter"}; | ||
| 1588 | break; | 1609 | break; |
| 1589 | case PCI_DEVICE_ID_HELIOS_DCSP: | 1610 | case PCI_DEVICE_ID_HELIOS_DCSP: |
| 1590 | m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; | 1611 | m = (typeof(m)){"LP11002-SP", "PCI-X2", |
| 1612 | "Fibre Channel Adapter"}; | ||
| 1591 | break; | 1613 | break; |
| 1592 | case PCI_DEVICE_ID_NEPTUNE: | 1614 | case PCI_DEVICE_ID_NEPTUNE: |
| 1593 | m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; | 1615 | m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; |
| 1594 | break; | 1616 | break; |
| 1595 | case PCI_DEVICE_ID_NEPTUNE_SCSP: | 1617 | case PCI_DEVICE_ID_NEPTUNE_SCSP: |
| 1596 | m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; | 1618 | m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; |
| 1597 | break; | 1619 | break; |
| 1598 | case PCI_DEVICE_ID_NEPTUNE_DCSP: | 1620 | case PCI_DEVICE_ID_NEPTUNE_DCSP: |
| 1599 | m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; | 1621 | m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; |
| 1600 | break; | 1622 | break; |
| 1601 | case PCI_DEVICE_ID_BMID: | 1623 | case PCI_DEVICE_ID_BMID: |
| 1602 | m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; | 1624 | m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; |
| 1603 | break; | 1625 | break; |
| 1604 | case PCI_DEVICE_ID_BSMB: | 1626 | case PCI_DEVICE_ID_BSMB: |
| 1605 | m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; | 1627 | m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; |
| 1606 | break; | 1628 | break; |
| 1607 | case PCI_DEVICE_ID_ZEPHYR: | 1629 | case PCI_DEVICE_ID_ZEPHYR: |
| 1608 | m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; | 1630 | m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; |
| 1609 | break; | 1631 | break; |
| 1610 | case PCI_DEVICE_ID_ZEPHYR_SCSP: | 1632 | case PCI_DEVICE_ID_ZEPHYR_SCSP: |
| 1611 | m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; | 1633 | m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; |
| 1612 | break; | 1634 | break; |
| 1613 | case PCI_DEVICE_ID_ZEPHYR_DCSP: | 1635 | case PCI_DEVICE_ID_ZEPHYR_DCSP: |
| 1614 | m = (typeof(m)){"LP2105", max_speed, "PCIe"}; | 1636 | m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; |
| 1615 | GE = 1; | 1637 | GE = 1; |
| 1616 | break; | 1638 | break; |
| 1617 | case PCI_DEVICE_ID_ZMID: | 1639 | case PCI_DEVICE_ID_ZMID: |
| 1618 | m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; | 1640 | m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; |
| 1619 | break; | 1641 | break; |
| 1620 | case PCI_DEVICE_ID_ZSMB: | 1642 | case PCI_DEVICE_ID_ZSMB: |
| 1621 | m = (typeof(m)){"LPe111", max_speed, "PCIe"}; | 1643 | m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; |
| 1622 | break; | 1644 | break; |
| 1623 | case PCI_DEVICE_ID_LP101: | 1645 | case PCI_DEVICE_ID_LP101: |
| 1624 | m = (typeof(m)){"LP101", max_speed, "PCI-X"}; | 1646 | m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; |
| 1625 | break; | 1647 | break; |
| 1626 | case PCI_DEVICE_ID_LP10000S: | 1648 | case PCI_DEVICE_ID_LP10000S: |
| 1627 | m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; | 1649 | m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; |
| 1628 | break; | 1650 | break; |
| 1629 | case PCI_DEVICE_ID_LP11000S: | 1651 | case PCI_DEVICE_ID_LP11000S: |
| 1630 | m = (typeof(m)){"LP11000-S", max_speed, | 1652 | m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; |
| 1631 | "PCI-X2"}; | ||
| 1632 | break; | 1653 | break; |
| 1633 | case PCI_DEVICE_ID_LPE11000S: | 1654 | case PCI_DEVICE_ID_LPE11000S: |
| 1634 | m = (typeof(m)){"LPe11000-S", max_speed, | 1655 | m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; |
| 1635 | "PCIe"}; | ||
| 1636 | break; | 1656 | break; |
| 1637 | case PCI_DEVICE_ID_SAT: | 1657 | case PCI_DEVICE_ID_SAT: |
| 1638 | m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; | 1658 | m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; |
| 1639 | break; | 1659 | break; |
| 1640 | case PCI_DEVICE_ID_SAT_MID: | 1660 | case PCI_DEVICE_ID_SAT_MID: |
| 1641 | m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; | 1661 | m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; |
| 1642 | break; | 1662 | break; |
| 1643 | case PCI_DEVICE_ID_SAT_SMB: | 1663 | case PCI_DEVICE_ID_SAT_SMB: |
| 1644 | m = (typeof(m)){"LPe121", max_speed, "PCIe"}; | 1664 | m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; |
| 1645 | break; | 1665 | break; |
| 1646 | case PCI_DEVICE_ID_SAT_DCSP: | 1666 | case PCI_DEVICE_ID_SAT_DCSP: |
| 1647 | m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; | 1667 | m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; |
| 1648 | break; | 1668 | break; |
| 1649 | case PCI_DEVICE_ID_SAT_SCSP: | 1669 | case PCI_DEVICE_ID_SAT_SCSP: |
| 1650 | m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; | 1670 | m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; |
| 1651 | break; | 1671 | break; |
| 1652 | case PCI_DEVICE_ID_SAT_S: | 1672 | case PCI_DEVICE_ID_SAT_S: |
| 1653 | m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; | 1673 | m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; |
| 1654 | break; | 1674 | break; |
| 1655 | case PCI_DEVICE_ID_HORNET: | 1675 | case PCI_DEVICE_ID_HORNET: |
| 1656 | m = (typeof(m)){"LP21000", max_speed, "PCIe"}; | 1676 | m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; |
| 1657 | GE = 1; | 1677 | GE = 1; |
| 1658 | break; | 1678 | break; |
| 1659 | case PCI_DEVICE_ID_PROTEUS_VF: | 1679 | case PCI_DEVICE_ID_PROTEUS_VF: |
| 1660 | m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; | 1680 | m = (typeof(m)){"LPev12000", "PCIe IOV", |
| 1681 | "Fibre Channel Adapter"}; | ||
| 1661 | break; | 1682 | break; |
| 1662 | case PCI_DEVICE_ID_PROTEUS_PF: | 1683 | case PCI_DEVICE_ID_PROTEUS_PF: |
| 1663 | m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; | 1684 | m = (typeof(m)){"LPev12000", "PCIe IOV", |
| 1685 | "Fibre Channel Adapter"}; | ||
| 1664 | break; | 1686 | break; |
| 1665 | case PCI_DEVICE_ID_PROTEUS_S: | 1687 | case PCI_DEVICE_ID_PROTEUS_S: |
| 1666 | m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; | 1688 | m = (typeof(m)){"LPemv12002-S", "PCIe IOV", |
| 1689 | "Fibre Channel Adapter"}; | ||
| 1667 | break; | 1690 | break; |
| 1668 | case PCI_DEVICE_ID_TIGERSHARK: | 1691 | case PCI_DEVICE_ID_TIGERSHARK: |
| 1669 | oneConnect = 1; | 1692 | oneConnect = 1; |
| 1670 | m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; | 1693 | m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; |
| 1694 | break; | ||
| 1695 | case PCI_DEVICE_ID_TOMCAT: | ||
| 1696 | oneConnect = 1; | ||
| 1697 | m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; | ||
| 1698 | break; | ||
| 1699 | case PCI_DEVICE_ID_FALCON: | ||
| 1700 | m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", | ||
| 1701 | "EmulexSecure Fibre"}; | ||
| 1671 | break; | 1702 | break; |
| 1672 | default: | 1703 | default: |
| 1673 | m = (typeof(m)){ NULL }; | 1704 | m = (typeof(m)){"Unknown", "", ""}; |
| 1674 | break; | 1705 | break; |
| 1675 | } | 1706 | } |
| 1676 | 1707 | ||
| @@ -1682,17 +1713,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
| 1682 | if (descp && descp[0] == '\0') { | 1713 | if (descp && descp[0] == '\0') { |
| 1683 | if (oneConnect) | 1714 | if (oneConnect) |
| 1684 | snprintf(descp, 255, | 1715 | snprintf(descp, 255, |
| 1685 | "Emulex OneConnect %s, FCoE Initiator, Port %s", | 1716 | "Emulex OneConnect %s, %s Initiator, Port %s", |
| 1686 | m.name, | 1717 | m.name, m.function, |
| 1687 | phba->Port); | 1718 | phba->Port); |
| 1688 | else | 1719 | else |
| 1689 | snprintf(descp, 255, | 1720 | snprintf(descp, 255, |
| 1690 | "Emulex %s %d%s %s %s", | 1721 | "Emulex %s %d%s %s %s", |
| 1691 | m.name, m.max_speed, | 1722 | m.name, max_speed, (GE) ? "GE" : "Gb", |
| 1692 | (GE) ? "GE" : "Gb", | 1723 | m.bus, m.function); |
| 1693 | m.bus, | ||
| 1694 | (GE) ? "FCoE Adapter" : | ||
| 1695 | "Fibre Channel Adapter"); | ||
| 1696 | } | 1724 | } |
| 1697 | } | 1725 | } |
| 1698 | 1726 | ||
| @@ -2217,7 +2245,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
| 2217 | 2245 | ||
| 2218 | if (vports[i]->load_flag & FC_UNLOADING) | 2246 | if (vports[i]->load_flag & FC_UNLOADING) |
| 2219 | continue; | 2247 | continue; |
| 2220 | vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; | 2248 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; |
| 2221 | shost = lpfc_shost_from_vport(vports[i]); | 2249 | shost = lpfc_shost_from_vport(vports[i]); |
| 2222 | list_for_each_entry_safe(ndlp, next_ndlp, | 2250 | list_for_each_entry_safe(ndlp, next_ndlp, |
| 2223 | &vports[i]->fc_nodes, | 2251 | &vports[i]->fc_nodes, |
| @@ -2308,6 +2336,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) | |||
| 2308 | 2336 | ||
| 2309 | spin_lock_irq(&phba->hbalock); | 2337 | spin_lock_irq(&phba->hbalock); |
| 2310 | /* Release all the lpfc_scsi_bufs maintained by this host. */ | 2338 | /* Release all the lpfc_scsi_bufs maintained by this host. */ |
| 2339 | spin_lock(&phba->scsi_buf_list_lock); | ||
| 2311 | list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { | 2340 | list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { |
| 2312 | list_del(&sb->list); | 2341 | list_del(&sb->list); |
| 2313 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, | 2342 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, |
| @@ -2315,6 +2344,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) | |||
| 2315 | kfree(sb); | 2344 | kfree(sb); |
| 2316 | phba->total_scsi_bufs--; | 2345 | phba->total_scsi_bufs--; |
| 2317 | } | 2346 | } |
| 2347 | spin_unlock(&phba->scsi_buf_list_lock); | ||
| 2318 | 2348 | ||
| 2319 | /* Release all the lpfc_iocbq entries maintained by this host. */ | 2349 | /* Release all the lpfc_iocbq entries maintained by this host. */ |
| 2320 | list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { | 2350 | list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { |
| @@ -2322,9 +2352,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) | |||
| 2322 | kfree(io); | 2352 | kfree(io); |
| 2323 | phba->total_iocbq_bufs--; | 2353 | phba->total_iocbq_bufs--; |
| 2324 | } | 2354 | } |
| 2325 | |||
| 2326 | spin_unlock_irq(&phba->hbalock); | 2355 | spin_unlock_irq(&phba->hbalock); |
| 2327 | |||
| 2328 | return 0; | 2356 | return 0; |
| 2329 | } | 2357 | } |
| 2330 | 2358 | ||
| @@ -2408,7 +2436,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
| 2408 | vport->els_tmofunc.function = lpfc_els_timeout; | 2436 | vport->els_tmofunc.function = lpfc_els_timeout; |
| 2409 | vport->els_tmofunc.data = (unsigned long)vport; | 2437 | vport->els_tmofunc.data = (unsigned long)vport; |
| 2410 | 2438 | ||
| 2411 | error = scsi_add_host(shost, dev); | 2439 | error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); |
| 2412 | if (error) | 2440 | if (error) |
| 2413 | goto out_put_shost; | 2441 | goto out_put_shost; |
| 2414 | 2442 | ||
| @@ -2699,6 +2727,63 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) | |||
| 2699 | } | 2727 | } |
| 2700 | 2728 | ||
| 2701 | /** | 2729 | /** |
| 2730 | * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support | ||
| 2731 | * @phba: pointer to lpfc hba data structure. | ||
| 2732 | * | ||
| 2733 | * This function uses the QUERY_FW_CFG mailbox command to determine if the | ||
| 2734 | * firmware loaded supports FCoE. A return of zero indicates that the mailbox | ||
| 2735 | * was successful and the firmware supports FCoE. Any other return indicates | ||
| 2736 | * a error. It is assumed that this function will be called before interrupts | ||
| 2737 | * are enabled. | ||
| 2738 | **/ | ||
| 2739 | static int | ||
| 2740 | lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) | ||
| 2741 | { | ||
| 2742 | int rc = 0; | ||
| 2743 | LPFC_MBOXQ_t *mboxq; | ||
| 2744 | struct lpfc_mbx_query_fw_cfg *query_fw_cfg; | ||
| 2745 | uint32_t length; | ||
| 2746 | uint32_t shdr_status, shdr_add_status; | ||
| 2747 | |||
| 2748 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
| 2749 | if (!mboxq) { | ||
| 2750 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 2751 | "2621 Failed to allocate mbox for " | ||
| 2752 | "query firmware config cmd\n"); | ||
| 2753 | return -ENOMEM; | ||
| 2754 | } | ||
| 2755 | query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; | ||
| 2756 | length = (sizeof(struct lpfc_mbx_query_fw_cfg) - | ||
| 2757 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
| 2758 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
| 2759 | LPFC_MBOX_OPCODE_QUERY_FW_CFG, | ||
| 2760 | length, LPFC_SLI4_MBX_EMBED); | ||
| 2761 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
| 2762 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
| 2763 | shdr_status = bf_get(lpfc_mbox_hdr_status, | ||
| 2764 | &query_fw_cfg->header.cfg_shdr.response); | ||
| 2765 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, | ||
| 2766 | &query_fw_cfg->header.cfg_shdr.response); | ||
| 2767 | if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { | ||
| 2768 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 2769 | "2622 Query Firmware Config failed " | ||
| 2770 | "mbx status x%x, status x%x add_status x%x\n", | ||
| 2771 | rc, shdr_status, shdr_add_status); | ||
| 2772 | return -EINVAL; | ||
| 2773 | } | ||
| 2774 | if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { | ||
| 2775 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 2776 | "2623 FCoE Function not supported by firmware. " | ||
| 2777 | "Function mode = %08x\n", | ||
| 2778 | query_fw_cfg->function_mode); | ||
| 2779 | return -EINVAL; | ||
| 2780 | } | ||
| 2781 | if (rc != MBX_TIMEOUT) | ||
| 2782 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
| 2783 | return 0; | ||
| 2784 | } | ||
| 2785 | |||
| 2786 | /** | ||
| 2702 | * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code | 2787 | * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code |
| 2703 | * @phba: pointer to lpfc hba data structure. | 2788 | * @phba: pointer to lpfc hba data structure. |
| 2704 | * @acqe_link: pointer to the async link completion queue entry. | 2789 | * @acqe_link: pointer to the async link completion queue entry. |
| @@ -2918,13 +3003,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 2918 | { | 3003 | { |
| 2919 | uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); | 3004 | uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); |
| 2920 | int rc; | 3005 | int rc; |
| 3006 | struct lpfc_vport *vport; | ||
| 3007 | struct lpfc_nodelist *ndlp; | ||
| 3008 | struct Scsi_Host *shost; | ||
| 2921 | 3009 | ||
| 3010 | phba->fc_eventTag = acqe_fcoe->event_tag; | ||
| 2922 | phba->fcoe_eventtag = acqe_fcoe->event_tag; | 3011 | phba->fcoe_eventtag = acqe_fcoe->event_tag; |
| 2923 | switch (event_type) { | 3012 | switch (event_type) { |
| 2924 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: | 3013 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: |
| 2925 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 3014 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
| 2926 | "2546 New FCF found index 0x%x tag 0x%x\n", | 3015 | "2546 New FCF found index 0x%x tag 0x%x\n", |
| 2927 | acqe_fcoe->fcf_index, | 3016 | acqe_fcoe->index, |
| 2928 | acqe_fcoe->event_tag); | 3017 | acqe_fcoe->event_tag); |
| 2929 | /* | 3018 | /* |
| 2930 | * If the current FCF is in discovered state, or | 3019 | * If the current FCF is in discovered state, or |
| @@ -2939,12 +3028,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 2939 | spin_unlock_irq(&phba->hbalock); | 3028 | spin_unlock_irq(&phba->hbalock); |
| 2940 | 3029 | ||
| 2941 | /* Read the FCF table and re-discover SAN. */ | 3030 | /* Read the FCF table and re-discover SAN. */ |
| 2942 | rc = lpfc_sli4_read_fcf_record(phba, | 3031 | rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); |
| 2943 | LPFC_FCOE_FCF_GET_FIRST); | ||
| 2944 | if (rc) | 3032 | if (rc) |
| 2945 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 3033 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
| 2946 | "2547 Read FCF record failed 0x%x\n", | 3034 | "2547 Read FCF record failed 0x%x\n", |
| 2947 | rc); | 3035 | rc); |
| 2948 | break; | 3036 | break; |
| 2949 | 3037 | ||
| 2950 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: | 3038 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: |
| @@ -2956,11 +3044,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 2956 | 3044 | ||
| 2957 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: | 3045 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: |
| 2958 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 3046 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
| 2959 | "2549 FCF disconnected fron network index 0x%x" | 3047 | "2549 FCF disconnected from network index 0x%x" |
| 2960 | " tag 0x%x\n", acqe_fcoe->fcf_index, | 3048 | " tag 0x%x\n", acqe_fcoe->index, |
| 2961 | acqe_fcoe->event_tag); | 3049 | acqe_fcoe->event_tag); |
| 2962 | /* If the event is not for currently used fcf do nothing */ | 3050 | /* If the event is not for currently used fcf do nothing */ |
| 2963 | if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) | 3051 | if (phba->fcf.fcf_indx != acqe_fcoe->index) |
| 2964 | break; | 3052 | break; |
| 2965 | /* | 3053 | /* |
| 2966 | * Currently, driver supports only one FCF - so treat this as | 3054 | * Currently, driver supports only one FCF - so treat this as |
| @@ -2970,7 +3058,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 2970 | /* Unregister FCF if no devices connected to it */ | 3058 | /* Unregister FCF if no devices connected to it */ |
| 2971 | lpfc_unregister_unused_fcf(phba); | 3059 | lpfc_unregister_unused_fcf(phba); |
| 2972 | break; | 3060 | break; |
| 2973 | 3061 | case LPFC_FCOE_EVENT_TYPE_CVL: | |
| 3062 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
| 3063 | "2718 Clear Virtual Link Received for VPI 0x%x" | ||
| 3064 | " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); | ||
| 3065 | vport = lpfc_find_vport_by_vpid(phba, | ||
| 3066 | acqe_fcoe->index - phba->vpi_base); | ||
| 3067 | if (!vport) | ||
| 3068 | break; | ||
| 3069 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | ||
| 3070 | if (!ndlp) | ||
| 3071 | break; | ||
| 3072 | shost = lpfc_shost_from_vport(vport); | ||
| 3073 | lpfc_linkdown_port(vport); | ||
| 3074 | if (vport->port_type != LPFC_NPIV_PORT) { | ||
| 3075 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); | ||
| 3076 | spin_lock_irq(shost->host_lock); | ||
| 3077 | ndlp->nlp_flag |= NLP_DELAY_TMO; | ||
| 3078 | spin_unlock_irq(shost->host_lock); | ||
| 3079 | ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; | ||
| 3080 | vport->port_state = LPFC_FLOGI; | ||
| 3081 | } | ||
| 3082 | break; | ||
| 2974 | default: | 3083 | default: |
| 2975 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3084 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 2976 | "0288 Unknown FCoE event type 0x%x event tag " | 3085 | "0288 Unknown FCoE event type 0x%x event tag " |
| @@ -2990,6 +3099,7 @@ static void | |||
| 2990 | lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, | 3099 | lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, |
| 2991 | struct lpfc_acqe_dcbx *acqe_dcbx) | 3100 | struct lpfc_acqe_dcbx *acqe_dcbx) |
| 2992 | { | 3101 | { |
| 3102 | phba->fc_eventTag = acqe_dcbx->event_tag; | ||
| 2993 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3103 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 2994 | "0290 The SLI4 DCBX asynchronous event is not " | 3104 | "0290 The SLI4 DCBX asynchronous event is not " |
| 2995 | "handled yet\n"); | 3105 | "handled yet\n"); |
| @@ -3124,7 +3234,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) | |||
| 3124 | * PCI devices. | 3234 | * PCI devices. |
| 3125 | * | 3235 | * |
| 3126 | * Return codes | 3236 | * Return codes |
| 3127 | * 0 - sucessful | 3237 | * 0 - successful |
| 3128 | * other values - error | 3238 | * other values - error |
| 3129 | **/ | 3239 | **/ |
| 3130 | static int | 3240 | static int |
| @@ -3220,7 +3330,7 @@ lpfc_reset_hba(struct lpfc_hba *phba) | |||
| 3220 | * support the SLI-3 HBA device it attached to. | 3330 | * support the SLI-3 HBA device it attached to. |
| 3221 | * | 3331 | * |
| 3222 | * Return codes | 3332 | * Return codes |
| 3223 | * 0 - sucessful | 3333 | * 0 - successful |
| 3224 | * other values - error | 3334 | * other values - error |
| 3225 | **/ | 3335 | **/ |
| 3226 | static int | 3336 | static int |
| @@ -3321,7 +3431,7 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) | |||
| 3321 | * support the SLI-4 HBA device it attached to. | 3431 | * support the SLI-4 HBA device it attached to. |
| 3322 | * | 3432 | * |
| 3323 | * Return codes | 3433 | * Return codes |
| 3324 | * 0 - sucessful | 3434 | * 0 - successful |
| 3325 | * other values - error | 3435 | * other values - error |
| 3326 | **/ | 3436 | **/ |
| 3327 | static int | 3437 | static int |
| @@ -3432,7 +3542,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
| 3432 | /* Driver internal slow-path CQ Event pool */ | 3542 | /* Driver internal slow-path CQ Event pool */ |
| 3433 | INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); | 3543 | INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); |
| 3434 | /* Response IOCB work queue list */ | 3544 | /* Response IOCB work queue list */ |
| 3435 | INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); | 3545 | INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); |
| 3436 | /* Asynchronous event CQ Event work queue list */ | 3546 | /* Asynchronous event CQ Event work queue list */ |
| 3437 | INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); | 3547 | INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); |
| 3438 | /* Fast-path XRI aborted CQ Event work queue list */ | 3548 | /* Fast-path XRI aborted CQ Event work queue list */ |
| @@ -3461,6 +3571,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
| 3461 | if (unlikely(rc)) | 3571 | if (unlikely(rc)) |
| 3462 | goto out_free_bsmbx; | 3572 | goto out_free_bsmbx; |
| 3463 | 3573 | ||
| 3574 | rc = lpfc_sli4_fw_cfg_check(phba); | ||
| 3575 | if (unlikely(rc)) | ||
| 3576 | goto out_free_bsmbx; | ||
| 3577 | |||
| 3464 | /* Set up the hba's configuration parameters. */ | 3578 | /* Set up the hba's configuration parameters. */ |
| 3465 | rc = lpfc_sli4_read_config(phba); | 3579 | rc = lpfc_sli4_read_config(phba); |
| 3466 | if (unlikely(rc)) | 3580 | if (unlikely(rc)) |
| @@ -3594,8 +3708,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) | |||
| 3594 | 3708 | ||
| 3595 | /* Free the current connect table */ | 3709 | /* Free the current connect table */ |
| 3596 | list_for_each_entry_safe(conn_entry, next_conn_entry, | 3710 | list_for_each_entry_safe(conn_entry, next_conn_entry, |
| 3597 | &phba->fcf_conn_rec_list, list) | 3711 | &phba->fcf_conn_rec_list, list) { |
| 3712 | list_del_init(&conn_entry->list); | ||
| 3598 | kfree(conn_entry); | 3713 | kfree(conn_entry); |
| 3714 | } | ||
| 3599 | 3715 | ||
| 3600 | return; | 3716 | return; |
| 3601 | } | 3717 | } |
| @@ -3642,7 +3758,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
| 3642 | * device specific resource setup to support the HBA device it attached to. | 3758 | * device specific resource setup to support the HBA device it attached to. |
| 3643 | * | 3759 | * |
| 3644 | * Return codes | 3760 | * Return codes |
| 3645 | * 0 - sucessful | 3761 | * 0 - successful |
| 3646 | * other values - error | 3762 | * other values - error |
| 3647 | **/ | 3763 | **/ |
| 3648 | static int | 3764 | static int |
| @@ -3688,7 +3804,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) | |||
| 3688 | * device specific resource setup to support the HBA device it attached to. | 3804 | * device specific resource setup to support the HBA device it attached to. |
| 3689 | * | 3805 | * |
| 3690 | * Return codes | 3806 | * Return codes |
| 3691 | * 0 - sucessful | 3807 | * 0 - successful |
| 3692 | * other values - error | 3808 | * other values - error |
| 3693 | **/ | 3809 | **/ |
| 3694 | static int | 3810 | static int |
| @@ -3753,7 +3869,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba) | |||
| 3753 | * list and set up the IOCB tag array accordingly. | 3869 | * list and set up the IOCB tag array accordingly. |
| 3754 | * | 3870 | * |
| 3755 | * Return codes | 3871 | * Return codes |
| 3756 | * 0 - sucessful | 3872 | * 0 - successful |
| 3757 | * other values - error | 3873 | * other values - error |
| 3758 | **/ | 3874 | **/ |
| 3759 | static int | 3875 | static int |
| @@ -3824,7 +3940,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba) | |||
| 3824 | rc = lpfc_sli4_remove_all_sgl_pages(phba); | 3940 | rc = lpfc_sli4_remove_all_sgl_pages(phba); |
| 3825 | if (rc) { | 3941 | if (rc) { |
| 3826 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3942 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 3827 | "2005 Unable to deregister pages from HBA: %x", rc); | 3943 | "2005 Unable to deregister pages from HBA: %x\n", rc); |
| 3828 | } | 3944 | } |
| 3829 | kfree(phba->sli4_hba.lpfc_els_sgl_array); | 3945 | kfree(phba->sli4_hba.lpfc_els_sgl_array); |
| 3830 | } | 3946 | } |
| @@ -3872,7 +3988,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba) | |||
| 3872 | * list and set up the sgl xritag tag array accordingly. | 3988 | * list and set up the sgl xritag tag array accordingly. |
| 3873 | * | 3989 | * |
| 3874 | * Return codes | 3990 | * Return codes |
| 3875 | * 0 - sucessful | 3991 | * 0 - successful |
| 3876 | * other values - error | 3992 | * other values - error |
| 3877 | **/ | 3993 | **/ |
| 3878 | static int | 3994 | static int |
| @@ -3986,7 +4102,7 @@ out_free_mem: | |||
| 3986 | * enabled and the driver is reinitializing the device. | 4102 | * enabled and the driver is reinitializing the device. |
| 3987 | * | 4103 | * |
| 3988 | * Return codes | 4104 | * Return codes |
| 3989 | * 0 - sucessful | 4105 | * 0 - successful |
| 3990 | ENOMEM - No available memory | 4106 | ENOMEM - No available memory |
| 3991 | * EIO - The mailbox failed to complete successfully. | 4107 | * EIO - The mailbox failed to complete successfully. |
| 3992 | **/ | 4108 | **/ |
| @@ -4146,7 +4262,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) | |||
| 4146 | * PCI device data structure is set. | 4262 | * PCI device data structure is set. |
| 4147 | * | 4263 | * |
| 4148 | * Return codes | 4264 | * Return codes |
| 4149 | * pointer to @phba - sucessful | 4265 | * pointer to @phba - successful |
| 4150 | * NULL - error | 4266 | * NULL - error |
| 4151 | **/ | 4267 | **/ |
| 4152 | static struct lpfc_hba * | 4268 | static struct lpfc_hba * |
| @@ -4202,7 +4318,7 @@ lpfc_hba_free(struct lpfc_hba *phba) | |||
| 4202 | * host with it. | 4318 | * host with it. |
| 4203 | * | 4319 | * |
| 4204 | * Return codes | 4320 | * Return codes |
| 4205 | * 0 - sucessful | 4321 | * 0 - successful |
| 4206 | * other values - error | 4322 | * other values - error |
| 4207 | **/ | 4323 | **/ |
| 4208 | static int | 4324 | static int |
| @@ -4273,7 +4389,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) | |||
| 4273 | _dump_buf_data = | 4389 | _dump_buf_data = |
| 4274 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | 4390 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); |
| 4275 | if (_dump_buf_data) { | 4391 | if (_dump_buf_data) { |
| 4276 | printk(KERN_ERR "BLKGRD allocated %d pages for " | 4392 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4393 | "9043 BLKGRD: allocated %d pages for " | ||
| 4277 | "_dump_buf_data at 0x%p\n", | 4394 | "_dump_buf_data at 0x%p\n", |
| 4278 | (1 << pagecnt), _dump_buf_data); | 4395 | (1 << pagecnt), _dump_buf_data); |
| 4279 | _dump_buf_data_order = pagecnt; | 4396 | _dump_buf_data_order = pagecnt; |
| @@ -4284,17 +4401,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) | |||
| 4284 | --pagecnt; | 4401 | --pagecnt; |
| 4285 | } | 4402 | } |
| 4286 | if (!_dump_buf_data_order) | 4403 | if (!_dump_buf_data_order) |
| 4287 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | 4404 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4405 | "9044 BLKGRD: ERROR unable to allocate " | ||
| 4288 | "memory for hexdump\n"); | 4406 | "memory for hexdump\n"); |
| 4289 | } else | 4407 | } else |
| 4290 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" | 4408 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4409 | "9045 BLKGRD: already allocated _dump_buf_data=0x%p" | ||
| 4291 | "\n", _dump_buf_data); | 4410 | "\n", _dump_buf_data); |
| 4292 | if (!_dump_buf_dif) { | 4411 | if (!_dump_buf_dif) { |
| 4293 | while (pagecnt) { | 4412 | while (pagecnt) { |
| 4294 | _dump_buf_dif = | 4413 | _dump_buf_dif = |
| 4295 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | 4414 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); |
| 4296 | if (_dump_buf_dif) { | 4415 | if (_dump_buf_dif) { |
| 4297 | printk(KERN_ERR "BLKGRD allocated %d pages for " | 4416 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4417 | "9046 BLKGRD: allocated %d pages for " | ||
| 4298 | "_dump_buf_dif at 0x%p\n", | 4418 | "_dump_buf_dif at 0x%p\n", |
| 4299 | (1 << pagecnt), _dump_buf_dif); | 4419 | (1 << pagecnt), _dump_buf_dif); |
| 4300 | _dump_buf_dif_order = pagecnt; | 4420 | _dump_buf_dif_order = pagecnt; |
| @@ -4305,10 +4425,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) | |||
| 4305 | --pagecnt; | 4425 | --pagecnt; |
| 4306 | } | 4426 | } |
| 4307 | if (!_dump_buf_dif_order) | 4427 | if (!_dump_buf_dif_order) |
| 4308 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | 4428 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4429 | "9047 BLKGRD: ERROR unable to allocate " | ||
| 4309 | "memory for hexdump\n"); | 4430 | "memory for hexdump\n"); |
| 4310 | } else | 4431 | } else |
| 4311 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", | 4432 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 4433 | "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", | ||
| 4312 | _dump_buf_dif); | 4434 | _dump_buf_dif); |
| 4313 | } | 4435 | } |
| 4314 | 4436 | ||
| @@ -4365,7 +4487,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba) | |||
| 4365 | * with SLI-3 interface spec. | 4487 | * with SLI-3 interface spec. |
| 4366 | * | 4488 | * |
| 4367 | * Return codes | 4489 | * Return codes |
| 4368 | * 0 - sucessful | 4490 | * 0 - successful |
| 4369 | * other values - error | 4491 | * other values - error |
| 4370 | **/ | 4492 | **/ |
| 4371 | static int | 4493 | static int |
| @@ -4512,7 +4634,6 @@ int | |||
| 4512 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) | 4634 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) |
| 4513 | { | 4635 | { |
| 4514 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; | 4636 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; |
| 4515 | uint32_t onlnreg0, onlnreg1; | ||
| 4516 | int i, port_error = -ENODEV; | 4637 | int i, port_error = -ENODEV; |
| 4517 | 4638 | ||
| 4518 | if (!phba->sli4_hba.STAregaddr) | 4639 | if (!phba->sli4_hba.STAregaddr) |
| @@ -4556,21 +4677,20 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
| 4556 | bf_get(lpfc_scratchpad_slirev, &scratchpad), | 4677 | bf_get(lpfc_scratchpad_slirev, &scratchpad), |
| 4557 | bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), | 4678 | bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), |
| 4558 | bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); | 4679 | bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); |
| 4559 | 4680 | phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); | |
| 4681 | phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); | ||
| 4560 | /* With uncoverable error, log the error message and return error */ | 4682 | /* With uncoverable error, log the error message and return error */ |
| 4561 | onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); | 4683 | uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); |
| 4562 | onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); | 4684 | uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); |
| 4563 | if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { | 4685 | if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || |
| 4564 | uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); | 4686 | (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { |
| 4565 | uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); | 4687 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 4566 | if (uerrlo_reg.word0 || uerrhi_reg.word0) { | 4688 | "1422 HBA Unrecoverable error: " |
| 4567 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 4689 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " |
| 4568 | "1422 HBA Unrecoverable error: " | 4690 | "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", |
| 4569 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | 4691 | uerrlo_reg.word0, uerrhi_reg.word0, |
| 4570 | "online0_reg=0x%x, online1_reg=0x%x\n", | 4692 | phba->sli4_hba.ue_mask_lo, |
| 4571 | uerrlo_reg.word0, uerrhi_reg.word0, | 4693 | phba->sli4_hba.ue_mask_hi); |
| 4572 | onlnreg0, onlnreg1); | ||
| 4573 | } | ||
| 4574 | return -ENODEV; | 4694 | return -ENODEV; |
| 4575 | } | 4695 | } |
| 4576 | 4696 | ||
| @@ -4591,10 +4711,10 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) | |||
| 4591 | LPFC_UERR_STATUS_LO; | 4711 | LPFC_UERR_STATUS_LO; |
| 4592 | phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + | 4712 | phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + |
| 4593 | LPFC_UERR_STATUS_HI; | 4713 | LPFC_UERR_STATUS_HI; |
| 4594 | phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + | 4714 | phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + |
| 4595 | LPFC_ONLINE0; | 4715 | LPFC_UE_MASK_LO; |
| 4596 | phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + | 4716 | phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + |
| 4597 | LPFC_ONLINE1; | 4717 | LPFC_UE_MASK_HI; |
| 4598 | phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + | 4718 | phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + |
| 4599 | LPFC_SCRATCHPAD; | 4719 | LPFC_SCRATCHPAD; |
| 4600 | } | 4720 | } |
| @@ -4662,7 +4782,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) | |||
| 4662 | * this routine. | 4782 | * this routine. |
| 4663 | * | 4783 | * |
| 4664 | * Return codes | 4784 | * Return codes |
| 4665 | * 0 - sucessful | 4785 | * 0 - successful |
| 4666 | * ENOMEM - could not allocated memory. | 4786 | * ENOMEM - could not allocated memory. |
| 4667 | **/ | 4787 | **/ |
| 4668 | static int | 4788 | static int |
| @@ -4761,7 +4881,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) | |||
| 4761 | * allocation for the port. | 4881 | * allocation for the port. |
| 4762 | * | 4882 | * |
| 4763 | * Return codes | 4883 | * Return codes |
| 4764 | * 0 - sucessful | 4884 | * 0 - successful |
| 4765 | * ENOMEM - No availble memory | 4885 | * ENOMEM - No availble memory |
| 4766 | * EIO - The mailbox failed to complete successfully. | 4886 | * EIO - The mailbox failed to complete successfully. |
| 4767 | **/ | 4887 | **/ |
| @@ -4825,7 +4945,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) | |||
| 4825 | phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; | 4945 | phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; |
| 4826 | phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; | 4946 | phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; |
| 4827 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; | 4947 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; |
| 4828 | phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; | 4948 | phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? |
| 4949 | (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; | ||
| 4829 | phba->max_vports = phba->max_vpi; | 4950 | phba->max_vports = phba->max_vpi; |
| 4830 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 4951 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 4831 | "2003 cfg params XRI(B:%d M:%d), " | 4952 | "2003 cfg params XRI(B:%d M:%d), " |
| @@ -4861,7 +4982,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) | |||
| 4861 | * HBA consistent with the SLI-4 interface spec. | 4982 | * HBA consistent with the SLI-4 interface spec. |
| 4862 | * | 4983 | * |
| 4863 | * Return codes | 4984 | * Return codes |
| 4864 | * 0 - sucessful | 4985 | * 0 - successful |
| 4865 | * ENOMEM - No availble memory | 4986 | * ENOMEM - No availble memory |
| 4866 | * EIO - The mailbox failed to complete successfully. | 4987 | * EIO - The mailbox failed to complete successfully. |
| 4867 | **/ | 4988 | **/ |
| @@ -4910,7 +5031,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) | |||
| 4910 | * we just use some constant number as place holder. | 5031 | * we just use some constant number as place holder. |
| 4911 | * | 5032 | * |
| 4912 | * Return codes | 5033 | * Return codes |
| 4913 | * 0 - sucessful | 5034 | * 0 - successful |
| 4914 | * ENOMEM - No availble memory | 5035 | * ENOMEM - No availble memory |
| 4915 | * EIO - The mailbox failed to complete successfully. | 5036 | * EIO - The mailbox failed to complete successfully. |
| 4916 | **/ | 5037 | **/ |
| @@ -4979,10 +5100,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
| 4979 | /* It does not make sense to have more EQs than WQs */ | 5100 | /* It does not make sense to have more EQs than WQs */ |
| 4980 | if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { | 5101 | if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { |
| 4981 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 5102 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 4982 | "2593 The number of FCP EQs (%d) is more " | 5103 | "2593 The FCP EQ count(%d) cannot be greater " |
| 4983 | "than the number of FCP WQs (%d), take " | 5104 | "than the FCP WQ count(%d), limiting the " |
| 4984 | "the number of FCP EQs same as than of " | 5105 | "FCP EQ count to %d\n", cfg_fcp_eq_count, |
| 4985 | "WQs (%d)\n", cfg_fcp_eq_count, | ||
| 4986 | phba->cfg_fcp_wq_count, | 5106 | phba->cfg_fcp_wq_count, |
| 4987 | phba->cfg_fcp_wq_count); | 5107 | phba->cfg_fcp_wq_count); |
| 4988 | cfg_fcp_eq_count = phba->cfg_fcp_wq_count; | 5108 | cfg_fcp_eq_count = phba->cfg_fcp_wq_count; |
| @@ -5058,15 +5178,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
| 5058 | } | 5178 | } |
| 5059 | phba->sli4_hba.els_cq = qdesc; | 5179 | phba->sli4_hba.els_cq = qdesc; |
| 5060 | 5180 | ||
| 5061 | /* Create slow-path Unsolicited Receive Complete Queue */ | ||
| 5062 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | ||
| 5063 | phba->sli4_hba.cq_ecount); | ||
| 5064 | if (!qdesc) { | ||
| 5065 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 5066 | "0502 Failed allocate slow-path USOL RX CQ\n"); | ||
| 5067 | goto out_free_els_cq; | ||
| 5068 | } | ||
| 5069 | phba->sli4_hba.rxq_cq = qdesc; | ||
| 5070 | 5181 | ||
| 5071 | /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ | 5182 | /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ |
| 5072 | phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * | 5183 | phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * |
| @@ -5075,7 +5186,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
| 5075 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5186 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 5076 | "2577 Failed allocate memory for fast-path " | 5187 | "2577 Failed allocate memory for fast-path " |
| 5077 | "CQ record array\n"); | 5188 | "CQ record array\n"); |
| 5078 | goto out_free_rxq_cq; | 5189 | goto out_free_els_cq; |
| 5079 | } | 5190 | } |
| 5080 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { | 5191 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { |
| 5081 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | 5192 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, |
| @@ -5188,9 +5299,6 @@ out_free_fcp_cq: | |||
| 5188 | phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; | 5299 | phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; |
| 5189 | } | 5300 | } |
| 5190 | kfree(phba->sli4_hba.fcp_cq); | 5301 | kfree(phba->sli4_hba.fcp_cq); |
| 5191 | out_free_rxq_cq: | ||
| 5192 | lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); | ||
| 5193 | phba->sli4_hba.rxq_cq = NULL; | ||
| 5194 | out_free_els_cq: | 5302 | out_free_els_cq: |
| 5195 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); | 5303 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); |
| 5196 | phba->sli4_hba.els_cq = NULL; | 5304 | phba->sli4_hba.els_cq = NULL; |
| @@ -5218,7 +5326,7 @@ out_error: | |||
| 5218 | * operation. | 5326 | * operation. |
| 5219 | * | 5327 | * |
| 5220 | * Return codes | 5328 | * Return codes |
| 5221 | * 0 - sucessful | 5329 | * 0 - successful |
| 5222 | * ENOMEM - No availble memory | 5330 | * ENOMEM - No availble memory |
| 5223 | * EIO - The mailbox failed to complete successfully. | 5331 | * EIO - The mailbox failed to complete successfully. |
| 5224 | **/ | 5332 | **/ |
| @@ -5247,10 +5355,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
| 5247 | lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); | 5355 | lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); |
| 5248 | phba->sli4_hba.dat_rq = NULL; | 5356 | phba->sli4_hba.dat_rq = NULL; |
| 5249 | 5357 | ||
| 5250 | /* Release unsolicited receive complete queue */ | ||
| 5251 | lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); | ||
| 5252 | phba->sli4_hba.rxq_cq = NULL; | ||
| 5253 | |||
| 5254 | /* Release ELS complete queue */ | 5358 | /* Release ELS complete queue */ |
| 5255 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); | 5359 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); |
| 5256 | phba->sli4_hba.els_cq = NULL; | 5360 | phba->sli4_hba.els_cq = NULL; |
| @@ -5286,7 +5390,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
| 5286 | * operation. | 5390 | * operation. |
| 5287 | * | 5391 | * |
| 5288 | * Return codes | 5392 | * Return codes |
| 5289 | * 0 - sucessful | 5393 | * 0 - successful |
| 5290 | * ENOMEM - No availble memory | 5394 | * ENOMEM - No availble memory |
| 5291 | * EIO - The mailbox failed to complete successfully. | 5395 | * EIO - The mailbox failed to complete successfully. |
| 5292 | **/ | 5396 | **/ |
| @@ -5383,25 +5487,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
| 5383 | phba->sli4_hba.els_cq->queue_id, | 5487 | phba->sli4_hba.els_cq->queue_id, |
| 5384 | phba->sli4_hba.sp_eq->queue_id); | 5488 | phba->sli4_hba.sp_eq->queue_id); |
| 5385 | 5489 | ||
| 5386 | /* Set up slow-path Unsolicited Receive Complete Queue */ | ||
| 5387 | if (!phba->sli4_hba.rxq_cq) { | ||
| 5388 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 5389 | "0532 USOL RX CQ not allocated\n"); | ||
| 5390 | goto out_destroy_els_cq; | ||
| 5391 | } | ||
| 5392 | rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, | ||
| 5393 | LPFC_RCQ, LPFC_USOL); | ||
| 5394 | if (rc) { | ||
| 5395 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 5396 | "0533 Failed setup of slow-path USOL RX CQ: " | ||
| 5397 | "rc = 0x%x\n", rc); | ||
| 5398 | goto out_destroy_els_cq; | ||
| 5399 | } | ||
| 5400 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
| 5401 | "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", | ||
| 5402 | phba->sli4_hba.rxq_cq->queue_id, | ||
| 5403 | phba->sli4_hba.sp_eq->queue_id); | ||
| 5404 | |||
| 5405 | /* Set up fast-path FCP Response Complete Queue */ | 5490 | /* Set up fast-path FCP Response Complete Queue */ |
| 5406 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { | 5491 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { |
| 5407 | if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { | 5492 | if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { |
| @@ -5507,7 +5592,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
| 5507 | goto out_destroy_fcp_wq; | 5592 | goto out_destroy_fcp_wq; |
| 5508 | } | 5593 | } |
| 5509 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, | 5594 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, |
| 5510 | phba->sli4_hba.rxq_cq, LPFC_USOL); | 5595 | phba->sli4_hba.els_cq, LPFC_USOL); |
| 5511 | if (rc) { | 5596 | if (rc) { |
| 5512 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5597 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 5513 | "0541 Failed setup of Receive Queue: " | 5598 | "0541 Failed setup of Receive Queue: " |
| @@ -5519,7 +5604,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
| 5519 | "parent cq-id=%d\n", | 5604 | "parent cq-id=%d\n", |
| 5520 | phba->sli4_hba.hdr_rq->queue_id, | 5605 | phba->sli4_hba.hdr_rq->queue_id, |
| 5521 | phba->sli4_hba.dat_rq->queue_id, | 5606 | phba->sli4_hba.dat_rq->queue_id, |
| 5522 | phba->sli4_hba.rxq_cq->queue_id); | 5607 | phba->sli4_hba.els_cq->queue_id); |
| 5523 | return 0; | 5608 | return 0; |
| 5524 | 5609 | ||
| 5525 | out_destroy_fcp_wq: | 5610 | out_destroy_fcp_wq: |
| @@ -5531,8 +5616,6 @@ out_destroy_mbx_wq: | |||
| 5531 | out_destroy_fcp_cq: | 5616 | out_destroy_fcp_cq: |
| 5532 | for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) | 5617 | for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) |
| 5533 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); | 5618 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); |
| 5534 | lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); | ||
| 5535 | out_destroy_els_cq: | ||
| 5536 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); | 5619 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); |
| 5537 | out_destroy_mbx_cq: | 5620 | out_destroy_mbx_cq: |
| 5538 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); | 5621 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); |
| @@ -5552,7 +5635,7 @@ out_error: | |||
| 5552 | * operation. | 5635 | * operation. |
| 5553 | * | 5636 | * |
| 5554 | * Return codes | 5637 | * Return codes |
| 5555 | * 0 - sucessful | 5638 | * 0 - successful |
| 5556 | * ENOMEM - No availble memory | 5639 | * ENOMEM - No availble memory |
| 5557 | * EIO - The mailbox failed to complete successfully. | 5640 | * EIO - The mailbox failed to complete successfully. |
| 5558 | **/ | 5641 | **/ |
| @@ -5574,8 +5657,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) | |||
| 5574 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); | 5657 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); |
| 5575 | /* Unset ELS complete queue */ | 5658 | /* Unset ELS complete queue */ |
| 5576 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); | 5659 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); |
| 5577 | /* Unset unsolicited receive complete queue */ | ||
| 5578 | lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); | ||
| 5579 | /* Unset FCP response complete queue */ | 5660 | /* Unset FCP response complete queue */ |
| 5580 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) | 5661 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) |
| 5581 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); | 5662 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); |
| @@ -5599,7 +5680,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) | |||
| 5599 | * Later, this can be used for all the slow-path events. | 5680 | * Later, this can be used for all the slow-path events. |
| 5600 | * | 5681 | * |
| 5601 | * Return codes | 5682 | * Return codes |
| 5602 | * 0 - sucessful | 5683 | * 0 - successful |
| 5603 | * -ENOMEM - No availble memory | 5684 | * -ENOMEM - No availble memory |
| 5604 | **/ | 5685 | **/ |
| 5605 | static int | 5686 | static int |
| @@ -5760,7 +5841,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) | |||
| 5760 | * all resources assigned to the PCI function which originates this request. | 5841 | * all resources assigned to the PCI function which originates this request. |
| 5761 | * | 5842 | * |
| 5762 | * Return codes | 5843 | * Return codes |
| 5763 | * 0 - sucessful | 5844 | * 0 - successful |
| 5764 | * ENOMEM - No availble memory | 5845 | * ENOMEM - No availble memory |
| 5765 | * EIO - The mailbox failed to complete successfully. | 5846 | * EIO - The mailbox failed to complete successfully. |
| 5766 | **/ | 5847 | **/ |
| @@ -5923,7 +6004,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) | |||
| 5923 | * with SLI-4 interface spec. | 6004 | * with SLI-4 interface spec. |
| 5924 | * | 6005 | * |
| 5925 | * Return codes | 6006 | * Return codes |
| 5926 | * 0 - sucessful | 6007 | * 0 - successful |
| 5927 | * other values - error | 6008 | * other values - error |
| 5928 | **/ | 6009 | **/ |
| 5929 | static int | 6010 | static int |
| @@ -6052,7 +6133,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) | |||
| 6052 | * will be left with MSI-X enabled and leaks its vectors. | 6133 | * will be left with MSI-X enabled and leaks its vectors. |
| 6053 | * | 6134 | * |
| 6054 | * Return codes | 6135 | * Return codes |
| 6055 | * 0 - sucessful | 6136 | * 0 - successful |
| 6056 | * other values - error | 6137 | * other values - error |
| 6057 | **/ | 6138 | **/ |
| 6058 | static int | 6139 | static int |
| @@ -6184,7 +6265,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba) | |||
| 6184 | * is done in this function. | 6265 | * is done in this function. |
| 6185 | * | 6266 | * |
| 6186 | * Return codes | 6267 | * Return codes |
| 6187 | * 0 - sucessful | 6268 | * 0 - successful |
| 6188 | * other values - error | 6269 | * other values - error |
| 6189 | */ | 6270 | */ |
| 6190 | static int | 6271 | static int |
| @@ -6243,7 +6324,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba) | |||
| 6243 | * MSI-X -> MSI -> IRQ. | 6324 | * MSI-X -> MSI -> IRQ. |
| 6244 | * | 6325 | * |
| 6245 | * Return codes | 6326 | * Return codes |
| 6246 | * 0 - sucessful | 6327 | * 0 - successful |
| 6247 | * other values - error | 6328 | * other values - error |
| 6248 | **/ | 6329 | **/ |
| 6249 | static uint32_t | 6330 | static uint32_t |
| @@ -6333,7 +6414,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) | |||
| 6333 | * enabled and leaks its vectors. | 6414 | * enabled and leaks its vectors. |
| 6334 | * | 6415 | * |
| 6335 | * Return codes | 6416 | * Return codes |
| 6336 | * 0 - sucessful | 6417 | * 0 - successful |
| 6337 | * other values - error | 6418 | * other values - error |
| 6338 | **/ | 6419 | **/ |
| 6339 | static int | 6420 | static int |
| @@ -6443,7 +6524,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) | |||
| 6443 | * which is done in this function. | 6524 | * which is done in this function. |
| 6444 | * | 6525 | * |
| 6445 | * Return codes | 6526 | * Return codes |
| 6446 | * 0 - sucessful | 6527 | * 0 - successful |
| 6447 | * other values - error | 6528 | * other values - error |
| 6448 | **/ | 6529 | **/ |
| 6449 | static int | 6530 | static int |
| @@ -6508,7 +6589,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba) | |||
| 6508 | * MSI-X -> MSI -> IRQ. | 6589 | * MSI-X -> MSI -> IRQ. |
| 6509 | * | 6590 | * |
| 6510 | * Return codes | 6591 | * Return codes |
| 6511 | * 0 - sucessful | 6592 | * 0 - successful |
| 6512 | * other values - error | 6593 | * other values - error |
| 6513 | **/ | 6594 | **/ |
| 6514 | static uint32_t | 6595 | static uint32_t |
| @@ -6722,6 +6803,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 6722 | { | 6803 | { |
| 6723 | struct lpfc_hba *phba; | 6804 | struct lpfc_hba *phba; |
| 6724 | struct lpfc_vport *vport = NULL; | 6805 | struct lpfc_vport *vport = NULL; |
| 6806 | struct Scsi_Host *shost = NULL; | ||
| 6725 | int error; | 6807 | int error; |
| 6726 | uint32_t cfg_mode, intr_mode; | 6808 | uint32_t cfg_mode, intr_mode; |
| 6727 | 6809 | ||
| @@ -6800,6 +6882,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 6800 | goto out_destroy_shost; | 6882 | goto out_destroy_shost; |
| 6801 | } | 6883 | } |
| 6802 | 6884 | ||
| 6885 | shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ | ||
| 6803 | /* Now, trying to enable interrupt and bring up the device */ | 6886 | /* Now, trying to enable interrupt and bring up the device */ |
| 6804 | cfg_mode = phba->cfg_use_msi; | 6887 | cfg_mode = phba->cfg_use_msi; |
| 6805 | while (true) { | 6888 | while (true) { |
| @@ -6866,6 +6949,8 @@ out_unset_pci_mem_s3: | |||
| 6866 | lpfc_sli_pci_mem_unset(phba); | 6949 | lpfc_sli_pci_mem_unset(phba); |
| 6867 | out_disable_pci_dev: | 6950 | out_disable_pci_dev: |
| 6868 | lpfc_disable_pci_dev(phba); | 6951 | lpfc_disable_pci_dev(phba); |
| 6952 | if (shost) | ||
| 6953 | scsi_host_put(shost); | ||
| 6869 | out_free_phba: | 6954 | out_free_phba: |
| 6870 | lpfc_hba_free(phba); | 6955 | lpfc_hba_free(phba); |
| 6871 | return error; | 6956 | return error; |
| @@ -7036,6 +7121,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) | |||
| 7036 | /* Restore device state from PCI config space */ | 7121 | /* Restore device state from PCI config space */ |
| 7037 | pci_set_power_state(pdev, PCI_D0); | 7122 | pci_set_power_state(pdev, PCI_D0); |
| 7038 | pci_restore_state(pdev); | 7123 | pci_restore_state(pdev); |
| 7124 | |||
| 7039 | if (pdev->is_busmaster) | 7125 | if (pdev->is_busmaster) |
| 7040 | pci_set_master(pdev); | 7126 | pci_set_master(pdev); |
| 7041 | 7127 | ||
| @@ -7070,6 +7156,75 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) | |||
| 7070 | } | 7156 | } |
| 7071 | 7157 | ||
| 7072 | /** | 7158 | /** |
| 7159 | * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover | ||
| 7160 | * @phba: pointer to lpfc hba data structure. | ||
| 7161 | * | ||
| 7162 | * This routine is called to prepare the SLI3 device for PCI slot recover. It | ||
| 7163 | * aborts and stops all the on-going I/Os on the pci device. | ||
| 7164 | **/ | ||
| 7165 | static void | ||
| 7166 | lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) | ||
| 7167 | { | ||
| 7168 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 7169 | "2723 PCI channel I/O abort preparing for recovery\n"); | ||
| 7170 | /* Prepare for bringing HBA offline */ | ||
| 7171 | lpfc_offline_prep(phba); | ||
| 7172 | /* Clear sli active flag to prevent sysfs access to HBA */ | ||
| 7173 | spin_lock_irq(&phba->hbalock); | ||
| 7174 | phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; | ||
| 7175 | spin_unlock_irq(&phba->hbalock); | ||
| 7176 | /* Stop and flush all I/Os and bring HBA offline */ | ||
| 7177 | lpfc_offline(phba); | ||
| 7178 | } | ||
| 7179 | |||
| 7180 | /** | ||
| 7181 | * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset | ||
| 7182 | * @phba: pointer to lpfc hba data structure. | ||
| 7183 | * | ||
| 7184 | * This routine is called to prepare the SLI3 device for PCI slot reset. It | ||
| 7185 | * disables the device interrupt and pci device, and aborts the internal FCP | ||
| 7186 | * pending I/Os. | ||
| 7187 | **/ | ||
| 7188 | static void | ||
| 7189 | lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) | ||
| 7190 | { | ||
| 7191 | struct lpfc_sli *psli = &phba->sli; | ||
| 7192 | struct lpfc_sli_ring *pring; | ||
| 7193 | |||
| 7194 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 7195 | "2710 PCI channel disable preparing for reset\n"); | ||
| 7196 | /* Disable interrupt and pci device */ | ||
| 7197 | lpfc_sli_disable_intr(phba); | ||
| 7198 | pci_disable_device(phba->pcidev); | ||
| 7199 | /* | ||
| 7200 | * There may be I/Os dropped by the firmware. | ||
| 7201 | * Error iocb (I/O) on txcmplq and let the SCSI layer | ||
| 7202 | * retry it after re-establishing link. | ||
| 7203 | */ | ||
| 7204 | pring = &psli->ring[psli->fcp_ring]; | ||
| 7205 | lpfc_sli_abort_iocb_ring(phba, pring); | ||
| 7206 | } | ||
| 7207 | |||
| 7208 | /** | ||
| 7209 | * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable | ||
| 7210 | * @phba: pointer to lpfc hba data structure. | ||
| 7211 | * | ||
| 7212 | * This routine is called to prepare the SLI3 device for PCI slot permanently | ||
| 7213 | * disabling. It blocks the SCSI transport layer traffic and flushes the FCP | ||
| 7214 | * pending I/Os. | ||
| 7215 | **/ | ||
| 7216 | static void | ||
| 7217 | lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) | ||
| 7218 | { | ||
| 7219 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 7220 | "2711 PCI channel permanent disable for failure\n"); | ||
| 7221 | /* Block all SCSI devices' I/Os on the host */ | ||
| 7222 | lpfc_scsi_dev_block(phba); | ||
| 7223 | /* Clean up all driver's outstanding SCSI I/Os */ | ||
| 7224 | lpfc_sli_flush_fcp_rings(phba); | ||
| 7225 | } | ||
| 7226 | |||
| 7227 | /** | ||
| 7073 | * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error | 7228 | * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error |
| 7074 | * @pdev: pointer to PCI device. | 7229 | * @pdev: pointer to PCI device. |
| 7075 | * @state: the current PCI connection state. | 7230 | * @state: the current PCI connection state. |
| @@ -7083,6 +7238,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) | |||
| 7083 | * as desired. | 7238 | * as desired. |
| 7084 | * | 7239 | * |
| 7085 | * Return codes | 7240 | * Return codes |
| 7241 | * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link | ||
| 7086 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery | 7242 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery |
| 7087 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | 7243 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered |
| 7088 | **/ | 7244 | **/ |
| @@ -7091,33 +7247,27 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) | |||
| 7091 | { | 7247 | { |
| 7092 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 7248 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
| 7093 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 7249 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
| 7094 | struct lpfc_sli *psli = &phba->sli; | ||
| 7095 | struct lpfc_sli_ring *pring; | ||
| 7096 | 7250 | ||
| 7097 | if (state == pci_channel_io_perm_failure) { | 7251 | switch (state) { |
| 7098 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7252 | case pci_channel_io_normal: |
| 7099 | "0472 PCI channel I/O permanent failure\n"); | 7253 | /* Non-fatal error, prepare for recovery */ |
| 7100 | /* Block all SCSI devices' I/Os on the host */ | 7254 | lpfc_sli_prep_dev_for_recover(phba); |
| 7101 | lpfc_scsi_dev_block(phba); | 7255 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 7102 | /* Clean up all driver's outstanding SCSI I/Os */ | 7256 | case pci_channel_io_frozen: |
| 7103 | lpfc_sli_flush_fcp_rings(phba); | 7257 | /* Fatal error, prepare for slot reset */ |
| 7258 | lpfc_sli_prep_dev_for_reset(phba); | ||
| 7259 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 7260 | case pci_channel_io_perm_failure: | ||
| 7261 | /* Permanent failure, prepare for device down */ | ||
| 7262 | lpfc_prep_dev_for_perm_failure(phba); | ||
| 7104 | return PCI_ERS_RESULT_DISCONNECT; | 7263 | return PCI_ERS_RESULT_DISCONNECT; |
| 7264 | default: | ||
| 7265 | /* Unknown state, prepare and request slot reset */ | ||
| 7266 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 7267 | "0472 Unknown PCI error state: x%x\n", state); | ||
| 7268 | lpfc_sli_prep_dev_for_reset(phba); | ||
| 7269 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 7105 | } | 7270 | } |
| 7106 | |||
| 7107 | pci_disable_device(pdev); | ||
| 7108 | /* | ||
| 7109 | * There may be I/Os dropped by the firmware. | ||
| 7110 | * Error iocb (I/O) on txcmplq and let the SCSI layer | ||
| 7111 | * retry it after re-establishing link. | ||
| 7112 | */ | ||
| 7113 | pring = &psli->ring[psli->fcp_ring]; | ||
| 7114 | lpfc_sli_abort_iocb_ring(phba, pring); | ||
| 7115 | |||
| 7116 | /* Disable interrupt */ | ||
| 7117 | lpfc_sli_disable_intr(phba); | ||
| 7118 | |||
| 7119 | /* Request a slot reset. */ | ||
| 7120 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 7121 | } | 7271 | } |
| 7122 | 7272 | ||
| 7123 | /** | 7273 | /** |
| @@ -7197,7 +7347,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev) | |||
| 7197 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 7347 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
| 7198 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 7348 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
| 7199 | 7349 | ||
| 7350 | /* Bring the device online */ | ||
| 7200 | lpfc_online(phba); | 7351 | lpfc_online(phba); |
| 7352 | |||
| 7353 | /* Clean up Advanced Error Reporting (AER) if needed */ | ||
| 7354 | if (phba->hba_flag & HBA_AER_ENABLED) | ||
| 7355 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
| 7201 | } | 7356 | } |
| 7202 | 7357 | ||
| 7203 | /** | 7358 | /** |
| @@ -7213,15 +7368,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) | |||
| 7213 | 7368 | ||
| 7214 | if (phba->sli_rev == LPFC_SLI_REV4) { | 7369 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 7215 | if (max_xri <= 100) | 7370 | if (max_xri <= 100) |
| 7216 | return 4; | 7371 | return 10; |
| 7217 | else if (max_xri <= 256) | 7372 | else if (max_xri <= 256) |
| 7218 | return 8; | 7373 | return 25; |
| 7219 | else if (max_xri <= 512) | 7374 | else if (max_xri <= 512) |
| 7220 | return 16; | 7375 | return 50; |
| 7221 | else if (max_xri <= 1024) | 7376 | else if (max_xri <= 1024) |
| 7222 | return 32; | 7377 | return 100; |
| 7223 | else | 7378 | else |
| 7224 | return 48; | 7379 | return 150; |
| 7225 | } else | 7380 | } else |
| 7226 | return 0; | 7381 | return 0; |
| 7227 | } | 7382 | } |
| @@ -7249,6 +7404,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 7249 | { | 7404 | { |
| 7250 | struct lpfc_hba *phba; | 7405 | struct lpfc_hba *phba; |
| 7251 | struct lpfc_vport *vport = NULL; | 7406 | struct lpfc_vport *vport = NULL; |
| 7407 | struct Scsi_Host *shost = NULL; | ||
| 7252 | int error; | 7408 | int error; |
| 7253 | uint32_t cfg_mode, intr_mode; | 7409 | uint32_t cfg_mode, intr_mode; |
| 7254 | int mcnt; | 7410 | int mcnt; |
| @@ -7329,6 +7485,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 7329 | goto out_destroy_shost; | 7485 | goto out_destroy_shost; |
| 7330 | } | 7486 | } |
| 7331 | 7487 | ||
| 7488 | shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ | ||
| 7332 | /* Now, trying to enable interrupt and bring up the device */ | 7489 | /* Now, trying to enable interrupt and bring up the device */ |
| 7333 | cfg_mode = phba->cfg_use_msi; | 7490 | cfg_mode = phba->cfg_use_msi; |
| 7334 | while (true) { | 7491 | while (true) { |
| @@ -7397,6 +7554,8 @@ out_unset_pci_mem_s4: | |||
| 7397 | lpfc_sli4_pci_mem_unset(phba); | 7554 | lpfc_sli4_pci_mem_unset(phba); |
| 7398 | out_disable_pci_dev: | 7555 | out_disable_pci_dev: |
| 7399 | lpfc_disable_pci_dev(phba); | 7556 | lpfc_disable_pci_dev(phba); |
| 7557 | if (shost) | ||
| 7558 | scsi_host_put(shost); | ||
| 7400 | out_free_phba: | 7559 | out_free_phba: |
| 7401 | lpfc_hba_free(phba); | 7560 | lpfc_hba_free(phba); |
| 7402 | return error; | 7561 | return error; |
| @@ -7971,6 +8130,10 @@ static struct pci_device_id lpfc_id_table[] = { | |||
| 7971 | PCI_ANY_ID, PCI_ANY_ID, }, | 8130 | PCI_ANY_ID, PCI_ANY_ID, }, |
| 7972 | {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, | 8131 | {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, |
| 7973 | PCI_ANY_ID, PCI_ANY_ID, }, | 8132 | PCI_ANY_ID, PCI_ANY_ID, }, |
| 8133 | {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, | ||
| 8134 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 8135 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, | ||
| 8136 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 7974 | { 0 } | 8137 | { 0 } |
| 7975 | }; | 8138 | }; |
| 7976 | 8139 | ||
| @@ -8053,15 +8216,15 @@ lpfc_exit(void) | |||
| 8053 | if (lpfc_enable_npiv) | 8216 | if (lpfc_enable_npiv) |
| 8054 | fc_release_transport(lpfc_vport_transport_template); | 8217 | fc_release_transport(lpfc_vport_transport_template); |
| 8055 | if (_dump_buf_data) { | 8218 | if (_dump_buf_data) { |
| 8056 | printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " | 8219 | printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " |
| 8057 | "at 0x%p\n", | 8220 | "_dump_buf_data at 0x%p\n", |
| 8058 | (1L << _dump_buf_data_order), _dump_buf_data); | 8221 | (1L << _dump_buf_data_order), _dump_buf_data); |
| 8059 | free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); | 8222 | free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); |
| 8060 | } | 8223 | } |
| 8061 | 8224 | ||
| 8062 | if (_dump_buf_dif) { | 8225 | if (_dump_buf_dif) { |
| 8063 | printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " | 8226 | printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " |
| 8064 | "at 0x%p\n", | 8227 | "_dump_buf_dif at 0x%p\n", |
| 8065 | (1L << _dump_buf_dif_order), _dump_buf_dif); | 8228 | (1L << _dump_buf_dif_order), _dump_buf_dif); |
| 8066 | free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); | 8229 | free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); |
| 8067 | } | 8230 | } |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 1ab405902a18..a9afd8b94b6a 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
| @@ -25,8 +25,8 @@ | |||
| 25 | 25 | ||
| 26 | #include <scsi/scsi_device.h> | 26 | #include <scsi/scsi_device.h> |
| 27 | #include <scsi/scsi_transport_fc.h> | 27 | #include <scsi/scsi_transport_fc.h> |
| 28 | |||
| 29 | #include <scsi/scsi.h> | 28 | #include <scsi/scsi.h> |
| 29 | #include <scsi/fc/fc_fs.h> | ||
| 30 | 30 | ||
| 31 | #include "lpfc_hw4.h" | 31 | #include "lpfc_hw4.h" |
| 32 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
| @@ -820,6 +820,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) | |||
| 820 | mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; | 820 | mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; |
| 821 | mb->un.varRegVpi.sid = vport->fc_myDID; | 821 | mb->un.varRegVpi.sid = vport->fc_myDID; |
| 822 | mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; | 822 | mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; |
| 823 | memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, | ||
| 824 | sizeof(struct lpfc_name)); | ||
| 825 | mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); | ||
| 826 | mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]); | ||
| 823 | 827 | ||
| 824 | mb->mbxCommand = MBX_REG_VPI; | 828 | mb->mbxCommand = MBX_REG_VPI; |
| 825 | mb->mbxOwner = OWN_HOST; | 829 | mb->mbxOwner = OWN_HOST; |
| @@ -849,7 +853,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) | |||
| 849 | MAILBOX_t *mb = &pmb->u.mb; | 853 | MAILBOX_t *mb = &pmb->u.mb; |
| 850 | memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); | 854 | memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); |
| 851 | 855 | ||
| 852 | mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; | 856 | if (phba->sli_rev < LPFC_SLI_REV4) |
| 857 | mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; | ||
| 858 | else | ||
| 859 | mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base; | ||
| 853 | 860 | ||
| 854 | mb->mbxCommand = MBX_UNREG_VPI; | 861 | mb->mbxCommand = MBX_UNREG_VPI; |
| 855 | mb->mbxOwner = OWN_HOST; | 862 | mb->mbxOwner = OWN_HOST; |
| @@ -1132,7 +1139,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) | |||
| 1132 | /* Otherwise we setup specific rctl / type masks for this ring */ | 1139 | /* Otherwise we setup specific rctl / type masks for this ring */ |
| 1133 | for (i = 0; i < pring->num_mask; i++) { | 1140 | for (i = 0; i < pring->num_mask; i++) { |
| 1134 | mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; | 1141 | mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; |
| 1135 | if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) | 1142 | if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) |
| 1136 | mb->un.varCfgRing.rrRegs[i].rmask = 0xff; | 1143 | mb->un.varCfgRing.rrRegs[i].rmask = 0xff; |
| 1137 | else | 1144 | else |
| 1138 | mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; | 1145 | mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; |
| @@ -1654,9 +1661,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, | |||
| 1654 | /* Allocate record for keeping SGE virtual addresses */ | 1661 | /* Allocate record for keeping SGE virtual addresses */ |
| 1655 | mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), | 1662 | mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), |
| 1656 | GFP_KERNEL); | 1663 | GFP_KERNEL); |
| 1657 | if (!mbox->sge_array) | 1664 | if (!mbox->sge_array) { |
| 1665 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
| 1666 | "2527 Failed to allocate non-embedded SGE " | ||
| 1667 | "array.\n"); | ||
| 1658 | return 0; | 1668 | return 0; |
| 1659 | 1669 | } | |
| 1660 | for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { | 1670 | for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { |
| 1661 | /* The DMA memory is always allocated in the length of a | 1671 | /* The DMA memory is always allocated in the length of a |
| 1662 | * page even though the last SGE might not fill up to a | 1672 | * page even though the last SGE might not fill up to a |
| @@ -1753,11 +1763,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) | |||
| 1753 | /* Set up host requested features. */ | 1763 | /* Set up host requested features. */ |
| 1754 | bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); | 1764 | bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); |
| 1755 | 1765 | ||
| 1756 | if (phba->cfg_enable_fip) | ||
| 1757 | bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); | ||
| 1758 | else | ||
| 1759 | bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1); | ||
| 1760 | |||
| 1761 | /* Enable DIF (block guard) only if configured to do so. */ | 1766 | /* Enable DIF (block guard) only if configured to do so. */ |
| 1762 | if (phba->cfg_enable_bg) | 1767 | if (phba->cfg_enable_bg) |
| 1763 | bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); | 1768 | bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); |
| @@ -1817,6 +1822,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) | |||
| 1817 | bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); | 1822 | bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); |
| 1818 | bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); | 1823 | bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); |
| 1819 | bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); | 1824 | bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); |
| 1825 | memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); | ||
| 1826 | reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); | ||
| 1827 | reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); | ||
| 1820 | reg_vfi->bde.addrHigh = putPaddrHigh(phys); | 1828 | reg_vfi->bde.addrHigh = putPaddrHigh(phys); |
| 1821 | reg_vfi->bde.addrLow = putPaddrLow(phys); | 1829 | reg_vfi->bde.addrLow = putPaddrLow(phys); |
| 1822 | reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); | 1830 | reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); |
| @@ -1850,7 +1858,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) | |||
| 1850 | /** | 1858 | /** |
| 1851 | * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command | 1859 | * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command |
| 1852 | * @mbox: pointer to lpfc mbox command to initialize. | 1860 | * @mbox: pointer to lpfc mbox command to initialize. |
| 1853 | * @vfi: VFI to be unregistered. | 1861 | * @vport: vport associated with the VF. |
| 1854 | * | 1862 | * |
| 1855 | * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric | 1863 | * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric |
| 1856 | * (logical NPort) into the inactive state. The SLI Host must have logged out | 1864 | * (logical NPort) into the inactive state. The SLI Host must have logged out |
| @@ -1859,11 +1867,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) | |||
| 1859 | * fabric inactive. | 1867 | * fabric inactive. |
| 1860 | **/ | 1868 | **/ |
| 1861 | void | 1869 | void |
| 1862 | lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) | 1870 | lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) |
| 1863 | { | 1871 | { |
| 1864 | memset(mbox, 0, sizeof(*mbox)); | 1872 | memset(mbox, 0, sizeof(*mbox)); |
| 1865 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); | 1873 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); |
| 1866 | bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); | 1874 | bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, |
| 1875 | vport->vfi + vport->phba->vfi_base); | ||
| 1867 | } | 1876 | } |
| 1868 | 1877 | ||
| 1869 | /** | 1878 | /** |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 3e74136f1ede..2ed6af194932 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
| @@ -1223,6 +1223,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, | |||
| 1223 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 1223 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
| 1224 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && | 1224 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
| 1225 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1225 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
| 1226 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
| 1227 | spin_unlock_irq(&phba->hbalock); | ||
| 1228 | lpfc_sli4_free_rpi(phba, | ||
| 1229 | mb->u.mb.un.varRegLogin.rpi); | ||
| 1230 | spin_lock_irq(&phba->hbalock); | ||
| 1231 | } | ||
| 1226 | mp = (struct lpfc_dmabuf *) (mb->context1); | 1232 | mp = (struct lpfc_dmabuf *) (mb->context1); |
| 1227 | if (mp) { | 1233 | if (mp) { |
| 1228 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1234 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
| @@ -1230,6 +1236,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, | |||
| 1230 | } | 1236 | } |
| 1231 | lpfc_nlp_put(ndlp); | 1237 | lpfc_nlp_put(ndlp); |
| 1232 | list_del(&mb->list); | 1238 | list_del(&mb->list); |
| 1239 | phba->sli.mboxq_cnt--; | ||
| 1233 | mempool_free(mb, phba->mbox_mem_pool); | 1240 | mempool_free(mb, phba->mbox_mem_pool); |
| 1234 | } | 1241 | } |
| 1235 | } | 1242 | } |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c88f59f0ce30..a246410ce9df 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
| @@ -59,22 +59,26 @@ static char *dif_op_str[] = { | |||
| 59 | }; | 59 | }; |
| 60 | static void | 60 | static void |
| 61 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); | 61 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); |
| 62 | static void | ||
| 63 | lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); | ||
| 62 | 64 | ||
| 63 | static void | 65 | static void |
| 64 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) | 66 | lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) |
| 65 | { | 67 | { |
| 66 | void *src, *dst; | 68 | void *src, *dst; |
| 67 | struct scatterlist *sgde = scsi_sglist(cmnd); | 69 | struct scatterlist *sgde = scsi_sglist(cmnd); |
| 68 | 70 | ||
| 69 | if (!_dump_buf_data) { | 71 | if (!_dump_buf_data) { |
| 70 | printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", | 72 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 73 | "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n", | ||
| 71 | __func__); | 74 | __func__); |
| 72 | return; | 75 | return; |
| 73 | } | 76 | } |
| 74 | 77 | ||
| 75 | 78 | ||
| 76 | if (!sgde) { | 79 | if (!sgde) { |
| 77 | printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); | 80 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 81 | "9051 BLKGRD: ERROR: data scatterlist is null\n"); | ||
| 78 | return; | 82 | return; |
| 79 | } | 83 | } |
| 80 | 84 | ||
| @@ -88,19 +92,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd) | |||
| 88 | } | 92 | } |
| 89 | 93 | ||
| 90 | static void | 94 | static void |
| 91 | lpfc_debug_save_dif(struct scsi_cmnd *cmnd) | 95 | lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) |
| 92 | { | 96 | { |
| 93 | void *src, *dst; | 97 | void *src, *dst; |
| 94 | struct scatterlist *sgde = scsi_prot_sglist(cmnd); | 98 | struct scatterlist *sgde = scsi_prot_sglist(cmnd); |
| 95 | 99 | ||
| 96 | if (!_dump_buf_dif) { | 100 | if (!_dump_buf_dif) { |
| 97 | printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", | 101 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 102 | "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n", | ||
| 98 | __func__); | 103 | __func__); |
| 99 | return; | 104 | return; |
| 100 | } | 105 | } |
| 101 | 106 | ||
| 102 | if (!sgde) { | 107 | if (!sgde) { |
| 103 | printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n"); | 108 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 109 | "9053 BLKGRD: ERROR: prot scatterlist is null\n"); | ||
| 104 | return; | 110 | return; |
| 105 | } | 111 | } |
| 106 | 112 | ||
| @@ -242,6 +248,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, | |||
| 242 | } | 248 | } |
| 243 | 249 | ||
| 244 | /** | 250 | /** |
| 251 | * lpfc_change_queue_depth - Alter scsi device queue depth | ||
| 252 | * @sdev: Pointer the scsi device on which to change the queue depth. | ||
| 253 | * @qdepth: New queue depth to set the sdev to. | ||
| 254 | * @reason: The reason for the queue depth change. | ||
| 255 | * | ||
| 256 | * This function is called by the midlayer and the LLD to alter the queue | ||
| 257 | * depth for a scsi device. This function sets the queue depth to the new | ||
| 258 | * value and sends an event out to log the queue depth change. | ||
| 259 | **/ | ||
| 260 | int | ||
| 261 | lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | ||
| 262 | { | ||
| 263 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; | ||
| 264 | struct lpfc_hba *phba = vport->phba; | ||
| 265 | struct lpfc_rport_data *rdata; | ||
| 266 | unsigned long new_queue_depth, old_queue_depth; | ||
| 267 | |||
| 268 | old_queue_depth = sdev->queue_depth; | ||
| 269 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | ||
| 270 | new_queue_depth = sdev->queue_depth; | ||
| 271 | rdata = sdev->hostdata; | ||
| 272 | if (rdata) | ||
| 273 | lpfc_send_sdev_queuedepth_change_event(phba, vport, | ||
| 274 | rdata->pnode, sdev->lun, | ||
| 275 | old_queue_depth, | ||
| 276 | new_queue_depth); | ||
| 277 | return sdev->queue_depth; | ||
| 278 | } | ||
| 279 | |||
| 280 | /** | ||
| 245 | * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread | 281 | * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread |
| 246 | * @phba: The Hba for which this call is being executed. | 282 | * @phba: The Hba for which this call is being executed. |
| 247 | * | 283 | * |
| @@ -305,8 +341,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, | |||
| 305 | if (vport->cfg_lun_queue_depth <= queue_depth) | 341 | if (vport->cfg_lun_queue_depth <= queue_depth) |
| 306 | return; | 342 | return; |
| 307 | spin_lock_irqsave(&phba->hbalock, flags); | 343 | spin_lock_irqsave(&phba->hbalock, flags); |
| 308 | if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || | 344 | if (time_before(jiffies, |
| 309 | ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { | 345 | phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || |
| 346 | time_before(jiffies, | ||
| 347 | phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { | ||
| 310 | spin_unlock_irqrestore(&phba->hbalock, flags); | 348 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 311 | return; | 349 | return; |
| 312 | } | 350 | } |
| @@ -338,10 +376,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
| 338 | struct lpfc_vport **vports; | 376 | struct lpfc_vport **vports; |
| 339 | struct Scsi_Host *shost; | 377 | struct Scsi_Host *shost; |
| 340 | struct scsi_device *sdev; | 378 | struct scsi_device *sdev; |
| 341 | unsigned long new_queue_depth, old_queue_depth; | 379 | unsigned long new_queue_depth; |
| 342 | unsigned long num_rsrc_err, num_cmd_success; | 380 | unsigned long num_rsrc_err, num_cmd_success; |
| 343 | int i; | 381 | int i; |
| 344 | struct lpfc_rport_data *rdata; | ||
| 345 | 382 | ||
| 346 | num_rsrc_err = atomic_read(&phba->num_rsrc_err); | 383 | num_rsrc_err = atomic_read(&phba->num_rsrc_err); |
| 347 | num_cmd_success = atomic_read(&phba->num_cmd_success); | 384 | num_cmd_success = atomic_read(&phba->num_cmd_success); |
| @@ -359,22 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
| 359 | else | 396 | else |
| 360 | new_queue_depth = sdev->queue_depth - | 397 | new_queue_depth = sdev->queue_depth - |
| 361 | new_queue_depth; | 398 | new_queue_depth; |
| 362 | old_queue_depth = sdev->queue_depth; | 399 | lpfc_change_queue_depth(sdev, new_queue_depth, |
| 363 | if (sdev->ordered_tags) | 400 | SCSI_QDEPTH_DEFAULT); |
| 364 | scsi_adjust_queue_depth(sdev, | ||
| 365 | MSG_ORDERED_TAG, | ||
| 366 | new_queue_depth); | ||
| 367 | else | ||
| 368 | scsi_adjust_queue_depth(sdev, | ||
| 369 | MSG_SIMPLE_TAG, | ||
| 370 | new_queue_depth); | ||
| 371 | rdata = sdev->hostdata; | ||
| 372 | if (rdata) | ||
| 373 | lpfc_send_sdev_queuedepth_change_event( | ||
| 374 | phba, vports[i], | ||
| 375 | rdata->pnode, | ||
| 376 | sdev->lun, old_queue_depth, | ||
| 377 | new_queue_depth); | ||
| 378 | } | 401 | } |
| 379 | } | 402 | } |
| 380 | lpfc_destroy_vport_work_array(phba, vports); | 403 | lpfc_destroy_vport_work_array(phba, vports); |
| @@ -398,7 +421,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
| 398 | struct Scsi_Host *shost; | 421 | struct Scsi_Host *shost; |
| 399 | struct scsi_device *sdev; | 422 | struct scsi_device *sdev; |
| 400 | int i; | 423 | int i; |
| 401 | struct lpfc_rport_data *rdata; | ||
| 402 | 424 | ||
| 403 | vports = lpfc_create_vport_work_array(phba); | 425 | vports = lpfc_create_vport_work_array(phba); |
| 404 | if (vports != NULL) | 426 | if (vports != NULL) |
| @@ -408,22 +430,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
| 408 | if (vports[i]->cfg_lun_queue_depth <= | 430 | if (vports[i]->cfg_lun_queue_depth <= |
| 409 | sdev->queue_depth) | 431 | sdev->queue_depth) |
| 410 | continue; | 432 | continue; |
| 411 | if (sdev->ordered_tags) | 433 | lpfc_change_queue_depth(sdev, |
| 412 | scsi_adjust_queue_depth(sdev, | 434 | sdev->queue_depth+1, |
| 413 | MSG_ORDERED_TAG, | 435 | SCSI_QDEPTH_RAMP_UP); |
| 414 | sdev->queue_depth+1); | ||
| 415 | else | ||
| 416 | scsi_adjust_queue_depth(sdev, | ||
| 417 | MSG_SIMPLE_TAG, | ||
| 418 | sdev->queue_depth+1); | ||
| 419 | rdata = sdev->hostdata; | ||
| 420 | if (rdata) | ||
| 421 | lpfc_send_sdev_queuedepth_change_event( | ||
| 422 | phba, vports[i], | ||
| 423 | rdata->pnode, | ||
| 424 | sdev->lun, | ||
| 425 | sdev->queue_depth - 1, | ||
| 426 | sdev->queue_depth); | ||
| 427 | } | 436 | } |
| 428 | } | 437 | } |
| 429 | lpfc_destroy_vport_work_array(phba, vports); | 438 | lpfc_destroy_vport_work_array(phba, vports); |
| @@ -589,7 +598,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) | |||
| 589 | iocb->ulpClass = CLASS3; | 598 | iocb->ulpClass = CLASS3; |
| 590 | psb->status = IOSTAT_SUCCESS; | 599 | psb->status = IOSTAT_SUCCESS; |
| 591 | /* Put it back into the SCSI buffer list */ | 600 | /* Put it back into the SCSI buffer list */ |
| 592 | lpfc_release_scsi_buf_s4(phba, psb); | 601 | lpfc_release_scsi_buf_s3(phba, psb); |
| 593 | 602 | ||
| 594 | } | 603 | } |
| 595 | 604 | ||
| @@ -1024,7 +1033,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
| 1024 | 1033 | ||
| 1025 | lpfc_cmd->seg_cnt = nseg; | 1034 | lpfc_cmd->seg_cnt = nseg; |
| 1026 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | 1035 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
| 1027 | printk(KERN_ERR "%s: Too many sg segments from " | 1036 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1037 | "9064 BLKGRD: %s: Too many sg segments from " | ||
| 1028 | "dma_map_sg. Config %d, seg_cnt %d\n", | 1038 | "dma_map_sg. Config %d, seg_cnt %d\n", |
| 1029 | __func__, phba->cfg_sg_seg_cnt, | 1039 | __func__, phba->cfg_sg_seg_cnt, |
| 1030 | lpfc_cmd->seg_cnt); | 1040 | lpfc_cmd->seg_cnt); |
| @@ -1112,7 +1122,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
| 1112 | * with the cmd | 1122 | * with the cmd |
| 1113 | */ | 1123 | */ |
| 1114 | static int | 1124 | static int |
| 1115 | lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) | 1125 | lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) |
| 1116 | { | 1126 | { |
| 1117 | uint8_t guard_type = scsi_host_get_guard(sc->device->host); | 1127 | uint8_t guard_type = scsi_host_get_guard(sc->device->host); |
| 1118 | uint8_t ret_prof = LPFC_PROF_INVALID; | 1128 | uint8_t ret_prof = LPFC_PROF_INVALID; |
| @@ -1136,7 +1146,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) | |||
| 1136 | 1146 | ||
| 1137 | case SCSI_PROT_NORMAL: | 1147 | case SCSI_PROT_NORMAL: |
| 1138 | default: | 1148 | default: |
| 1139 | printk(KERN_ERR "Bad op/guard:%d/%d combination\n", | 1149 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1150 | "9063 BLKGRD:Bad op/guard:%d/%d combination\n", | ||
| 1140 | scsi_get_prot_op(sc), guard_type); | 1151 | scsi_get_prot_op(sc), guard_type); |
| 1141 | break; | 1152 | break; |
| 1142 | 1153 | ||
| @@ -1157,7 +1168,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) | |||
| 1157 | case SCSI_PROT_WRITE_STRIP: | 1168 | case SCSI_PROT_WRITE_STRIP: |
| 1158 | case SCSI_PROT_NORMAL: | 1169 | case SCSI_PROT_NORMAL: |
| 1159 | default: | 1170 | default: |
| 1160 | printk(KERN_ERR "Bad op/guard:%d/%d combination\n", | 1171 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1172 | "9075 BLKGRD: Bad op/guard:%d/%d combination\n", | ||
| 1161 | scsi_get_prot_op(sc), guard_type); | 1173 | scsi_get_prot_op(sc), guard_type); |
| 1162 | break; | 1174 | break; |
| 1163 | } | 1175 | } |
| @@ -1259,7 +1271,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
| 1259 | uint16_t apptagmask, apptagval; | 1271 | uint16_t apptagmask, apptagval; |
| 1260 | 1272 | ||
| 1261 | pde1 = (struct lpfc_pde *) bpl; | 1273 | pde1 = (struct lpfc_pde *) bpl; |
| 1262 | prof = lpfc_sc_to_sli_prof(sc); | 1274 | prof = lpfc_sc_to_sli_prof(phba, sc); |
| 1263 | 1275 | ||
| 1264 | if (prof == LPFC_PROF_INVALID) | 1276 | if (prof == LPFC_PROF_INVALID) |
| 1265 | goto out; | 1277 | goto out; |
| @@ -1359,7 +1371,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
| 1359 | return 0; | 1371 | return 0; |
| 1360 | } | 1372 | } |
| 1361 | 1373 | ||
| 1362 | prof = lpfc_sc_to_sli_prof(sc); | 1374 | prof = lpfc_sc_to_sli_prof(phba, sc); |
| 1363 | if (prof == LPFC_PROF_INVALID) | 1375 | if (prof == LPFC_PROF_INVALID) |
| 1364 | goto out; | 1376 | goto out; |
| 1365 | 1377 | ||
| @@ -1408,7 +1420,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
| 1408 | subtotal = 0; /* total bytes processed for current prot grp */ | 1420 | subtotal = 0; /* total bytes processed for current prot grp */ |
| 1409 | while (!pgdone) { | 1421 | while (!pgdone) { |
| 1410 | if (!sgde) { | 1422 | if (!sgde) { |
| 1411 | printk(KERN_ERR "%s Invalid data segment\n", | 1423 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1424 | "9065 BLKGRD:%s Invalid data segment\n", | ||
| 1412 | __func__); | 1425 | __func__); |
| 1413 | return 0; | 1426 | return 0; |
| 1414 | } | 1427 | } |
| @@ -1462,7 +1475,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
| 1462 | reftag += protgrp_blks; | 1475 | reftag += protgrp_blks; |
| 1463 | } else { | 1476 | } else { |
| 1464 | /* if we're here, we have a bug */ | 1477 | /* if we're here, we have a bug */ |
| 1465 | printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); | 1478 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1479 | "9054 BLKGRD: bug in %s\n", __func__); | ||
| 1466 | } | 1480 | } |
| 1467 | 1481 | ||
| 1468 | } while (!alldone); | 1482 | } while (!alldone); |
| @@ -1544,8 +1558,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, | |||
| 1544 | 1558 | ||
| 1545 | lpfc_cmd->seg_cnt = datasegcnt; | 1559 | lpfc_cmd->seg_cnt = datasegcnt; |
| 1546 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | 1560 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
| 1547 | printk(KERN_ERR "%s: Too many sg segments from " | 1561 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1548 | "dma_map_sg. Config %d, seg_cnt %d\n", | 1562 | "9067 BLKGRD: %s: Too many sg segments" |
| 1563 | " from dma_map_sg. Config %d, seg_cnt" | ||
| 1564 | " %d\n", | ||
| 1549 | __func__, phba->cfg_sg_seg_cnt, | 1565 | __func__, phba->cfg_sg_seg_cnt, |
| 1550 | lpfc_cmd->seg_cnt); | 1566 | lpfc_cmd->seg_cnt); |
| 1551 | scsi_dma_unmap(scsi_cmnd); | 1567 | scsi_dma_unmap(scsi_cmnd); |
| @@ -1579,8 +1595,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, | |||
| 1579 | lpfc_cmd->prot_seg_cnt = protsegcnt; | 1595 | lpfc_cmd->prot_seg_cnt = protsegcnt; |
| 1580 | if (lpfc_cmd->prot_seg_cnt | 1596 | if (lpfc_cmd->prot_seg_cnt |
| 1581 | > phba->cfg_prot_sg_seg_cnt) { | 1597 | > phba->cfg_prot_sg_seg_cnt) { |
| 1582 | printk(KERN_ERR "%s: Too many prot sg segments " | 1598 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1583 | "from dma_map_sg. Config %d," | 1599 | "9068 BLKGRD: %s: Too many prot sg " |
| 1600 | "segments from dma_map_sg. Config %d," | ||
| 1584 | "prot_seg_cnt %d\n", __func__, | 1601 | "prot_seg_cnt %d\n", __func__, |
| 1585 | phba->cfg_prot_sg_seg_cnt, | 1602 | phba->cfg_prot_sg_seg_cnt, |
| 1586 | lpfc_cmd->prot_seg_cnt); | 1603 | lpfc_cmd->prot_seg_cnt); |
| @@ -1671,23 +1688,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1671 | uint32_t bgstat = bgf->bgstat; | 1688 | uint32_t bgstat = bgf->bgstat; |
| 1672 | uint64_t failing_sector = 0; | 1689 | uint64_t failing_sector = 0; |
| 1673 | 1690 | ||
| 1674 | printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " | 1691 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" |
| 1692 | " 0x%x lba 0x%llx blk cnt 0x%x " | ||
| 1675 | "bgstat=0x%x bghm=0x%x\n", | 1693 | "bgstat=0x%x bghm=0x%x\n", |
| 1676 | cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), | 1694 | cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), |
| 1677 | blk_rq_sectors(cmd->request), bgstat, bghm); | 1695 | blk_rq_sectors(cmd->request), bgstat, bghm); |
| 1678 | 1696 | ||
| 1679 | spin_lock(&_dump_buf_lock); | 1697 | spin_lock(&_dump_buf_lock); |
| 1680 | if (!_dump_buf_done) { | 1698 | if (!_dump_buf_done) { |
| 1681 | printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", | 1699 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" |
| 1700 | " Data for %u blocks to debugfs\n", | ||
| 1682 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); | 1701 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); |
| 1683 | lpfc_debug_save_data(cmd); | 1702 | lpfc_debug_save_data(phba, cmd); |
| 1684 | 1703 | ||
| 1685 | /* If we have a prot sgl, save the DIF buffer */ | 1704 | /* If we have a prot sgl, save the DIF buffer */ |
| 1686 | if (lpfc_prot_group_type(phba, cmd) == | 1705 | if (lpfc_prot_group_type(phba, cmd) == |
| 1687 | LPFC_PG_TYPE_DIF_BUF) { | 1706 | LPFC_PG_TYPE_DIF_BUF) { |
| 1688 | printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", | 1707 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: " |
| 1689 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); | 1708 | "Saving DIF for %u blocks to debugfs\n", |
| 1690 | lpfc_debug_save_dif(cmd); | 1709 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); |
| 1710 | lpfc_debug_save_dif(phba, cmd); | ||
| 1691 | } | 1711 | } |
| 1692 | 1712 | ||
| 1693 | _dump_buf_done = 1; | 1713 | _dump_buf_done = 1; |
| @@ -1696,15 +1716,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1696 | 1716 | ||
| 1697 | if (lpfc_bgs_get_invalid_prof(bgstat)) { | 1717 | if (lpfc_bgs_get_invalid_prof(bgstat)) { |
| 1698 | cmd->result = ScsiResult(DID_ERROR, 0); | 1718 | cmd->result = ScsiResult(DID_ERROR, 0); |
| 1699 | printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", | 1719 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" |
| 1700 | bgstat); | 1720 | " BlockGuard profile. bgstat:0x%x\n", |
| 1721 | bgstat); | ||
| 1701 | ret = (-1); | 1722 | ret = (-1); |
| 1702 | goto out; | 1723 | goto out; |
| 1703 | } | 1724 | } |
| 1704 | 1725 | ||
| 1705 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { | 1726 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { |
| 1706 | cmd->result = ScsiResult(DID_ERROR, 0); | 1727 | cmd->result = ScsiResult(DID_ERROR, 0); |
| 1707 | printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", | 1728 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " |
| 1729 | "Invalid BlockGuard DIF Block. bgstat:0x%x\n", | ||
| 1708 | bgstat); | 1730 | bgstat); |
| 1709 | ret = (-1); | 1731 | ret = (-1); |
| 1710 | goto out; | 1732 | goto out; |
| @@ -1718,7 +1740,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1718 | cmd->result = DRIVER_SENSE << 24 | 1740 | cmd->result = DRIVER_SENSE << 24 |
| 1719 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 1741 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); |
| 1720 | phba->bg_guard_err_cnt++; | 1742 | phba->bg_guard_err_cnt++; |
| 1721 | printk(KERN_ERR "BLKGRD: guard_tag error\n"); | 1743 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1744 | "9055 BLKGRD: guard_tag error\n"); | ||
| 1722 | } | 1745 | } |
| 1723 | 1746 | ||
| 1724 | if (lpfc_bgs_get_reftag_err(bgstat)) { | 1747 | if (lpfc_bgs_get_reftag_err(bgstat)) { |
| @@ -1730,7 +1753,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1730 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 1753 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); |
| 1731 | 1754 | ||
| 1732 | phba->bg_reftag_err_cnt++; | 1755 | phba->bg_reftag_err_cnt++; |
| 1733 | printk(KERN_ERR "BLKGRD: ref_tag error\n"); | 1756 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1757 | "9056 BLKGRD: ref_tag error\n"); | ||
| 1734 | } | 1758 | } |
| 1735 | 1759 | ||
| 1736 | if (lpfc_bgs_get_apptag_err(bgstat)) { | 1760 | if (lpfc_bgs_get_apptag_err(bgstat)) { |
| @@ -1742,7 +1766,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1742 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 1766 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); |
| 1743 | 1767 | ||
| 1744 | phba->bg_apptag_err_cnt++; | 1768 | phba->bg_apptag_err_cnt++; |
| 1745 | printk(KERN_ERR "BLKGRD: app_tag error\n"); | 1769 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1770 | "9061 BLKGRD: app_tag error\n"); | ||
| 1746 | } | 1771 | } |
| 1747 | 1772 | ||
| 1748 | if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { | 1773 | if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { |
| @@ -1763,7 +1788,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 1763 | if (!ret) { | 1788 | if (!ret) { |
| 1764 | /* No error was reported - problem in FW? */ | 1789 | /* No error was reported - problem in FW? */ |
| 1765 | cmd->result = ScsiResult(DID_ERROR, 0); | 1790 | cmd->result = ScsiResult(DID_ERROR, 0); |
| 1766 | printk(KERN_ERR "BLKGRD: no errors reported!\n"); | 1791 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 1792 | "9057 BLKGRD: no errors reported!\n"); | ||
| 1767 | } | 1793 | } |
| 1768 | 1794 | ||
| 1769 | out: | 1795 | out: |
| @@ -1822,9 +1848,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
| 1822 | 1848 | ||
| 1823 | lpfc_cmd->seg_cnt = nseg; | 1849 | lpfc_cmd->seg_cnt = nseg; |
| 1824 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | 1850 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
| 1825 | printk(KERN_ERR "%s: Too many sg segments from " | 1851 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" |
| 1826 | "dma_map_sg. Config %d, seg_cnt %d\n", | 1852 | " %s: Too many sg segments from " |
| 1827 | __func__, phba->cfg_sg_seg_cnt, | 1853 | "dma_map_sg. Config %d, seg_cnt %d\n", |
| 1854 | __func__, phba->cfg_sg_seg_cnt, | ||
| 1828 | lpfc_cmd->seg_cnt); | 1855 | lpfc_cmd->seg_cnt); |
| 1829 | scsi_dma_unmap(scsi_cmnd); | 1856 | scsi_dma_unmap(scsi_cmnd); |
| 1830 | return 1; | 1857 | return 1; |
| @@ -2050,6 +2077,21 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 2050 | goto out; | 2077 | goto out; |
| 2051 | } | 2078 | } |
| 2052 | 2079 | ||
| 2080 | if (resp_info & RSP_LEN_VALID) { | ||
| 2081 | rsplen = be32_to_cpu(fcprsp->rspRspLen); | ||
| 2082 | if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || | ||
| 2083 | (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { | ||
| 2084 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
| 2085 | "2719 Invalid response length: " | ||
| 2086 | "tgt x%x lun x%x cmnd x%x rsplen x%x\n", | ||
| 2087 | cmnd->device->id, | ||
| 2088 | cmnd->device->lun, cmnd->cmnd[0], | ||
| 2089 | rsplen); | ||
| 2090 | host_status = DID_ERROR; | ||
| 2091 | goto out; | ||
| 2092 | } | ||
| 2093 | } | ||
| 2094 | |||
| 2053 | if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { | 2095 | if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { |
| 2054 | uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); | 2096 | uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); |
| 2055 | if (snslen > SCSI_SENSE_BUFFERSIZE) | 2097 | if (snslen > SCSI_SENSE_BUFFERSIZE) |
| @@ -2074,15 +2116,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 2074 | be32_to_cpu(fcprsp->rspRspLen), | 2116 | be32_to_cpu(fcprsp->rspRspLen), |
| 2075 | fcprsp->rspInfo3); | 2117 | fcprsp->rspInfo3); |
| 2076 | 2118 | ||
| 2077 | if (resp_info & RSP_LEN_VALID) { | ||
| 2078 | rsplen = be32_to_cpu(fcprsp->rspRspLen); | ||
| 2079 | if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || | ||
| 2080 | (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { | ||
| 2081 | host_status = DID_ERROR; | ||
| 2082 | goto out; | ||
| 2083 | } | ||
| 2084 | } | ||
| 2085 | |||
| 2086 | scsi_set_resid(cmnd, 0); | 2119 | scsi_set_resid(cmnd, 0); |
| 2087 | if (resp_info & RESID_UNDER) { | 2120 | if (resp_info & RESID_UNDER) { |
| 2088 | scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); | 2121 | scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); |
| @@ -2180,7 +2213,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 2180 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; | 2213 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; |
| 2181 | int result; | 2214 | int result; |
| 2182 | struct scsi_device *tmp_sdev; | 2215 | struct scsi_device *tmp_sdev; |
| 2183 | int depth = 0; | 2216 | int depth; |
| 2184 | unsigned long flags; | 2217 | unsigned long flags; |
| 2185 | struct lpfc_fast_path_event *fast_path_evt; | 2218 | struct lpfc_fast_path_event *fast_path_evt; |
| 2186 | struct Scsi_Host *shost = cmd->device->host; | 2219 | struct Scsi_Host *shost = cmd->device->host; |
| @@ -2264,7 +2297,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 2264 | lpfc_printf_vlog(vport, KERN_WARNING, | 2297 | lpfc_printf_vlog(vport, KERN_WARNING, |
| 2265 | LOG_BG, | 2298 | LOG_BG, |
| 2266 | "9031 non-zero BGSTAT " | 2299 | "9031 non-zero BGSTAT " |
| 2267 | "on unprotected cmd"); | 2300 | "on unprotected cmd\n"); |
| 2268 | } | 2301 | } |
| 2269 | } | 2302 | } |
| 2270 | 2303 | ||
| @@ -2347,67 +2380,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 2347 | return; | 2380 | return; |
| 2348 | } | 2381 | } |
| 2349 | 2382 | ||
| 2350 | |||
| 2351 | if (!result) | 2383 | if (!result) |
| 2352 | lpfc_rampup_queue_depth(vport, queue_depth); | 2384 | lpfc_rampup_queue_depth(vport, queue_depth); |
| 2353 | 2385 | ||
| 2354 | if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && | ||
| 2355 | ((jiffies - pnode->last_ramp_up_time) > | ||
| 2356 | LPFC_Q_RAMP_UP_INTERVAL * HZ) && | ||
| 2357 | ((jiffies - pnode->last_q_full_time) > | ||
| 2358 | LPFC_Q_RAMP_UP_INTERVAL * HZ) && | ||
| 2359 | (vport->cfg_lun_queue_depth > queue_depth)) { | ||
| 2360 | shost_for_each_device(tmp_sdev, shost) { | ||
| 2361 | if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){ | ||
| 2362 | if (tmp_sdev->id != scsi_id) | ||
| 2363 | continue; | ||
| 2364 | if (tmp_sdev->ordered_tags) | ||
| 2365 | scsi_adjust_queue_depth(tmp_sdev, | ||
| 2366 | MSG_ORDERED_TAG, | ||
| 2367 | tmp_sdev->queue_depth+1); | ||
| 2368 | else | ||
| 2369 | scsi_adjust_queue_depth(tmp_sdev, | ||
| 2370 | MSG_SIMPLE_TAG, | ||
| 2371 | tmp_sdev->queue_depth+1); | ||
| 2372 | |||
| 2373 | pnode->last_ramp_up_time = jiffies; | ||
| 2374 | } | ||
| 2375 | } | ||
| 2376 | lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode, | ||
| 2377 | 0xFFFFFFFF, | ||
| 2378 | queue_depth , queue_depth + 1); | ||
| 2379 | } | ||
| 2380 | |||
| 2381 | /* | 2386 | /* |
| 2382 | * Check for queue full. If the lun is reporting queue full, then | 2387 | * Check for queue full. If the lun is reporting queue full, then |
| 2383 | * back off the lun queue depth to prevent target overloads. | 2388 | * back off the lun queue depth to prevent target overloads. |
| 2384 | */ | 2389 | */ |
| 2385 | if (result == SAM_STAT_TASK_SET_FULL && pnode && | 2390 | if (result == SAM_STAT_TASK_SET_FULL && pnode && |
| 2386 | NLP_CHK_NODE_ACT(pnode)) { | 2391 | NLP_CHK_NODE_ACT(pnode)) { |
| 2387 | pnode->last_q_full_time = jiffies; | ||
| 2388 | |||
| 2389 | shost_for_each_device(tmp_sdev, shost) { | 2392 | shost_for_each_device(tmp_sdev, shost) { |
| 2390 | if (tmp_sdev->id != scsi_id) | 2393 | if (tmp_sdev->id != scsi_id) |
| 2391 | continue; | 2394 | continue; |
| 2392 | depth = scsi_track_queue_full(tmp_sdev, | 2395 | depth = scsi_track_queue_full(tmp_sdev, |
| 2393 | tmp_sdev->queue_depth - 1); | 2396 | tmp_sdev->queue_depth-1); |
| 2394 | } | 2397 | if (depth <= 0) |
| 2395 | /* | 2398 | continue; |
| 2396 | * The queue depth cannot be lowered any more. | ||
| 2397 | * Modify the returned error code to store | ||
| 2398 | * the final depth value set by | ||
| 2399 | * scsi_track_queue_full. | ||
| 2400 | */ | ||
| 2401 | if (depth == -1) | ||
| 2402 | depth = shost->cmd_per_lun; | ||
| 2403 | |||
| 2404 | if (depth) { | ||
| 2405 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 2399 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 2406 | "0711 detected queue full - lun queue " | 2400 | "0711 detected queue full - lun queue " |
| 2407 | "depth adjusted to %d.\n", depth); | 2401 | "depth adjusted to %d.\n", depth); |
| 2408 | lpfc_send_sdev_queuedepth_change_event(phba, vport, | 2402 | lpfc_send_sdev_queuedepth_change_event(phba, vport, |
| 2409 | pnode, 0xFFFFFFFF, | 2403 | pnode, |
| 2410 | depth+1, depth); | 2404 | tmp_sdev->lun, |
| 2405 | depth+1, depth); | ||
| 2411 | } | 2406 | } |
| 2412 | } | 2407 | } |
| 2413 | 2408 | ||
| @@ -2745,7 +2740,9 @@ void lpfc_poll_timeout(unsigned long ptr) | |||
| 2745 | struct lpfc_hba *phba = (struct lpfc_hba *) ptr; | 2740 | struct lpfc_hba *phba = (struct lpfc_hba *) ptr; |
| 2746 | 2741 | ||
| 2747 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 2742 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
| 2748 | lpfc_sli_poll_fcp_ring (phba); | 2743 | lpfc_sli_handle_fast_ring_event(phba, |
| 2744 | &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); | ||
| 2745 | |||
| 2749 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | 2746 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
| 2750 | lpfc_poll_rearm_timer(phba); | 2747 | lpfc_poll_rearm_timer(phba); |
| 2751 | } | 2748 | } |
| @@ -2771,7 +2768,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
| 2771 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2768 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
| 2772 | struct lpfc_hba *phba = vport->phba; | 2769 | struct lpfc_hba *phba = vport->phba; |
| 2773 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 2770 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
| 2774 | struct lpfc_nodelist *ndlp = rdata->pnode; | 2771 | struct lpfc_nodelist *ndlp; |
| 2775 | struct lpfc_scsi_buf *lpfc_cmd; | 2772 | struct lpfc_scsi_buf *lpfc_cmd; |
| 2776 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | 2773 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
| 2777 | int err; | 2774 | int err; |
| @@ -2781,13 +2778,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
| 2781 | cmnd->result = err; | 2778 | cmnd->result = err; |
| 2782 | goto out_fail_command; | 2779 | goto out_fail_command; |
| 2783 | } | 2780 | } |
| 2781 | ndlp = rdata->pnode; | ||
| 2784 | 2782 | ||
| 2785 | if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && | 2783 | if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && |
| 2786 | scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { | 2784 | scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { |
| 2787 | 2785 | ||
| 2788 | printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " | 2786 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
| 2789 | "str=%s without registering for BlockGuard - " | 2787 | "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" |
| 2790 | "Rejecting command\n", | 2788 | " op:%02x str=%s without registering for" |
| 2789 | " BlockGuard - Rejecting command\n", | ||
| 2791 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | 2790 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), |
| 2792 | dif_op_str[scsi_get_prot_op(cmnd)]); | 2791 | dif_op_str[scsi_get_prot_op(cmnd)]); |
| 2793 | goto out_fail_command; | 2792 | goto out_fail_command; |
| @@ -2827,61 +2826,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
| 2827 | cmnd->scsi_done = done; | 2826 | cmnd->scsi_done = done; |
| 2828 | 2827 | ||
| 2829 | if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { | 2828 | if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { |
| 2830 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2829 | if (vport->phba->cfg_enable_bg) { |
| 2830 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
| 2831 | "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " | 2831 | "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " |
| 2832 | "str=%s\n", | 2832 | "str=%s\n", |
| 2833 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | 2833 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), |
| 2834 | dif_op_str[scsi_get_prot_op(cmnd)]); | 2834 | dif_op_str[scsi_get_prot_op(cmnd)]); |
| 2835 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2835 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2836 | "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " | 2836 | "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " |
| 2837 | "%02x %02x %02x %02x %02x\n", | 2837 | "%02x %02x %02x %02x %02x\n", |
| 2838 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], | 2838 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], |
| 2839 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], | 2839 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], |
| 2840 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], | 2840 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], |
| 2841 | cmnd->cmnd[9]); | 2841 | cmnd->cmnd[9]); |
| 2842 | if (cmnd->cmnd[0] == READ_10) | 2842 | if (cmnd->cmnd[0] == READ_10) |
| 2843 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2843 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2844 | "9035 BLKGRD: READ @ sector %llu, " | 2844 | "9035 BLKGRD: READ @ sector %llu, " |
| 2845 | "count %u\n", | 2845 | "count %u\n", |
| 2846 | (unsigned long long)scsi_get_lba(cmnd), | 2846 | (unsigned long long)scsi_get_lba(cmnd), |
| 2847 | blk_rq_sectors(cmnd->request)); | 2847 | blk_rq_sectors(cmnd->request)); |
| 2848 | else if (cmnd->cmnd[0] == WRITE_10) | 2848 | else if (cmnd->cmnd[0] == WRITE_10) |
| 2849 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2849 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2850 | "9036 BLKGRD: WRITE @ sector %llu, " | 2850 | "9036 BLKGRD: WRITE @ sector %llu, " |
| 2851 | "count %u cmd=%p\n", | 2851 | "count %u cmd=%p\n", |
| 2852 | (unsigned long long)scsi_get_lba(cmnd), | 2852 | (unsigned long long)scsi_get_lba(cmnd), |
| 2853 | blk_rq_sectors(cmnd->request), | 2853 | blk_rq_sectors(cmnd->request), |
| 2854 | cmnd); | 2854 | cmnd); |
| 2855 | } | ||
| 2855 | 2856 | ||
| 2856 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); | 2857 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
| 2857 | } else { | 2858 | } else { |
| 2858 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2859 | if (vport->phba->cfg_enable_bg) { |
| 2859 | "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x" | ||
| 2860 | " str=%s\n", | ||
| 2861 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | ||
| 2862 | dif_op_str[scsi_get_prot_op(cmnd)]); | ||
| 2863 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
| 2864 | "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " | ||
| 2865 | "%02x %02x %02x %02x %02x\n", | ||
| 2866 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], | ||
| 2867 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], | ||
| 2868 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], | ||
| 2869 | cmnd->cmnd[9]); | ||
| 2870 | if (cmnd->cmnd[0] == READ_10) | ||
| 2871 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2860 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2872 | "9040 dbg: READ @ sector %llu, " | 2861 | "9038 BLKGRD: rcvd unprotected cmd:" |
| 2873 | "count %u\n", | 2862 | "%02x op:%02x str=%s\n", |
| 2874 | (unsigned long long)scsi_get_lba(cmnd), | 2863 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), |
| 2864 | dif_op_str[scsi_get_prot_op(cmnd)]); | ||
| 2865 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
| 2866 | "9039 BLKGRD: CDB: %02x %02x %02x " | ||
| 2867 | "%02x %02x %02x %02x %02x %02x %02x\n", | ||
| 2868 | cmnd->cmnd[0], cmnd->cmnd[1], | ||
| 2869 | cmnd->cmnd[2], cmnd->cmnd[3], | ||
| 2870 | cmnd->cmnd[4], cmnd->cmnd[5], | ||
| 2871 | cmnd->cmnd[6], cmnd->cmnd[7], | ||
| 2872 | cmnd->cmnd[8], cmnd->cmnd[9]); | ||
| 2873 | if (cmnd->cmnd[0] == READ_10) | ||
| 2874 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
| 2875 | "9040 dbg: READ @ sector %llu, " | ||
| 2876 | "count %u\n", | ||
| 2877 | (unsigned long long)scsi_get_lba(cmnd), | ||
| 2875 | blk_rq_sectors(cmnd->request)); | 2878 | blk_rq_sectors(cmnd->request)); |
| 2876 | else if (cmnd->cmnd[0] == WRITE_10) | 2879 | else if (cmnd->cmnd[0] == WRITE_10) |
| 2877 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2880 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2878 | "9041 dbg: WRITE @ sector %llu, " | 2881 | "9041 dbg: WRITE @ sector %llu, " |
| 2879 | "count %u cmd=%p\n", | 2882 | "count %u cmd=%p\n", |
| 2880 | (unsigned long long)scsi_get_lba(cmnd), | 2883 | (unsigned long long)scsi_get_lba(cmnd), |
| 2881 | blk_rq_sectors(cmnd->request), cmnd); | 2884 | blk_rq_sectors(cmnd->request), cmnd); |
| 2882 | else | 2885 | else |
| 2883 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 2886 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
| 2884 | "9042 dbg: parser not implemented\n"); | 2887 | "9042 dbg: parser not implemented\n"); |
| 2888 | } | ||
| 2885 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | 2889 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
| 2886 | } | 2890 | } |
| 2887 | 2891 | ||
| @@ -2898,7 +2902,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
| 2898 | goto out_host_busy_free_buf; | 2902 | goto out_host_busy_free_buf; |
| 2899 | } | 2903 | } |
| 2900 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 2904 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
| 2901 | lpfc_sli_poll_fcp_ring(phba); | 2905 | spin_unlock(shost->host_lock); |
| 2906 | lpfc_sli_handle_fast_ring_event(phba, | ||
| 2907 | &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); | ||
| 2908 | |||
| 2909 | spin_lock(shost->host_lock); | ||
| 2902 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | 2910 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
| 2903 | lpfc_poll_rearm_timer(phba); | 2911 | lpfc_poll_rearm_timer(phba); |
| 2904 | } | 2912 | } |
| @@ -2917,28 +2925,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
| 2917 | } | 2925 | } |
| 2918 | 2926 | ||
| 2919 | /** | 2927 | /** |
| 2920 | * lpfc_block_error_handler - Routine to block error handler | ||
| 2921 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
| 2922 | * | ||
| 2923 | * This routine blocks execution till fc_rport state is not FC_PORSTAT_BLCOEKD. | ||
| 2924 | **/ | ||
| 2925 | static void | ||
| 2926 | lpfc_block_error_handler(struct scsi_cmnd *cmnd) | ||
| 2927 | { | ||
| 2928 | struct Scsi_Host *shost = cmnd->device->host; | ||
| 2929 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
| 2930 | |||
| 2931 | spin_lock_irq(shost->host_lock); | ||
| 2932 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
| 2933 | spin_unlock_irq(shost->host_lock); | ||
| 2934 | msleep(1000); | ||
| 2935 | spin_lock_irq(shost->host_lock); | ||
| 2936 | } | ||
| 2937 | spin_unlock_irq(shost->host_lock); | ||
| 2938 | return; | ||
| 2939 | } | ||
| 2940 | |||
| 2941 | /** | ||
| 2942 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point | 2928 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
| 2943 | * @cmnd: Pointer to scsi_cmnd data structure. | 2929 | * @cmnd: Pointer to scsi_cmnd data structure. |
| 2944 | * | 2930 | * |
| @@ -2961,7 +2947,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 2961 | int ret = SUCCESS; | 2947 | int ret = SUCCESS; |
| 2962 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); | 2948 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
| 2963 | 2949 | ||
| 2964 | lpfc_block_error_handler(cmnd); | 2950 | fc_block_scsi_eh(cmnd); |
| 2965 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; | 2951 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; |
| 2966 | BUG_ON(!lpfc_cmd); | 2952 | BUG_ON(!lpfc_cmd); |
| 2967 | 2953 | ||
| @@ -3001,6 +2987,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 3001 | 2987 | ||
| 3002 | icmd->ulpLe = 1; | 2988 | icmd->ulpLe = 1; |
| 3003 | icmd->ulpClass = cmd->ulpClass; | 2989 | icmd->ulpClass = cmd->ulpClass; |
| 2990 | |||
| 2991 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | ||
| 2992 | abtsiocb->fcp_wqidx = iocb->fcp_wqidx; | ||
| 2993 | |||
| 3004 | if (lpfc_is_link_up(phba)) | 2994 | if (lpfc_is_link_up(phba)) |
| 3005 | icmd->ulpCommand = CMD_ABORT_XRI_CN; | 2995 | icmd->ulpCommand = CMD_ABORT_XRI_CN; |
| 3006 | else | 2996 | else |
| @@ -3016,7 +3006,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 3016 | } | 3006 | } |
| 3017 | 3007 | ||
| 3018 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | 3008 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
| 3019 | lpfc_sli_poll_fcp_ring (phba); | 3009 | lpfc_sli_handle_fast_ring_event(phba, |
| 3010 | &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); | ||
| 3020 | 3011 | ||
| 3021 | lpfc_cmd->waitq = &waitq; | 3012 | lpfc_cmd->waitq = &waitq; |
| 3022 | /* Wait for abort to complete */ | 3013 | /* Wait for abort to complete */ |
| @@ -3166,9 +3157,15 @@ static int | |||
| 3166 | lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) | 3157 | lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) |
| 3167 | { | 3158 | { |
| 3168 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 3159 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
| 3169 | struct lpfc_nodelist *pnode = rdata->pnode; | 3160 | struct lpfc_nodelist *pnode; |
| 3170 | unsigned long later; | 3161 | unsigned long later; |
| 3171 | 3162 | ||
| 3163 | if (!rdata) { | ||
| 3164 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | ||
| 3165 | "0797 Tgt Map rport failure: rdata x%p\n", rdata); | ||
| 3166 | return FAILED; | ||
| 3167 | } | ||
| 3168 | pnode = rdata->pnode; | ||
| 3172 | /* | 3169 | /* |
| 3173 | * If target is not in a MAPPED state, delay until | 3170 | * If target is not in a MAPPED state, delay until |
| 3174 | * target is rediscovered or devloss timeout expires. | 3171 | * target is rediscovered or devloss timeout expires. |
| @@ -3253,13 +3250,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
| 3253 | struct Scsi_Host *shost = cmnd->device->host; | 3250 | struct Scsi_Host *shost = cmnd->device->host; |
| 3254 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3251 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
| 3255 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 3252 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
| 3256 | struct lpfc_nodelist *pnode = rdata->pnode; | 3253 | struct lpfc_nodelist *pnode; |
| 3257 | unsigned tgt_id = cmnd->device->id; | 3254 | unsigned tgt_id = cmnd->device->id; |
| 3258 | unsigned int lun_id = cmnd->device->lun; | 3255 | unsigned int lun_id = cmnd->device->lun; |
| 3259 | struct lpfc_scsi_event_header scsi_event; | 3256 | struct lpfc_scsi_event_header scsi_event; |
| 3260 | int status; | 3257 | int status; |
| 3261 | 3258 | ||
| 3262 | lpfc_block_error_handler(cmnd); | 3259 | if (!rdata) { |
| 3260 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
| 3261 | "0798 Device Reset rport failure: rdata x%p\n", rdata); | ||
| 3262 | return FAILED; | ||
| 3263 | } | ||
| 3264 | pnode = rdata->pnode; | ||
| 3265 | fc_block_scsi_eh(cmnd); | ||
| 3263 | 3266 | ||
| 3264 | status = lpfc_chk_tgt_mapped(vport, cmnd); | 3267 | status = lpfc_chk_tgt_mapped(vport, cmnd); |
| 3265 | if (status == FAILED) { | 3268 | if (status == FAILED) { |
| @@ -3312,13 +3315,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) | |||
| 3312 | struct Scsi_Host *shost = cmnd->device->host; | 3315 | struct Scsi_Host *shost = cmnd->device->host; |
| 3313 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3316 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
| 3314 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 3317 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
| 3315 | struct lpfc_nodelist *pnode = rdata->pnode; | 3318 | struct lpfc_nodelist *pnode; |
| 3316 | unsigned tgt_id = cmnd->device->id; | 3319 | unsigned tgt_id = cmnd->device->id; |
| 3317 | unsigned int lun_id = cmnd->device->lun; | 3320 | unsigned int lun_id = cmnd->device->lun; |
| 3318 | struct lpfc_scsi_event_header scsi_event; | 3321 | struct lpfc_scsi_event_header scsi_event; |
| 3319 | int status; | 3322 | int status; |
| 3320 | 3323 | ||
| 3321 | lpfc_block_error_handler(cmnd); | 3324 | if (!rdata) { |
| 3325 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
| 3326 | "0799 Target Reset rport failure: rdata x%p\n", rdata); | ||
| 3327 | return FAILED; | ||
| 3328 | } | ||
| 3329 | pnode = rdata->pnode; | ||
| 3330 | fc_block_scsi_eh(cmnd); | ||
| 3322 | 3331 | ||
| 3323 | status = lpfc_chk_tgt_mapped(vport, cmnd); | 3332 | status = lpfc_chk_tgt_mapped(vport, cmnd); |
| 3324 | if (status == FAILED) { | 3333 | if (status == FAILED) { |
| @@ -3384,7 +3393,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
| 3384 | fc_host_post_vendor_event(shost, fc_get_event_number(), | 3393 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
| 3385 | sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); | 3394 | sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); |
| 3386 | 3395 | ||
| 3387 | lpfc_block_error_handler(cmnd); | 3396 | fc_block_scsi_eh(cmnd); |
| 3388 | 3397 | ||
| 3389 | /* | 3398 | /* |
| 3390 | * Since the driver manages a single bus device, reset all | 3399 | * Since the driver manages a single bus device, reset all |
| @@ -3498,6 +3507,8 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
| 3498 | "Allocated %d buffers.\n", | 3507 | "Allocated %d buffers.\n", |
| 3499 | num_to_alloc, num_allocated); | 3508 | num_to_alloc, num_allocated); |
| 3500 | } | 3509 | } |
| 3510 | if (num_allocated > 0) | ||
| 3511 | phba->total_scsi_bufs += num_allocated; | ||
| 3501 | return 0; | 3512 | return 0; |
| 3502 | } | 3513 | } |
| 3503 | 3514 | ||
| @@ -3534,7 +3545,8 @@ lpfc_slave_configure(struct scsi_device *sdev) | |||
| 3534 | rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 3545 | rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
| 3535 | 3546 | ||
| 3536 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 3547 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
| 3537 | lpfc_sli_poll_fcp_ring(phba); | 3548 | lpfc_sli_handle_fast_ring_event(phba, |
| 3549 | &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); | ||
| 3538 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | 3550 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
| 3539 | lpfc_poll_rearm_timer(phba); | 3551 | lpfc_poll_rearm_timer(phba); |
| 3540 | } | 3552 | } |
| @@ -3576,6 +3588,7 @@ struct scsi_host_template lpfc_template = { | |||
| 3576 | .shost_attrs = lpfc_hba_attrs, | 3588 | .shost_attrs = lpfc_hba_attrs, |
| 3577 | .max_sectors = 0xFFFF, | 3589 | .max_sectors = 0xFFFF, |
| 3578 | .vendor_id = LPFC_NL_VENDOR_ID, | 3590 | .vendor_id = LPFC_NL_VENDOR_ID, |
| 3591 | .change_queue_depth = lpfc_change_queue_depth, | ||
| 3579 | }; | 3592 | }; |
| 3580 | 3593 | ||
| 3581 | struct scsi_host_template lpfc_vport_template = { | 3594 | struct scsi_host_template lpfc_vport_template = { |
| @@ -3597,4 +3610,5 @@ struct scsi_host_template lpfc_vport_template = { | |||
| 3597 | .use_clustering = ENABLE_CLUSTERING, | 3610 | .use_clustering = ENABLE_CLUSTERING, |
| 3598 | .shost_attrs = lpfc_vport_attrs, | 3611 | .shost_attrs = lpfc_vport_attrs, |
| 3599 | .max_sectors = 0xFFFF, | 3612 | .max_sectors = 0xFFFF, |
| 3613 | .change_queue_depth = lpfc_change_queue_depth, | ||
| 3600 | }; | 3614 | }; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 43cbe336f1f8..7935667b81a5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
| 31 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
| 32 | #include <scsi/fc/fc_fs.h> | 32 | #include <scsi/fc/fc_fs.h> |
| 33 | #include <linux/aer.h> | ||
| 33 | 34 | ||
| 34 | #include "lpfc_hw4.h" | 35 | #include "lpfc_hw4.h" |
| 35 | #include "lpfc_hw.h" | 36 | #include "lpfc_hw.h" |
| @@ -58,8 +59,11 @@ typedef enum _lpfc_iocb_type { | |||
| 58 | static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, | 59 | static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, |
| 59 | uint32_t); | 60 | uint32_t); |
| 60 | static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, | 61 | static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, |
| 61 | uint8_t *, uint32_t *); | 62 | uint8_t *, uint32_t *); |
| 62 | 63 | static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, | |
| 64 | struct lpfc_iocbq *); | ||
| 65 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, | ||
| 66 | struct hbq_dmabuf *); | ||
| 63 | static IOCB_t * | 67 | static IOCB_t * |
| 64 | lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) | 68 | lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) |
| 65 | { | 69 | { |
| @@ -259,6 +263,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) | |||
| 259 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); | 263 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); |
| 260 | bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); | 264 | bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); |
| 261 | writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); | 265 | writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); |
| 266 | /* PCI read to flush PCI pipeline on re-arming for INTx mode */ | ||
| 267 | if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) | ||
| 268 | readl(q->phba->sli4_hba.EQCQDBregaddr); | ||
| 262 | return released; | 269 | return released; |
| 263 | } | 270 | } |
| 264 | 271 | ||
| @@ -515,6 +522,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba) | |||
| 515 | struct lpfc_sglq *sglq = NULL; | 522 | struct lpfc_sglq *sglq = NULL; |
| 516 | uint16_t adj_xri; | 523 | uint16_t adj_xri; |
| 517 | list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); | 524 | list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); |
| 525 | if (!sglq) | ||
| 526 | return NULL; | ||
| 518 | adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; | 527 | adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; |
| 519 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; | 528 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; |
| 520 | return sglq; | 529 | return sglq; |
| @@ -572,9 +581,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
| 572 | sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); | 581 | sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); |
| 573 | if (sglq) { | 582 | if (sglq) { |
| 574 | if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED | 583 | if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED |
| 575 | || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) | 584 | && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) |
| 576 | && (iocbq->iocb.un.ulpWord[4] | 585 | && (iocbq->iocb.un.ulpWord[4] |
| 577 | == IOERR_SLI_ABORTED))) { | 586 | == IOERR_ABORT_REQUESTED))) { |
| 578 | spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, | 587 | spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, |
| 579 | iflag); | 588 | iflag); |
| 580 | list_add(&sglq->list, | 589 | list_add(&sglq->list, |
| @@ -767,6 +776,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
| 767 | case CMD_CLOSE_XRI_CX: | 776 | case CMD_CLOSE_XRI_CX: |
| 768 | case CMD_XRI_ABORTED_CX: | 777 | case CMD_XRI_ABORTED_CX: |
| 769 | case CMD_ABORT_MXRI64_CN: | 778 | case CMD_ABORT_MXRI64_CN: |
| 779 | case CMD_XMIT_BLS_RSP64_CX: | ||
| 770 | type = LPFC_ABORT_IOCB; | 780 | type = LPFC_ABORT_IOCB; |
| 771 | break; | 781 | break; |
| 772 | case CMD_RCV_SEQUENCE_CX: | 782 | case CMD_RCV_SEQUENCE_CX: |
| @@ -1794,7 +1804,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) | |||
| 1794 | */ | 1804 | */ |
| 1795 | if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == | 1805 | if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == |
| 1796 | MBX_SHUTDOWN) { | 1806 | MBX_SHUTDOWN) { |
| 1797 | /* Unknow mailbox command compl */ | 1807 | /* Unknown mailbox command compl */ |
| 1798 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 1808 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
| 1799 | "(%d):0323 Unknown Mailbox command " | 1809 | "(%d):0323 Unknown Mailbox command " |
| 1800 | "x%x (x%x) Cmpl\n", | 1810 | "x%x (x%x) Cmpl\n", |
| @@ -2068,8 +2078,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 2068 | if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || | 2078 | if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || |
| 2069 | (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || | 2079 | (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || |
| 2070 | (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { | 2080 | (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { |
| 2071 | Rctl = FC_ELS_REQ; | 2081 | Rctl = FC_RCTL_ELS_REQ; |
| 2072 | Type = FC_ELS_DATA; | 2082 | Type = FC_TYPE_ELS; |
| 2073 | } else { | 2083 | } else { |
| 2074 | w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); | 2084 | w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); |
| 2075 | Rctl = w5p->hcsw.Rctl; | 2085 | Rctl = w5p->hcsw.Rctl; |
| @@ -2079,8 +2089,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 2079 | if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && | 2089 | if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && |
| 2080 | (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || | 2090 | (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || |
| 2081 | irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { | 2091 | irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { |
| 2082 | Rctl = FC_ELS_REQ; | 2092 | Rctl = FC_RCTL_ELS_REQ; |
| 2083 | Type = FC_ELS_DATA; | 2093 | Type = FC_TYPE_ELS; |
| 2084 | w5p->hcsw.Rctl = Rctl; | 2094 | w5p->hcsw.Rctl = Rctl; |
| 2085 | w5p->hcsw.Type = Type; | 2095 | w5p->hcsw.Type = Type; |
| 2086 | } | 2096 | } |
| @@ -2324,168 +2334,6 @@ void lpfc_poll_eratt(unsigned long ptr) | |||
| 2324 | return; | 2334 | return; |
| 2325 | } | 2335 | } |
| 2326 | 2336 | ||
| 2327 | /** | ||
| 2328 | * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode | ||
| 2329 | * @phba: Pointer to HBA context object. | ||
| 2330 | * | ||
| 2331 | * This function is called from lpfc_queuecommand, lpfc_poll_timeout, | ||
| 2332 | * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING | ||
| 2333 | * is enabled. | ||
| 2334 | * | ||
| 2335 | * The caller does not hold any lock. | ||
| 2336 | * The function processes each response iocb in the response ring until it | ||
| 2337 | * finds an iocb with LE bit set and chains all the iocbs upto the iocb with | ||
| 2338 | * LE bit set. The function will call the completion handler of the command iocb | ||
| 2339 | * if the response iocb indicates a completion for a command iocb or it is | ||
| 2340 | * an abort completion. | ||
| 2341 | **/ | ||
| 2342 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | ||
| 2343 | { | ||
| 2344 | struct lpfc_sli *psli = &phba->sli; | ||
| 2345 | struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; | ||
| 2346 | IOCB_t *irsp = NULL; | ||
| 2347 | IOCB_t *entry = NULL; | ||
| 2348 | struct lpfc_iocbq *cmdiocbq = NULL; | ||
| 2349 | struct lpfc_iocbq rspiocbq; | ||
| 2350 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; | ||
| 2351 | uint32_t status; | ||
| 2352 | uint32_t portRspPut, portRspMax; | ||
| 2353 | int type; | ||
| 2354 | uint32_t rsp_cmpl = 0; | ||
| 2355 | uint32_t ha_copy; | ||
| 2356 | unsigned long iflags; | ||
| 2357 | |||
| 2358 | pring->stats.iocb_event++; | ||
| 2359 | |||
| 2360 | /* | ||
| 2361 | * The next available response entry should never exceed the maximum | ||
| 2362 | * entries. If it does, treat it as an adapter hardware error. | ||
| 2363 | */ | ||
| 2364 | portRspMax = pring->numRiocb; | ||
| 2365 | portRspPut = le32_to_cpu(pgp->rspPutInx); | ||
| 2366 | if (unlikely(portRspPut >= portRspMax)) { | ||
| 2367 | lpfc_sli_rsp_pointers_error(phba, pring); | ||
| 2368 | return; | ||
| 2369 | } | ||
| 2370 | |||
| 2371 | rmb(); | ||
| 2372 | while (pring->rspidx != portRspPut) { | ||
| 2373 | entry = lpfc_resp_iocb(phba, pring); | ||
| 2374 | if (++pring->rspidx >= portRspMax) | ||
| 2375 | pring->rspidx = 0; | ||
| 2376 | |||
| 2377 | lpfc_sli_pcimem_bcopy((uint32_t *) entry, | ||
| 2378 | (uint32_t *) &rspiocbq.iocb, | ||
| 2379 | phba->iocb_rsp_size); | ||
| 2380 | irsp = &rspiocbq.iocb; | ||
| 2381 | type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); | ||
| 2382 | pring->stats.iocb_rsp++; | ||
| 2383 | rsp_cmpl++; | ||
| 2384 | |||
| 2385 | if (unlikely(irsp->ulpStatus)) { | ||
| 2386 | /* Rsp ring <ringno> error: IOCB */ | ||
| 2387 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
| 2388 | "0326 Rsp Ring %d error: IOCB Data: " | ||
| 2389 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", | ||
| 2390 | pring->ringno, | ||
| 2391 | irsp->un.ulpWord[0], | ||
| 2392 | irsp->un.ulpWord[1], | ||
| 2393 | irsp->un.ulpWord[2], | ||
| 2394 | irsp->un.ulpWord[3], | ||
| 2395 | irsp->un.ulpWord[4], | ||
| 2396 | irsp->un.ulpWord[5], | ||
| 2397 | *(uint32_t *)&irsp->un1, | ||
| 2398 | *((uint32_t *)&irsp->un1 + 1)); | ||
| 2399 | } | ||
| 2400 | |||
| 2401 | switch (type) { | ||
| 2402 | case LPFC_ABORT_IOCB: | ||
| 2403 | case LPFC_SOL_IOCB: | ||
| 2404 | /* | ||
| 2405 | * Idle exchange closed via ABTS from port. No iocb | ||
| 2406 | * resources need to be recovered. | ||
| 2407 | */ | ||
| 2408 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | ||
| 2409 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
| 2410 | "0314 IOCB cmd 0x%x " | ||
| 2411 | "processed. Skipping " | ||
| 2412 | "completion", | ||
| 2413 | irsp->ulpCommand); | ||
| 2414 | break; | ||
| 2415 | } | ||
| 2416 | |||
| 2417 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
| 2418 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, | ||
| 2419 | &rspiocbq); | ||
| 2420 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
| 2421 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { | ||
| 2422 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | ||
| 2423 | &rspiocbq); | ||
| 2424 | } | ||
| 2425 | break; | ||
| 2426 | default: | ||
| 2427 | if (irsp->ulpCommand == CMD_ADAPTER_MSG) { | ||
| 2428 | char adaptermsg[LPFC_MAX_ADPTMSG]; | ||
| 2429 | memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); | ||
| 2430 | memcpy(&adaptermsg[0], (uint8_t *) irsp, | ||
| 2431 | MAX_MSG_DATA); | ||
| 2432 | dev_warn(&((phba->pcidev)->dev), | ||
| 2433 | "lpfc%d: %s\n", | ||
| 2434 | phba->brd_no, adaptermsg); | ||
| 2435 | } else { | ||
| 2436 | /* Unknown IOCB command */ | ||
| 2437 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 2438 | "0321 Unknown IOCB command " | ||
| 2439 | "Data: x%x, x%x x%x x%x x%x\n", | ||
| 2440 | type, irsp->ulpCommand, | ||
| 2441 | irsp->ulpStatus, | ||
| 2442 | irsp->ulpIoTag, | ||
| 2443 | irsp->ulpContext); | ||
| 2444 | } | ||
| 2445 | break; | ||
| 2446 | } | ||
| 2447 | |||
| 2448 | /* | ||
| 2449 | * The response IOCB has been processed. Update the ring | ||
| 2450 | * pointer in SLIM. If the port response put pointer has not | ||
| 2451 | * been updated, sync the pgp->rspPutInx and fetch the new port | ||
| 2452 | * response put pointer. | ||
| 2453 | */ | ||
| 2454 | writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); | ||
| 2455 | |||
| 2456 | if (pring->rspidx == portRspPut) | ||
| 2457 | portRspPut = le32_to_cpu(pgp->rspPutInx); | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | ha_copy = readl(phba->HAregaddr); | ||
| 2461 | ha_copy >>= (LPFC_FCP_RING * 4); | ||
| 2462 | |||
| 2463 | if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { | ||
| 2464 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
| 2465 | pring->stats.iocb_rsp_full++; | ||
| 2466 | status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); | ||
| 2467 | writel(status, phba->CAregaddr); | ||
| 2468 | readl(phba->CAregaddr); | ||
| 2469 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
| 2470 | } | ||
| 2471 | if ((ha_copy & HA_R0CE_RSP) && | ||
| 2472 | (pring->flag & LPFC_CALL_RING_AVAILABLE)) { | ||
| 2473 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
| 2474 | pring->flag &= ~LPFC_CALL_RING_AVAILABLE; | ||
| 2475 | pring->stats.iocb_cmd_empty++; | ||
| 2476 | |||
| 2477 | /* Force update of the local copy of cmdGetInx */ | ||
| 2478 | pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); | ||
| 2479 | lpfc_sli_resume_iocb(phba, pring); | ||
| 2480 | |||
| 2481 | if ((pring->lpfc_sli_cmd_available)) | ||
| 2482 | (pring->lpfc_sli_cmd_available) (phba, pring); | ||
| 2483 | |||
| 2484 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
| 2485 | } | ||
| 2486 | |||
| 2487 | return; | ||
| 2488 | } | ||
| 2489 | 2337 | ||
| 2490 | /** | 2338 | /** |
| 2491 | * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring | 2339 | * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring |
| @@ -2502,9 +2350,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
| 2502 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb | 2350 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb |
| 2503 | * function if this is an unsolicited iocb. | 2351 | * function if this is an unsolicited iocb. |
| 2504 | * This routine presumes LPFC_FCP_RING handling and doesn't bother | 2352 | * This routine presumes LPFC_FCP_RING handling and doesn't bother |
| 2505 | * to check it explicitly. This function always returns 1. | 2353 | * to check it explicitly. |
| 2506 | **/ | 2354 | */ |
| 2507 | static int | 2355 | int |
| 2508 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | 2356 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, |
| 2509 | struct lpfc_sli_ring *pring, uint32_t mask) | 2357 | struct lpfc_sli_ring *pring, uint32_t mask) |
| 2510 | { | 2358 | { |
| @@ -2534,6 +2382,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
| 2534 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2382 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| 2535 | return 1; | 2383 | return 1; |
| 2536 | } | 2384 | } |
| 2385 | if (phba->fcp_ring_in_use) { | ||
| 2386 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
| 2387 | return 1; | ||
| 2388 | } else | ||
| 2389 | phba->fcp_ring_in_use = 1; | ||
| 2537 | 2390 | ||
| 2538 | rmb(); | 2391 | rmb(); |
| 2539 | while (pring->rspidx != portRspPut) { | 2392 | while (pring->rspidx != portRspPut) { |
| @@ -2604,10 +2457,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
| 2604 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, | 2457 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, |
| 2605 | &rspiocbq); | 2458 | &rspiocbq); |
| 2606 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { | 2459 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { |
| 2607 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | ||
| 2608 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | ||
| 2609 | &rspiocbq); | ||
| 2610 | } else { | ||
| 2611 | spin_unlock_irqrestore(&phba->hbalock, | 2460 | spin_unlock_irqrestore(&phba->hbalock, |
| 2612 | iflag); | 2461 | iflag); |
| 2613 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | 2462 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, |
| @@ -2615,7 +2464,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
| 2615 | spin_lock_irqsave(&phba->hbalock, | 2464 | spin_lock_irqsave(&phba->hbalock, |
| 2616 | iflag); | 2465 | iflag); |
| 2617 | } | 2466 | } |
| 2618 | } | ||
| 2619 | break; | 2467 | break; |
| 2620 | case LPFC_UNSOL_IOCB: | 2468 | case LPFC_UNSOL_IOCB: |
| 2621 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2469 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| @@ -2675,6 +2523,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
| 2675 | 2523 | ||
| 2676 | } | 2524 | } |
| 2677 | 2525 | ||
| 2526 | phba->fcp_ring_in_use = 0; | ||
| 2678 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2527 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| 2679 | return rc; | 2528 | return rc; |
| 2680 | } | 2529 | } |
| @@ -3018,16 +2867,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, | |||
| 3018 | struct lpfc_sli_ring *pring, uint32_t mask) | 2867 | struct lpfc_sli_ring *pring, uint32_t mask) |
| 3019 | { | 2868 | { |
| 3020 | struct lpfc_iocbq *irspiocbq; | 2869 | struct lpfc_iocbq *irspiocbq; |
| 2870 | struct hbq_dmabuf *dmabuf; | ||
| 2871 | struct lpfc_cq_event *cq_event; | ||
| 3021 | unsigned long iflag; | 2872 | unsigned long iflag; |
| 3022 | 2873 | ||
| 3023 | while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { | 2874 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 2875 | phba->hba_flag &= ~HBA_SP_QUEUE_EVT; | ||
| 2876 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
| 2877 | while (!list_empty(&phba->sli4_hba.sp_queue_event)) { | ||
| 3024 | /* Get the response iocb from the head of work queue */ | 2878 | /* Get the response iocb from the head of work queue */ |
| 3025 | spin_lock_irqsave(&phba->hbalock, iflag); | 2879 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 3026 | list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, | 2880 | list_remove_head(&phba->sli4_hba.sp_queue_event, |
| 3027 | irspiocbq, struct lpfc_iocbq, list); | 2881 | cq_event, struct lpfc_cq_event, list); |
| 3028 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2882 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| 3029 | /* Process the response iocb */ | 2883 | |
| 3030 | lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); | 2884 | switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { |
| 2885 | case CQE_CODE_COMPL_WQE: | ||
| 2886 | irspiocbq = container_of(cq_event, struct lpfc_iocbq, | ||
| 2887 | cq_event); | ||
| 2888 | /* Translate ELS WCQE to response IOCBQ */ | ||
| 2889 | irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, | ||
| 2890 | irspiocbq); | ||
| 2891 | if (irspiocbq) | ||
| 2892 | lpfc_sli_sp_handle_rspiocb(phba, pring, | ||
| 2893 | irspiocbq); | ||
| 2894 | break; | ||
| 2895 | case CQE_CODE_RECEIVE: | ||
| 2896 | dmabuf = container_of(cq_event, struct hbq_dmabuf, | ||
| 2897 | cq_event); | ||
| 2898 | lpfc_sli4_handle_received_buffer(phba, dmabuf); | ||
| 2899 | break; | ||
| 2900 | default: | ||
| 2901 | break; | ||
| 2902 | } | ||
| 3031 | } | 2903 | } |
| 3032 | } | 2904 | } |
| 3033 | 2905 | ||
| @@ -3416,6 +3288,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
| 3416 | 3288 | ||
| 3417 | /* perform board reset */ | 3289 | /* perform board reset */ |
| 3418 | phba->fc_eventTag = 0; | 3290 | phba->fc_eventTag = 0; |
| 3291 | phba->link_events = 0; | ||
| 3419 | phba->pport->fc_myDID = 0; | 3292 | phba->pport->fc_myDID = 0; |
| 3420 | phba->pport->fc_prevDID = 0; | 3293 | phba->pport->fc_prevDID = 0; |
| 3421 | 3294 | ||
| @@ -3476,6 +3349,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) | |||
| 3476 | 3349 | ||
| 3477 | /* perform board reset */ | 3350 | /* perform board reset */ |
| 3478 | phba->fc_eventTag = 0; | 3351 | phba->fc_eventTag = 0; |
| 3352 | phba->link_events = 0; | ||
| 3479 | phba->pport->fc_myDID = 0; | 3353 | phba->pport->fc_myDID = 0; |
| 3480 | phba->pport->fc_prevDID = 0; | 3354 | phba->pport->fc_prevDID = 0; |
| 3481 | 3355 | ||
| @@ -3495,7 +3369,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) | |||
| 3495 | list_del_init(&phba->sli4_hba.dat_rq->list); | 3369 | list_del_init(&phba->sli4_hba.dat_rq->list); |
| 3496 | list_del_init(&phba->sli4_hba.mbx_cq->list); | 3370 | list_del_init(&phba->sli4_hba.mbx_cq->list); |
| 3497 | list_del_init(&phba->sli4_hba.els_cq->list); | 3371 | list_del_init(&phba->sli4_hba.els_cq->list); |
| 3498 | list_del_init(&phba->sli4_hba.rxq_cq->list); | ||
| 3499 | for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) | 3372 | for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) |
| 3500 | list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); | 3373 | list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); |
| 3501 | for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) | 3374 | for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) |
| @@ -3531,9 +3404,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
| 3531 | struct lpfc_sli *psli; | 3404 | struct lpfc_sli *psli; |
| 3532 | volatile uint32_t word0; | 3405 | volatile uint32_t word0; |
| 3533 | void __iomem *to_slim; | 3406 | void __iomem *to_slim; |
| 3407 | uint32_t hba_aer_enabled; | ||
| 3534 | 3408 | ||
| 3535 | spin_lock_irq(&phba->hbalock); | 3409 | spin_lock_irq(&phba->hbalock); |
| 3536 | 3410 | ||
| 3411 | /* Take PCIe device Advanced Error Reporting (AER) state */ | ||
| 3412 | hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; | ||
| 3413 | |||
| 3537 | psli = &phba->sli; | 3414 | psli = &phba->sli; |
| 3538 | 3415 | ||
| 3539 | /* Restart HBA */ | 3416 | /* Restart HBA */ |
| @@ -3573,6 +3450,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
| 3573 | /* Give the INITFF and Post time to settle. */ | 3450 | /* Give the INITFF and Post time to settle. */ |
| 3574 | mdelay(100); | 3451 | mdelay(100); |
| 3575 | 3452 | ||
| 3453 | /* Reset HBA AER if it was enabled, note hba_flag was reset above */ | ||
| 3454 | if (hba_aer_enabled) | ||
| 3455 | pci_disable_pcie_error_reporting(phba->pcidev); | ||
| 3456 | |||
| 3576 | lpfc_hba_down_post(phba); | 3457 | lpfc_hba_down_post(phba); |
| 3577 | 3458 | ||
| 3578 | return 0; | 3459 | return 0; |
| @@ -4042,6 +3923,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) | |||
| 4042 | if (rc) | 3923 | if (rc) |
| 4043 | goto lpfc_sli_hba_setup_error; | 3924 | goto lpfc_sli_hba_setup_error; |
| 4044 | 3925 | ||
| 3926 | /* Enable PCIe device Advanced Error Reporting (AER) if configured */ | ||
| 3927 | if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { | ||
| 3928 | rc = pci_enable_pcie_error_reporting(phba->pcidev); | ||
| 3929 | if (!rc) { | ||
| 3930 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
| 3931 | "2709 This device supports " | ||
| 3932 | "Advanced Error Reporting (AER)\n"); | ||
| 3933 | spin_lock_irq(&phba->hbalock); | ||
| 3934 | phba->hba_flag |= HBA_AER_ENABLED; | ||
| 3935 | spin_unlock_irq(&phba->hbalock); | ||
| 3936 | } else { | ||
| 3937 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
| 3938 | "2708 This device does not support " | ||
| 3939 | "Advanced Error Reporting (AER)\n"); | ||
| 3940 | phba->cfg_aer_support = 0; | ||
| 3941 | } | ||
| 3942 | } | ||
| 3943 | |||
| 4045 | if (phba->sli_rev == 3) { | 3944 | if (phba->sli_rev == 3) { |
| 4046 | phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; | 3945 | phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; |
| 4047 | phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; | 3946 | phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; |
| @@ -4163,7 +4062,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, | |||
| 4163 | * addition, this routine gets the port vpd data. | 4062 | * addition, this routine gets the port vpd data. |
| 4164 | * | 4063 | * |
| 4165 | * Return codes | 4064 | * Return codes |
| 4166 | * 0 - sucessful | 4065 | * 0 - successful |
| 4167 | * ENOMEM - could not allocated memory. | 4066 | * ENOMEM - could not allocated memory. |
| 4168 | **/ | 4067 | **/ |
| 4169 | static int | 4068 | static int |
| @@ -4243,7 +4142,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) | |||
| 4243 | 4142 | ||
| 4244 | lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); | 4143 | lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); |
| 4245 | lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); | 4144 | lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); |
| 4246 | lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); | ||
| 4247 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) | 4145 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) |
| 4248 | lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], | 4146 | lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], |
| 4249 | LPFC_QUEUE_REARM); | 4147 | LPFC_QUEUE_REARM); |
| @@ -4322,6 +4220,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
| 4322 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); | 4220 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
| 4323 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) | 4221 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) |
| 4324 | phba->hba_flag |= HBA_FCOE_SUPPORT; | 4222 | phba->hba_flag |= HBA_FCOE_SUPPORT; |
| 4223 | |||
| 4224 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == | ||
| 4225 | LPFC_DCBX_CEE_MODE) | ||
| 4226 | phba->hba_flag |= HBA_FIP_SUPPORT; | ||
| 4227 | else | ||
| 4228 | phba->hba_flag &= ~HBA_FIP_SUPPORT; | ||
| 4229 | |||
| 4325 | if (phba->sli_rev != LPFC_SLI_REV4 || | 4230 | if (phba->sli_rev != LPFC_SLI_REV4 || |
| 4326 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { | 4231 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { |
| 4327 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4232 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
| @@ -4468,7 +4373,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
| 4468 | rc = lpfc_sli4_post_sgl_list(phba); | 4373 | rc = lpfc_sli4_post_sgl_list(phba); |
| 4469 | if (unlikely(rc)) { | 4374 | if (unlikely(rc)) { |
| 4470 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4375 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
| 4471 | "0582 Error %d during sgl post operation", rc); | 4376 | "0582 Error %d during sgl post operation\n", |
| 4377 | rc); | ||
| 4472 | rc = -ENODEV; | 4378 | rc = -ENODEV; |
| 4473 | goto out_free_vpd; | 4379 | goto out_free_vpd; |
| 4474 | } | 4380 | } |
| @@ -4477,8 +4383,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
| 4477 | rc = lpfc_sli4_repost_scsi_sgl_list(phba); | 4383 | rc = lpfc_sli4_repost_scsi_sgl_list(phba); |
| 4478 | if (unlikely(rc)) { | 4384 | if (unlikely(rc)) { |
| 4479 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, | 4385 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 4480 | "0383 Error %d during scsi sgl post opeation", | 4386 | "0383 Error %d during scsi sgl post " |
| 4481 | rc); | 4387 | "operation\n", rc); |
| 4482 | /* Some Scsi buffers were moved to the abort scsi list */ | 4388 | /* Some Scsi buffers were moved to the abort scsi list */ |
| 4483 | /* A pci function reset will repost them */ | 4389 | /* A pci function reset will repost them */ |
| 4484 | rc = -ENODEV; | 4390 | rc = -ENODEV; |
| @@ -4494,10 +4400,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
| 4494 | rc = -ENODEV; | 4400 | rc = -ENODEV; |
| 4495 | goto out_free_vpd; | 4401 | goto out_free_vpd; |
| 4496 | } | 4402 | } |
| 4497 | if (phba->cfg_enable_fip) | ||
| 4498 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); | ||
| 4499 | else | ||
| 4500 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | ||
| 4501 | 4403 | ||
| 4502 | /* Set up all the queues to the device */ | 4404 | /* Set up all the queues to the device */ |
| 4503 | rc = lpfc_sli4_queue_setup(phba); | 4405 | rc = lpfc_sli4_queue_setup(phba); |
| @@ -5669,7 +5571,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, | |||
| 5669 | case CMD_GEN_REQUEST64_CX: | 5571 | case CMD_GEN_REQUEST64_CX: |
| 5670 | if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || | 5572 | if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || |
| 5671 | (piocb->iocb.un.genreq64.w5.hcsw.Rctl != | 5573 | (piocb->iocb.un.genreq64.w5.hcsw.Rctl != |
| 5672 | FC_FCP_CMND) || | 5574 | FC_RCTL_DD_UNSOL_CMD) || |
| 5673 | (piocb->iocb.un.genreq64.w5.hcsw.Type != | 5575 | (piocb->iocb.un.genreq64.w5.hcsw.Type != |
| 5674 | MENLO_TRANSPORT_TYPE)) | 5576 | MENLO_TRANSPORT_TYPE)) |
| 5675 | 5577 | ||
| @@ -5849,7 +5751,7 @@ static int | |||
| 5849 | lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | 5751 | lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, |
| 5850 | union lpfc_wqe *wqe) | 5752 | union lpfc_wqe *wqe) |
| 5851 | { | 5753 | { |
| 5852 | uint32_t payload_len = 0; | 5754 | uint32_t xmit_len = 0, total_len = 0; |
| 5853 | uint8_t ct = 0; | 5755 | uint8_t ct = 0; |
| 5854 | uint32_t fip; | 5756 | uint32_t fip; |
| 5855 | uint32_t abort_tag; | 5757 | uint32_t abort_tag; |
| @@ -5857,12 +5759,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5857 | uint8_t cmnd; | 5759 | uint8_t cmnd; |
| 5858 | uint16_t xritag; | 5760 | uint16_t xritag; |
| 5859 | struct ulp_bde64 *bpl = NULL; | 5761 | struct ulp_bde64 *bpl = NULL; |
| 5762 | uint32_t els_id = ELS_ID_DEFAULT; | ||
| 5763 | int numBdes, i; | ||
| 5764 | struct ulp_bde64 bde; | ||
| 5860 | 5765 | ||
| 5861 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); | 5766 | fip = phba->hba_flag & HBA_FIP_SUPPORT; |
| 5862 | /* The fcp commands will set command type */ | 5767 | /* The fcp commands will set command type */ |
| 5863 | if (iocbq->iocb_flag & LPFC_IO_FCP) | 5768 | if (iocbq->iocb_flag & LPFC_IO_FCP) |
| 5864 | command_type = FCP_COMMAND; | 5769 | command_type = FCP_COMMAND; |
| 5865 | else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) | 5770 | else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) |
| 5866 | command_type = ELS_COMMAND_FIP; | 5771 | command_type = ELS_COMMAND_FIP; |
| 5867 | else | 5772 | else |
| 5868 | command_type = ELS_COMMAND_NON_FIP; | 5773 | command_type = ELS_COMMAND_NON_FIP; |
| @@ -5874,6 +5779,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5874 | wqe->words[7] = 0; /* The ct field has moved so reset */ | 5779 | wqe->words[7] = 0; /* The ct field has moved so reset */ |
| 5875 | /* words0-2 bpl convert bde */ | 5780 | /* words0-2 bpl convert bde */ |
| 5876 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { | 5781 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { |
| 5782 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | ||
| 5783 | sizeof(struct ulp_bde64); | ||
| 5877 | bpl = (struct ulp_bde64 *) | 5784 | bpl = (struct ulp_bde64 *) |
| 5878 | ((struct lpfc_dmabuf *)iocbq->context3)->virt; | 5785 | ((struct lpfc_dmabuf *)iocbq->context3)->virt; |
| 5879 | if (!bpl) | 5786 | if (!bpl) |
| @@ -5886,9 +5793,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5886 | * can assign it to the sgl. | 5793 | * can assign it to the sgl. |
| 5887 | */ | 5794 | */ |
| 5888 | wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); | 5795 | wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); |
| 5889 | payload_len = wqe->generic.bde.tus.f.bdeSize; | 5796 | xmit_len = wqe->generic.bde.tus.f.bdeSize; |
| 5797 | total_len = 0; | ||
| 5798 | for (i = 0; i < numBdes; i++) { | ||
| 5799 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); | ||
| 5800 | total_len += bde.tus.f.bdeSize; | ||
| 5801 | } | ||
| 5890 | } else | 5802 | } else |
| 5891 | payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; | 5803 | xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; |
| 5892 | 5804 | ||
| 5893 | iocbq->iocb.ulpIoTag = iocbq->iotag; | 5805 | iocbq->iocb.ulpIoTag = iocbq->iotag; |
| 5894 | cmnd = iocbq->iocb.ulpCommand; | 5806 | cmnd = iocbq->iocb.ulpCommand; |
| @@ -5902,7 +5814,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5902 | iocbq->iocb.ulpCommand); | 5814 | iocbq->iocb.ulpCommand); |
| 5903 | return IOCB_ERROR; | 5815 | return IOCB_ERROR; |
| 5904 | } | 5816 | } |
| 5905 | wqe->els_req.payload_len = payload_len; | 5817 | wqe->els_req.payload_len = xmit_len; |
| 5906 | /* Els_reguest64 has a TMO */ | 5818 | /* Els_reguest64 has a TMO */ |
| 5907 | bf_set(wqe_tmo, &wqe->els_req.wqe_com, | 5819 | bf_set(wqe_tmo, &wqe->els_req.wqe_com, |
| 5908 | iocbq->iocb.ulpTimeout); | 5820 | iocbq->iocb.ulpTimeout); |
| @@ -5923,7 +5835,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5923 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); | 5835 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); |
| 5924 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 5836 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
| 5925 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ | 5837 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ |
| 5838 | |||
| 5839 | if (command_type == ELS_COMMAND_FIP) { | ||
| 5840 | els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) | ||
| 5841 | >> LPFC_FIP_ELS_ID_SHIFT); | ||
| 5842 | } | ||
| 5843 | bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); | ||
| 5844 | |||
| 5926 | break; | 5845 | break; |
| 5846 | case CMD_XMIT_SEQUENCE64_CX: | ||
| 5847 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | ||
| 5848 | iocbq->iocb.un.ulpWord[3]); | ||
| 5849 | wqe->generic.word3 = 0; | ||
| 5850 | bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); | ||
| 5851 | bf_set(wqe_xc, &wqe->generic, 1); | ||
| 5852 | /* The entire sequence is transmitted for this IOCB */ | ||
| 5853 | xmit_len = total_len; | ||
| 5854 | cmnd = CMD_XMIT_SEQUENCE64_CR; | ||
| 5927 | case CMD_XMIT_SEQUENCE64_CR: | 5855 | case CMD_XMIT_SEQUENCE64_CR: |
| 5928 | /* word3 iocb=io_tag32 wqe=payload_offset */ | 5856 | /* word3 iocb=io_tag32 wqe=payload_offset */ |
| 5929 | /* payload offset used for multilpe outstanding | 5857 | /* payload offset used for multilpe outstanding |
| @@ -5933,7 +5861,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5933 | /* word4 relative_offset memcpy */ | 5861 | /* word4 relative_offset memcpy */ |
| 5934 | /* word5 r_ctl/df_ctl memcpy */ | 5862 | /* word5 r_ctl/df_ctl memcpy */ |
| 5935 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 5863 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
| 5936 | wqe->xmit_sequence.xmit_len = payload_len; | 5864 | wqe->xmit_sequence.xmit_len = xmit_len; |
| 5865 | command_type = OTHER_COMMAND; | ||
| 5937 | break; | 5866 | break; |
| 5938 | case CMD_XMIT_BCAST64_CN: | 5867 | case CMD_XMIT_BCAST64_CN: |
| 5939 | /* word3 iocb=iotag32 wqe=payload_len */ | 5868 | /* word3 iocb=iotag32 wqe=payload_len */ |
| @@ -5962,7 +5891,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5962 | case CMD_FCP_IREAD64_CR: | 5891 | case CMD_FCP_IREAD64_CR: |
| 5963 | /* FCP_CMD is always the 1st sgl entry */ | 5892 | /* FCP_CMD is always the 1st sgl entry */ |
| 5964 | wqe->fcp_iread.payload_len = | 5893 | wqe->fcp_iread.payload_len = |
| 5965 | payload_len + sizeof(struct fcp_rsp); | 5894 | xmit_len + sizeof(struct fcp_rsp); |
| 5966 | 5895 | ||
| 5967 | /* word 4 (xfer length) should have been set on the memcpy */ | 5896 | /* word 4 (xfer length) should have been set on the memcpy */ |
| 5968 | 5897 | ||
| @@ -5999,7 +5928,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5999 | * sgl[1] = rsp. | 5928 | * sgl[1] = rsp. |
| 6000 | * | 5929 | * |
| 6001 | */ | 5930 | */ |
| 6002 | wqe->gen_req.command_len = payload_len; | 5931 | wqe->gen_req.command_len = xmit_len; |
| 6003 | /* Word4 parameter copied in the memcpy */ | 5932 | /* Word4 parameter copied in the memcpy */ |
| 6004 | /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ | 5933 | /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ |
| 6005 | /* word6 context tag copied in memcpy */ | 5934 | /* word6 context tag copied in memcpy */ |
| @@ -6066,6 +5995,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 6066 | command_type = OTHER_COMMAND; | 5995 | command_type = OTHER_COMMAND; |
| 6067 | xritag = 0; | 5996 | xritag = 0; |
| 6068 | break; | 5997 | break; |
| 5998 | case CMD_XMIT_BLS_RSP64_CX: | ||
| 5999 | /* As BLS ABTS-ACC WQE is very different from other WQEs, | ||
| 6000 | * we re-construct this WQE here based on information in | ||
| 6001 | * iocbq from scratch. | ||
| 6002 | */ | ||
| 6003 | memset(wqe, 0, sizeof(union lpfc_wqe)); | ||
| 6004 | /* OX_ID is invariable to who sent ABTS to CT exchange */ | ||
| 6005 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, | ||
| 6006 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); | ||
| 6007 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == | ||
| 6008 | LPFC_ABTS_UNSOL_INT) { | ||
| 6009 | /* ABTS sent by initiator to CT exchange, the | ||
| 6010 | * RX_ID field will be filled with the newly | ||
| 6011 | * allocated responder XRI. | ||
| 6012 | */ | ||
| 6013 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, | ||
| 6014 | iocbq->sli4_xritag); | ||
| 6015 | } else { | ||
| 6016 | /* ABTS sent by responder to CT exchange, the | ||
| 6017 | * RX_ID field will be filled with the responder | ||
| 6018 | * RX_ID from ABTS. | ||
| 6019 | */ | ||
| 6020 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, | ||
| 6021 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); | ||
| 6022 | } | ||
| 6023 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); | ||
| 6024 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); | ||
| 6025 | bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, | ||
| 6026 | iocbq->iocb.ulpContext); | ||
| 6027 | /* Overwrite the pre-set command type with OTHER_COMMAND */ | ||
| 6028 | command_type = OTHER_COMMAND; | ||
| 6029 | break; | ||
| 6069 | case CMD_XRI_ABORTED_CX: | 6030 | case CMD_XRI_ABORTED_CX: |
| 6070 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 6031 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
| 6071 | /* words0-2 are all 0's no bde */ | 6032 | /* words0-2 are all 0's no bde */ |
| @@ -6120,11 +6081,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
| 6120 | uint16_t xritag; | 6081 | uint16_t xritag; |
| 6121 | union lpfc_wqe wqe; | 6082 | union lpfc_wqe wqe; |
| 6122 | struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; | 6083 | struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; |
| 6123 | uint32_t fcp_wqidx; | ||
| 6124 | 6084 | ||
| 6125 | if (piocb->sli4_xritag == NO_XRI) { | 6085 | if (piocb->sli4_xritag == NO_XRI) { |
| 6126 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || | 6086 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
| 6127 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) | 6087 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) |
| 6128 | sglq = NULL; | 6088 | sglq = NULL; |
| 6129 | else { | 6089 | else { |
| 6130 | sglq = __lpfc_sli_get_sglq(phba); | 6090 | sglq = __lpfc_sli_get_sglq(phba); |
| @@ -6155,8 +6115,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
| 6155 | return IOCB_ERROR; | 6115 | return IOCB_ERROR; |
| 6156 | 6116 | ||
| 6157 | if (piocb->iocb_flag & LPFC_IO_FCP) { | 6117 | if (piocb->iocb_flag & LPFC_IO_FCP) { |
| 6158 | fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); | 6118 | /* |
| 6159 | if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) | 6119 | * For FCP command IOCB, get a new WQ index to distribute |
| 6120 | * WQE across the WQsr. On the other hand, for abort IOCB, | ||
| 6121 | * it carries the same WQ index to the original command | ||
| 6122 | * IOCB. | ||
| 6123 | */ | ||
| 6124 | if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && | ||
| 6125 | (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) | ||
| 6126 | piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); | ||
| 6127 | if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], | ||
| 6128 | &wqe)) | ||
| 6160 | return IOCB_ERROR; | 6129 | return IOCB_ERROR; |
| 6161 | } else { | 6130 | } else { |
| 6162 | if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) | 6131 | if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) |
| @@ -6449,31 +6418,37 @@ lpfc_sli_setup(struct lpfc_hba *phba) | |||
| 6449 | pring->iotag_max = 4096; | 6418 | pring->iotag_max = 4096; |
| 6450 | pring->lpfc_sli_rcv_async_status = | 6419 | pring->lpfc_sli_rcv_async_status = |
| 6451 | lpfc_sli_async_event_handler; | 6420 | lpfc_sli_async_event_handler; |
| 6452 | pring->num_mask = 4; | 6421 | pring->num_mask = LPFC_MAX_RING_MASK; |
| 6453 | pring->prt[0].profile = 0; /* Mask 0 */ | 6422 | pring->prt[0].profile = 0; /* Mask 0 */ |
| 6454 | pring->prt[0].rctl = FC_ELS_REQ; | 6423 | pring->prt[0].rctl = FC_RCTL_ELS_REQ; |
| 6455 | pring->prt[0].type = FC_ELS_DATA; | 6424 | pring->prt[0].type = FC_TYPE_ELS; |
| 6456 | pring->prt[0].lpfc_sli_rcv_unsol_event = | 6425 | pring->prt[0].lpfc_sli_rcv_unsol_event = |
| 6457 | lpfc_els_unsol_event; | 6426 | lpfc_els_unsol_event; |
| 6458 | pring->prt[1].profile = 0; /* Mask 1 */ | 6427 | pring->prt[1].profile = 0; /* Mask 1 */ |
| 6459 | pring->prt[1].rctl = FC_ELS_RSP; | 6428 | pring->prt[1].rctl = FC_RCTL_ELS_REP; |
| 6460 | pring->prt[1].type = FC_ELS_DATA; | 6429 | pring->prt[1].type = FC_TYPE_ELS; |
| 6461 | pring->prt[1].lpfc_sli_rcv_unsol_event = | 6430 | pring->prt[1].lpfc_sli_rcv_unsol_event = |
| 6462 | lpfc_els_unsol_event; | 6431 | lpfc_els_unsol_event; |
| 6463 | pring->prt[2].profile = 0; /* Mask 2 */ | 6432 | pring->prt[2].profile = 0; /* Mask 2 */ |
| 6464 | /* NameServer Inquiry */ | 6433 | /* NameServer Inquiry */ |
| 6465 | pring->prt[2].rctl = FC_UNSOL_CTL; | 6434 | pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; |
| 6466 | /* NameServer */ | 6435 | /* NameServer */ |
| 6467 | pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; | 6436 | pring->prt[2].type = FC_TYPE_CT; |
| 6468 | pring->prt[2].lpfc_sli_rcv_unsol_event = | 6437 | pring->prt[2].lpfc_sli_rcv_unsol_event = |
| 6469 | lpfc_ct_unsol_event; | 6438 | lpfc_ct_unsol_event; |
| 6470 | pring->prt[3].profile = 0; /* Mask 3 */ | 6439 | pring->prt[3].profile = 0; /* Mask 3 */ |
| 6471 | /* NameServer response */ | 6440 | /* NameServer response */ |
| 6472 | pring->prt[3].rctl = FC_SOL_CTL; | 6441 | pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; |
| 6473 | /* NameServer */ | 6442 | /* NameServer */ |
| 6474 | pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; | 6443 | pring->prt[3].type = FC_TYPE_CT; |
| 6475 | pring->prt[3].lpfc_sli_rcv_unsol_event = | 6444 | pring->prt[3].lpfc_sli_rcv_unsol_event = |
| 6476 | lpfc_ct_unsol_event; | 6445 | lpfc_ct_unsol_event; |
| 6446 | /* abort unsolicited sequence */ | ||
| 6447 | pring->prt[4].profile = 0; /* Mask 4 */ | ||
| 6448 | pring->prt[4].rctl = FC_RCTL_BA_ABTS; | ||
| 6449 | pring->prt[4].type = FC_TYPE_BLS; | ||
| 6450 | pring->prt[4].lpfc_sli_rcv_unsol_event = | ||
| 6451 | lpfc_sli4_ct_abort_unsol_event; | ||
| 6477 | break; | 6452 | break; |
| 6478 | } | 6453 | } |
| 6479 | totiocbsize += (pring->numCiocb * pring->sizeCiocb) + | 6454 | totiocbsize += (pring->numCiocb * pring->sizeCiocb) + |
| @@ -6976,8 +6951,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 6976 | abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; | 6951 | abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; |
| 6977 | 6952 | ||
| 6978 | spin_lock_irq(&phba->hbalock); | 6953 | spin_lock_irq(&phba->hbalock); |
| 6979 | if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) | 6954 | if (phba->sli_rev < LPFC_SLI_REV4) { |
| 6980 | abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; | 6955 | if (abort_iotag != 0 && |
| 6956 | abort_iotag <= phba->sli.last_iotag) | ||
| 6957 | abort_iocb = | ||
| 6958 | phba->sli.iocbq_lookup[abort_iotag]; | ||
| 6959 | } else | ||
| 6960 | /* For sli4 the abort_tag is the XRI, | ||
| 6961 | * so the abort routine puts the iotag of the iocb | ||
| 6962 | * being aborted in the context field of the abort | ||
| 6963 | * IOCB. | ||
| 6964 | */ | ||
| 6965 | abort_iocb = phba->sli.iocbq_lookup[abort_context]; | ||
| 6981 | 6966 | ||
| 6982 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, | 6967 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, |
| 6983 | "0327 Cannot abort els iocb %p " | 6968 | "0327 Cannot abort els iocb %p " |
| @@ -6991,9 +6976,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 6991 | * might have completed already. Do not free it again. | 6976 | * might have completed already. Do not free it again. |
| 6992 | */ | 6977 | */ |
| 6993 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | 6978 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { |
| 6994 | spin_unlock_irq(&phba->hbalock); | 6979 | if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { |
| 6995 | lpfc_sli_release_iocbq(phba, cmdiocb); | 6980 | spin_unlock_irq(&phba->hbalock); |
| 6996 | return; | 6981 | lpfc_sli_release_iocbq(phba, cmdiocb); |
| 6982 | return; | ||
| 6983 | } | ||
| 6984 | /* For SLI4 the ulpContext field for abort IOCB | ||
| 6985 | * holds the iotag of the IOCB being aborted so | ||
| 6986 | * the local abort_context needs to be reset to | ||
| 6987 | * match the aborted IOCBs ulpContext. | ||
| 6988 | */ | ||
| 6989 | if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) | ||
| 6990 | abort_context = abort_iocb->iocb.ulpContext; | ||
| 6997 | } | 6991 | } |
| 6998 | /* | 6992 | /* |
| 6999 | * make sure we have the right iocbq before taking it | 6993 | * make sure we have the right iocbq before taking it |
| @@ -7112,13 +7106,18 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
| 7112 | iabt = &abtsiocbp->iocb; | 7106 | iabt = &abtsiocbp->iocb; |
| 7113 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; | 7107 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; |
| 7114 | iabt->un.acxri.abortContextTag = icmd->ulpContext; | 7108 | iabt->un.acxri.abortContextTag = icmd->ulpContext; |
| 7115 | if (phba->sli_rev == LPFC_SLI_REV4) | 7109 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 7116 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; | 7110 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; |
| 7111 | iabt->un.acxri.abortContextTag = cmdiocb->iotag; | ||
| 7112 | } | ||
| 7117 | else | 7113 | else |
| 7118 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; | 7114 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; |
| 7119 | iabt->ulpLe = 1; | 7115 | iabt->ulpLe = 1; |
| 7120 | iabt->ulpClass = icmd->ulpClass; | 7116 | iabt->ulpClass = icmd->ulpClass; |
| 7121 | 7117 | ||
| 7118 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | ||
| 7119 | abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; | ||
| 7120 | |||
| 7122 | if (phba->link_state >= LPFC_LINK_UP) | 7121 | if (phba->link_state >= LPFC_LINK_UP) |
| 7123 | iabt->ulpCommand = CMD_ABORT_XRI_CN; | 7122 | iabt->ulpCommand = CMD_ABORT_XRI_CN; |
| 7124 | else | 7123 | else |
| @@ -7322,6 +7321,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
| 7322 | abtsiocb->iocb.ulpClass = cmd->ulpClass; | 7321 | abtsiocb->iocb.ulpClass = cmd->ulpClass; |
| 7323 | abtsiocb->vport = phba->pport; | 7322 | abtsiocb->vport = phba->pport; |
| 7324 | 7323 | ||
| 7324 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | ||
| 7325 | abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; | ||
| 7326 | |||
| 7325 | if (lpfc_is_link_up(phba)) | 7327 | if (lpfc_is_link_up(phba)) |
| 7326 | abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; | 7328 | abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; |
| 7327 | else | 7329 | else |
| @@ -7687,31 +7689,28 @@ static int | |||
| 7687 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) | 7689 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) |
| 7688 | { | 7690 | { |
| 7689 | uint32_t uerr_sta_hi, uerr_sta_lo; | 7691 | uint32_t uerr_sta_hi, uerr_sta_lo; |
| 7690 | uint32_t onlnreg0, onlnreg1; | ||
| 7691 | 7692 | ||
| 7692 | /* For now, use the SLI4 device internal unrecoverable error | 7693 | /* For now, use the SLI4 device internal unrecoverable error |
| 7693 | * registers for error attention. This can be changed later. | 7694 | * registers for error attention. This can be changed later. |
| 7694 | */ | 7695 | */ |
| 7695 | onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); | 7696 | uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); |
| 7696 | onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); | 7697 | uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); |
| 7697 | if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { | 7698 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
| 7698 | uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); | 7699 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
| 7699 | uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); | 7700 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 7700 | if (uerr_sta_lo || uerr_sta_hi) { | 7701 | "1423 HBA Unrecoverable error: " |
| 7701 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7702 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " |
| 7702 | "1423 HBA Unrecoverable error: " | 7703 | "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", |
| 7703 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | 7704 | uerr_sta_lo, uerr_sta_hi, |
| 7704 | "online0_reg=0x%x, online1_reg=0x%x\n", | 7705 | phba->sli4_hba.ue_mask_lo, |
| 7705 | uerr_sta_lo, uerr_sta_hi, | 7706 | phba->sli4_hba.ue_mask_hi); |
| 7706 | onlnreg0, onlnreg1); | 7707 | phba->work_status[0] = uerr_sta_lo; |
| 7707 | phba->work_status[0] = uerr_sta_lo; | 7708 | phba->work_status[1] = uerr_sta_hi; |
| 7708 | phba->work_status[1] = uerr_sta_hi; | 7709 | /* Set the driver HA work bitmap */ |
| 7709 | /* Set the driver HA work bitmap */ | 7710 | phba->work_ha |= HA_ERATT; |
| 7710 | phba->work_ha |= HA_ERATT; | 7711 | /* Indicate polling handles this ERATT */ |
| 7711 | /* Indicate polling handles this ERATT */ | 7712 | phba->hba_flag |= HBA_ERATT_HANDLED; |
| 7712 | phba->hba_flag |= HBA_ERATT_HANDLED; | 7713 | return 1; |
| 7713 | return 1; | ||
| 7714 | } | ||
| 7715 | } | 7714 | } |
| 7716 | return 0; | 7715 | return 0; |
| 7717 | } | 7716 | } |
| @@ -7834,7 +7833,7 @@ irqreturn_t | |||
| 7834 | lpfc_sli_sp_intr_handler(int irq, void *dev_id) | 7833 | lpfc_sli_sp_intr_handler(int irq, void *dev_id) |
| 7835 | { | 7834 | { |
| 7836 | struct lpfc_hba *phba; | 7835 | struct lpfc_hba *phba; |
| 7837 | uint32_t ha_copy; | 7836 | uint32_t ha_copy, hc_copy; |
| 7838 | uint32_t work_ha_copy; | 7837 | uint32_t work_ha_copy; |
| 7839 | unsigned long status; | 7838 | unsigned long status; |
| 7840 | unsigned long iflag; | 7839 | unsigned long iflag; |
| @@ -7892,8 +7891,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
| 7892 | } | 7891 | } |
| 7893 | 7892 | ||
| 7894 | /* Clear up only attention source related to slow-path */ | 7893 | /* Clear up only attention source related to slow-path */ |
| 7894 | hc_copy = readl(phba->HCregaddr); | ||
| 7895 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | | ||
| 7896 | HC_LAINT_ENA | HC_ERINT_ENA), | ||
| 7897 | phba->HCregaddr); | ||
| 7895 | writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), | 7898 | writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), |
| 7896 | phba->HAregaddr); | 7899 | phba->HAregaddr); |
| 7900 | writel(hc_copy, phba->HCregaddr); | ||
| 7897 | readl(phba->HAregaddr); /* flush */ | 7901 | readl(phba->HAregaddr); /* flush */ |
| 7898 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 7902 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| 7899 | } else | 7903 | } else |
| @@ -8049,7 +8053,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
| 8049 | KERN_ERR, | 8053 | KERN_ERR, |
| 8050 | LOG_MBOX | LOG_SLI, | 8054 | LOG_MBOX | LOG_SLI, |
| 8051 | "0350 rc should have" | 8055 | "0350 rc should have" |
| 8052 | "been MBX_BUSY"); | 8056 | "been MBX_BUSY\n"); |
| 8053 | if (rc != MBX_NOT_FINISHED) | 8057 | if (rc != MBX_NOT_FINISHED) |
| 8054 | goto send_current_mbox; | 8058 | goto send_current_mbox; |
| 8055 | } | 8059 | } |
| @@ -8078,7 +8082,7 @@ send_current_mbox: | |||
| 8078 | if (rc != MBX_SUCCESS) | 8082 | if (rc != MBX_SUCCESS) |
| 8079 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | | 8083 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | |
| 8080 | LOG_SLI, "0349 rc should be " | 8084 | LOG_SLI, "0349 rc should be " |
| 8081 | "MBX_SUCCESS"); | 8085 | "MBX_SUCCESS\n"); |
| 8082 | } | 8086 | } |
| 8083 | 8087 | ||
| 8084 | spin_lock_irqsave(&phba->hbalock, iflag); | 8088 | spin_lock_irqsave(&phba->hbalock, iflag); |
| @@ -8203,6 +8207,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
| 8203 | struct lpfc_hba *phba; | 8207 | struct lpfc_hba *phba; |
| 8204 | irqreturn_t sp_irq_rc, fp_irq_rc; | 8208 | irqreturn_t sp_irq_rc, fp_irq_rc; |
| 8205 | unsigned long status1, status2; | 8209 | unsigned long status1, status2; |
| 8210 | uint32_t hc_copy; | ||
| 8206 | 8211 | ||
| 8207 | /* | 8212 | /* |
| 8208 | * Get the driver's phba structure from the dev_id and | 8213 | * Get the driver's phba structure from the dev_id and |
| @@ -8240,7 +8245,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
| 8240 | } | 8245 | } |
| 8241 | 8246 | ||
| 8242 | /* Clear attention sources except link and error attentions */ | 8247 | /* Clear attention sources except link and error attentions */ |
| 8248 | hc_copy = readl(phba->HCregaddr); | ||
| 8249 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | ||
| 8250 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), | ||
| 8251 | phba->HCregaddr); | ||
| 8243 | writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); | 8252 | writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); |
| 8253 | writel(hc_copy, phba->HCregaddr); | ||
| 8244 | readl(phba->HAregaddr); /* flush */ | 8254 | readl(phba->HAregaddr); /* flush */ |
| 8245 | spin_unlock(&phba->hbalock); | 8255 | spin_unlock(&phba->hbalock); |
| 8246 | 8256 | ||
| @@ -8351,8 +8361,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | |||
| 8351 | 8361 | ||
| 8352 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, | 8362 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, |
| 8353 | sizeof(struct lpfc_iocbq) - offset); | 8363 | sizeof(struct lpfc_iocbq) - offset); |
| 8354 | memset(&pIocbIn->sli4_info, 0, | ||
| 8355 | sizeof(struct lpfc_sli4_rspiocb_info)); | ||
| 8356 | /* Map WCQE parameters into irspiocb parameters */ | 8364 | /* Map WCQE parameters into irspiocb parameters */ |
| 8357 | pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); | 8365 | pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); |
| 8358 | if (pIocbOut->iocb_flag & LPFC_IO_FCP) | 8366 | if (pIocbOut->iocb_flag & LPFC_IO_FCP) |
| @@ -8364,16 +8372,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | |||
| 8364 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; | 8372 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; |
| 8365 | else | 8373 | else |
| 8366 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; | 8374 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; |
| 8367 | /* Load in additional WCQE parameters */ | 8375 | } |
| 8368 | pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); | 8376 | |
| 8369 | pIocbIn->sli4_info.bfield = 0; | 8377 | /** |
| 8370 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) | 8378 | * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe |
| 8371 | pIocbIn->sli4_info.bfield |= LPFC_XB; | 8379 | * @phba: Pointer to HBA context object. |
| 8372 | if (bf_get(lpfc_wcqe_c_pv, wcqe)) { | 8380 | * @wcqe: Pointer to work-queue completion queue entry. |
| 8373 | pIocbIn->sli4_info.bfield |= LPFC_PV; | 8381 | * |
| 8374 | pIocbIn->sli4_info.priority = | 8382 | * This routine handles an ELS work-queue completion event and constructs |
| 8375 | bf_get(lpfc_wcqe_c_priority, wcqe); | 8383 | * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common |
| 8384 | * discovery engine to handle. | ||
| 8385 | * | ||
| 8386 | * Return: Pointer to the receive IOCBQ, NULL otherwise. | ||
| 8387 | **/ | ||
| 8388 | static struct lpfc_iocbq * | ||
| 8389 | lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, | ||
| 8390 | struct lpfc_iocbq *irspiocbq) | ||
| 8391 | { | ||
| 8392 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
| 8393 | struct lpfc_iocbq *cmdiocbq; | ||
| 8394 | struct lpfc_wcqe_complete *wcqe; | ||
| 8395 | unsigned long iflags; | ||
| 8396 | |||
| 8397 | wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; | ||
| 8398 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
| 8399 | pring->stats.iocb_event++; | ||
| 8400 | /* Look up the ELS command IOCB and create pseudo response IOCB */ | ||
| 8401 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, | ||
| 8402 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
| 8403 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
| 8404 | |||
| 8405 | if (unlikely(!cmdiocbq)) { | ||
| 8406 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
| 8407 | "0386 ELS complete with no corresponding " | ||
| 8408 | "cmdiocb: iotag (%d)\n", | ||
| 8409 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
| 8410 | lpfc_sli_release_iocbq(phba, irspiocbq); | ||
| 8411 | return NULL; | ||
| 8376 | } | 8412 | } |
| 8413 | |||
| 8414 | /* Fake the irspiocbq and copy necessary response information */ | ||
| 8415 | lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); | ||
| 8416 | |||
| 8417 | return irspiocbq; | ||
| 8377 | } | 8418 | } |
| 8378 | 8419 | ||
| 8379 | /** | 8420 | /** |
| @@ -8566,45 +8607,26 @@ static bool | |||
| 8566 | lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, | 8607 | lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, |
| 8567 | struct lpfc_wcqe_complete *wcqe) | 8608 | struct lpfc_wcqe_complete *wcqe) |
| 8568 | { | 8609 | { |
| 8569 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
| 8570 | struct lpfc_iocbq *cmdiocbq; | ||
| 8571 | struct lpfc_iocbq *irspiocbq; | 8610 | struct lpfc_iocbq *irspiocbq; |
| 8572 | unsigned long iflags; | 8611 | unsigned long iflags; |
| 8573 | bool workposted = false; | ||
| 8574 | |||
| 8575 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
| 8576 | pring->stats.iocb_event++; | ||
| 8577 | /* Look up the ELS command IOCB and create pseudo response IOCB */ | ||
| 8578 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, | ||
| 8579 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
| 8580 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
| 8581 | 8612 | ||
| 8582 | if (unlikely(!cmdiocbq)) { | 8613 | /* Get an irspiocbq for later ELS response processing use */ |
| 8583 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
| 8584 | "0386 ELS complete with no corresponding " | ||
| 8585 | "cmdiocb: iotag (%d)\n", | ||
| 8586 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
| 8587 | return workposted; | ||
| 8588 | } | ||
| 8589 | |||
| 8590 | /* Fake the irspiocbq and copy necessary response information */ | ||
| 8591 | irspiocbq = lpfc_sli_get_iocbq(phba); | 8614 | irspiocbq = lpfc_sli_get_iocbq(phba); |
| 8592 | if (!irspiocbq) { | 8615 | if (!irspiocbq) { |
| 8593 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 8616 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 8594 | "0387 Failed to allocate an iocbq\n"); | 8617 | "0387 Failed to allocate an iocbq\n"); |
| 8595 | return workposted; | 8618 | return false; |
| 8596 | } | 8619 | } |
| 8597 | lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); | ||
| 8598 | 8620 | ||
| 8599 | /* Add the irspiocb to the response IOCB work list */ | 8621 | /* Save off the slow-path queue event for work thread to process */ |
| 8622 | memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); | ||
| 8600 | spin_lock_irqsave(&phba->hbalock, iflags); | 8623 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 8601 | list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); | 8624 | list_add_tail(&irspiocbq->cq_event.list, |
| 8602 | /* Indicate ELS ring attention */ | 8625 | &phba->sli4_hba.sp_queue_event); |
| 8603 | phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); | 8626 | phba->hba_flag |= HBA_SP_QUEUE_EVT; |
| 8604 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 8627 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
| 8605 | workposted = true; | ||
| 8606 | 8628 | ||
| 8607 | return workposted; | 8629 | return true; |
| 8608 | } | 8630 | } |
| 8609 | 8631 | ||
| 8610 | /** | 8632 | /** |
| @@ -8690,52 +8712,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, | |||
| 8690 | } | 8712 | } |
| 8691 | 8713 | ||
| 8692 | /** | 8714 | /** |
| 8693 | * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry | ||
| 8694 | * @phba: Pointer to HBA context object. | ||
| 8695 | * @cq: Pointer to the completion queue. | ||
| 8696 | * @wcqe: Pointer to a completion queue entry. | ||
| 8697 | * | ||
| 8698 | * This routine process a slow-path work-queue completion queue entry. | ||
| 8699 | * | ||
| 8700 | * Return: true if work posted to worker thread, otherwise false. | ||
| 8701 | **/ | ||
| 8702 | static bool | ||
| 8703 | lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | ||
| 8704 | struct lpfc_cqe *cqe) | ||
| 8705 | { | ||
| 8706 | struct lpfc_wcqe_complete wcqe; | ||
| 8707 | bool workposted = false; | ||
| 8708 | |||
| 8709 | /* Copy the work queue CQE and convert endian order if needed */ | ||
| 8710 | lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); | ||
| 8711 | |||
| 8712 | /* Check and process for different type of WCQE and dispatch */ | ||
| 8713 | switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { | ||
| 8714 | case CQE_CODE_COMPL_WQE: | ||
| 8715 | /* Process the WQ complete event */ | ||
| 8716 | workposted = lpfc_sli4_sp_handle_els_wcqe(phba, | ||
| 8717 | (struct lpfc_wcqe_complete *)&wcqe); | ||
| 8718 | break; | ||
| 8719 | case CQE_CODE_RELEASE_WQE: | ||
| 8720 | /* Process the WQ release event */ | ||
| 8721 | lpfc_sli4_sp_handle_rel_wcqe(phba, | ||
| 8722 | (struct lpfc_wcqe_release *)&wcqe); | ||
| 8723 | break; | ||
| 8724 | case CQE_CODE_XRI_ABORTED: | ||
| 8725 | /* Process the WQ XRI abort event */ | ||
| 8726 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, | ||
| 8727 | (struct sli4_wcqe_xri_aborted *)&wcqe); | ||
| 8728 | break; | ||
| 8729 | default: | ||
| 8730 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 8731 | "0388 Not a valid WCQE code: x%x\n", | ||
| 8732 | bf_get(lpfc_wcqe_c_code, &wcqe)); | ||
| 8733 | break; | ||
| 8734 | } | ||
| 8735 | return workposted; | ||
| 8736 | } | ||
| 8737 | |||
| 8738 | /** | ||
| 8739 | * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry | 8715 | * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry |
| 8740 | * @phba: Pointer to HBA context object. | 8716 | * @phba: Pointer to HBA context object. |
| 8741 | * @rcqe: Pointer to receive-queue completion queue entry. | 8717 | * @rcqe: Pointer to receive-queue completion queue entry. |
| @@ -8745,9 +8721,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 8745 | * Return: true if work posted to worker thread, otherwise false. | 8721 | * Return: true if work posted to worker thread, otherwise false. |
| 8746 | **/ | 8722 | **/ |
| 8747 | static bool | 8723 | static bool |
| 8748 | lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) | 8724 | lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) |
| 8749 | { | 8725 | { |
| 8750 | struct lpfc_rcqe rcqe; | ||
| 8751 | bool workposted = false; | 8726 | bool workposted = false; |
| 8752 | struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; | 8727 | struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; |
| 8753 | struct lpfc_queue *drq = phba->sli4_hba.dat_rq; | 8728 | struct lpfc_queue *drq = phba->sli4_hba.dat_rq; |
| @@ -8755,31 +8730,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) | |||
| 8755 | uint32_t status; | 8730 | uint32_t status; |
| 8756 | unsigned long iflags; | 8731 | unsigned long iflags; |
| 8757 | 8732 | ||
| 8758 | /* Copy the receive queue CQE and convert endian order if needed */ | 8733 | if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) |
| 8759 | lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe)); | ||
| 8760 | lpfc_sli4_rq_release(hrq, drq); | ||
| 8761 | if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE) | ||
| 8762 | goto out; | ||
| 8763 | if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id) | ||
| 8764 | goto out; | 8734 | goto out; |
| 8765 | 8735 | ||
| 8766 | status = bf_get(lpfc_rcqe_status, &rcqe); | 8736 | status = bf_get(lpfc_rcqe_status, rcqe); |
| 8767 | switch (status) { | 8737 | switch (status) { |
| 8768 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 8738 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 8769 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 8739 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 8770 | "2537 Receive Frame Truncated!!\n"); | 8740 | "2537 Receive Frame Truncated!!\n"); |
| 8771 | case FC_STATUS_RQ_SUCCESS: | 8741 | case FC_STATUS_RQ_SUCCESS: |
| 8742 | lpfc_sli4_rq_release(hrq, drq); | ||
| 8772 | spin_lock_irqsave(&phba->hbalock, iflags); | 8743 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 8773 | dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); | 8744 | dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); |
| 8774 | if (!dma_buf) { | 8745 | if (!dma_buf) { |
| 8775 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 8746 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
| 8776 | goto out; | 8747 | goto out; |
| 8777 | } | 8748 | } |
| 8778 | memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); | 8749 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); |
| 8779 | /* save off the frame for the worker thread to process */ | 8750 | /* save off the frame for the worker thread to process */ |
| 8780 | list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); | 8751 | list_add_tail(&dma_buf->cq_event.list, |
| 8752 | &phba->sli4_hba.sp_queue_event); | ||
| 8781 | /* Frame received */ | 8753 | /* Frame received */ |
| 8782 | phba->hba_flag |= HBA_RECEIVE_BUFFER; | 8754 | phba->hba_flag |= HBA_SP_QUEUE_EVT; |
| 8783 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 8755 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
| 8784 | workposted = true; | 8756 | workposted = true; |
| 8785 | break; | 8757 | break; |
| @@ -8794,7 +8766,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) | |||
| 8794 | } | 8766 | } |
| 8795 | out: | 8767 | out: |
| 8796 | return workposted; | 8768 | return workposted; |
| 8769 | } | ||
| 8770 | |||
| 8771 | /** | ||
| 8772 | * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry | ||
| 8773 | * @phba: Pointer to HBA context object. | ||
| 8774 | * @cq: Pointer to the completion queue. | ||
| 8775 | * @wcqe: Pointer to a completion queue entry. | ||
| 8776 | * | ||
| 8777 | * This routine processes a slow-path work-queue or receive queue completion queue | ||
| 8778 | * entry. | ||
| 8779 | * | ||
| 8780 | * Return: true if work posted to worker thread, otherwise false. | ||
| 8781 | **/ | ||
| 8782 | static bool | ||
| 8783 | lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | ||
| 8784 | struct lpfc_cqe *cqe) | ||
| 8785 | { | ||
| 8786 | struct lpfc_cqe cqevt; | ||
| 8787 | bool workposted = false; | ||
| 8788 | |||
| 8789 | /* Copy the work queue CQE and convert endian order if needed */ | ||
| 8790 | lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); | ||
| 8797 | 8791 | ||
| 8792 | /* Check and process for different type of WCQE and dispatch */ | ||
| 8793 | switch (bf_get(lpfc_cqe_code, &cqevt)) { | ||
| 8794 | case CQE_CODE_COMPL_WQE: | ||
| 8795 | /* Process the WQ/RQ complete event */ | ||
| 8796 | workposted = lpfc_sli4_sp_handle_els_wcqe(phba, | ||
| 8797 | (struct lpfc_wcqe_complete *)&cqevt); | ||
| 8798 | break; | ||
| 8799 | case CQE_CODE_RELEASE_WQE: | ||
| 8800 | /* Process the WQ release event */ | ||
| 8801 | lpfc_sli4_sp_handle_rel_wcqe(phba, | ||
| 8802 | (struct lpfc_wcqe_release *)&cqevt); | ||
| 8803 | break; | ||
| 8804 | case CQE_CODE_XRI_ABORTED: | ||
| 8805 | /* Process the WQ XRI abort event */ | ||
| 8806 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, | ||
| 8807 | (struct sli4_wcqe_xri_aborted *)&cqevt); | ||
| 8808 | break; | ||
| 8809 | case CQE_CODE_RECEIVE: | ||
| 8810 | /* Process the RQ event */ | ||
| 8811 | workposted = lpfc_sli4_sp_handle_rcqe(phba, | ||
| 8812 | (struct lpfc_rcqe *)&cqevt); | ||
| 8813 | break; | ||
| 8814 | default: | ||
| 8815 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 8816 | "0388 Not a valid WCQE code: x%x\n", | ||
| 8817 | bf_get(lpfc_cqe_code, &cqevt)); | ||
| 8818 | break; | ||
| 8819 | } | ||
| 8820 | return workposted; | ||
| 8798 | } | 8821 | } |
| 8799 | 8822 | ||
| 8800 | /** | 8823 | /** |
| @@ -8858,14 +8881,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) | |||
| 8858 | break; | 8881 | break; |
| 8859 | case LPFC_WCQ: | 8882 | case LPFC_WCQ: |
| 8860 | while ((cqe = lpfc_sli4_cq_get(cq))) { | 8883 | while ((cqe = lpfc_sli4_cq_get(cq))) { |
| 8861 | workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); | 8884 | workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); |
| 8862 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
| 8863 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | ||
| 8864 | } | ||
| 8865 | break; | ||
| 8866 | case LPFC_RCQ: | ||
| 8867 | while ((cqe = lpfc_sli4_cq_get(cq))) { | ||
| 8868 | workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe); | ||
| 8869 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | 8885 | if (!(++ecount % LPFC_GET_QE_REL_INT)) |
| 8870 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | 8886 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); |
| 8871 | } | 8887 | } |
| @@ -10427,8 +10443,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) | |||
| 10427 | return xritag; | 10443 | return xritag; |
| 10428 | } | 10444 | } |
| 10429 | spin_unlock_irq(&phba->hbalock); | 10445 | spin_unlock_irq(&phba->hbalock); |
| 10430 | 10446 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | |
| 10431 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 10432 | "2004 Failed to allocate XRI.last XRITAG is %d" | 10447 | "2004 Failed to allocate XRI.last XRITAG is %d" |
| 10433 | " Max XRI is %d, Used XRI is %d\n", | 10448 | " Max XRI is %d, Used XRI is %d\n", |
| 10434 | phba->sli4_hba.next_xri, | 10449 | phba->sli4_hba.next_xri, |
| @@ -10492,15 +10507,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) | |||
| 10492 | lpfc_sli4_mbox_cmd_free(phba, mbox); | 10507 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 10493 | return -ENOMEM; | 10508 | return -ENOMEM; |
| 10494 | } | 10509 | } |
| 10495 | |||
| 10496 | /* Get the first SGE entry from the non-embedded DMA memory */ | 10510 | /* Get the first SGE entry from the non-embedded DMA memory */ |
| 10497 | if (unlikely(!mbox->sge_array)) { | ||
| 10498 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
| 10499 | "2525 Failed to get the non-embedded SGE " | ||
| 10500 | "virtual address\n"); | ||
| 10501 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
| 10502 | return -ENOMEM; | ||
| 10503 | } | ||
| 10504 | viraddr = mbox->sge_array->addr[0]; | 10511 | viraddr = mbox->sge_array->addr[0]; |
| 10505 | 10512 | ||
| 10506 | /* Set up the SGL pages in the non-embedded DMA pages */ | 10513 | /* Set up the SGL pages in the non-embedded DMA pages */ |
| @@ -10524,8 +10531,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) | |||
| 10524 | sgl_pg_pairs++; | 10531 | sgl_pg_pairs++; |
| 10525 | } | 10532 | } |
| 10526 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); | 10533 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); |
| 10527 | pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; | 10534 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); |
| 10528 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); | ||
| 10529 | /* Perform endian conversion if necessary */ | 10535 | /* Perform endian conversion if necessary */ |
| 10530 | sgl->word0 = cpu_to_le32(sgl->word0); | 10536 | sgl->word0 = cpu_to_le32(sgl->word0); |
| 10531 | 10537 | ||
| @@ -10607,15 +10613,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, | |||
| 10607 | lpfc_sli4_mbox_cmd_free(phba, mbox); | 10613 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 10608 | return -ENOMEM; | 10614 | return -ENOMEM; |
| 10609 | } | 10615 | } |
| 10610 | |||
| 10611 | /* Get the first SGE entry from the non-embedded DMA memory */ | 10616 | /* Get the first SGE entry from the non-embedded DMA memory */ |
| 10612 | if (unlikely(!mbox->sge_array)) { | ||
| 10613 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
| 10614 | "2565 Failed to get the non-embedded SGE " | ||
| 10615 | "virtual address\n"); | ||
| 10616 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
| 10617 | return -ENOMEM; | ||
| 10618 | } | ||
| 10619 | viraddr = mbox->sge_array->addr[0]; | 10617 | viraddr = mbox->sge_array->addr[0]; |
| 10620 | 10618 | ||
| 10621 | /* Set up the SGL pages in the non-embedded DMA pages */ | 10619 | /* Set up the SGL pages in the non-embedded DMA pages */ |
| @@ -10802,6 +10800,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, | |||
| 10802 | } | 10800 | } |
| 10803 | 10801 | ||
| 10804 | /** | 10802 | /** |
| 10803 | * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp | ||
| 10804 | * @vport: The vport to work on. | ||
| 10805 | * | ||
| 10806 | * This function updates the receive sequence time stamp for this vport. The | ||
| 10807 | * receive sequence time stamp indicates the time that the last frame of the | ||
| 10808 | * the sequence that has been idle for the longest amount of time was received. | ||
| 10809 | * the driver uses this time stamp to indicate if any received sequences have | ||
| 10810 | * timed out. | ||
| 10811 | **/ | ||
| 10812 | void | ||
| 10813 | lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) | ||
| 10814 | { | ||
| 10815 | struct lpfc_dmabuf *h_buf; | ||
| 10816 | struct hbq_dmabuf *dmabuf = NULL; | ||
| 10817 | |||
| 10818 | /* get the oldest sequence on the rcv list */ | ||
| 10819 | h_buf = list_get_first(&vport->rcv_buffer_list, | ||
| 10820 | struct lpfc_dmabuf, list); | ||
| 10821 | if (!h_buf) | ||
| 10822 | return; | ||
| 10823 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
| 10824 | vport->rcv_buffer_time_stamp = dmabuf->time_stamp; | ||
| 10825 | } | ||
| 10826 | |||
| 10827 | /** | ||
| 10828 | * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. | ||
| 10829 | * @vport: The vport that the received sequences were sent to. | ||
| 10830 | * | ||
| 10831 | * This function cleans up all outstanding received sequences. This is called | ||
| 10832 | * by the driver when a link event or user action invalidates all the received | ||
| 10833 | * sequences. | ||
| 10834 | **/ | ||
| 10835 | void | ||
| 10836 | lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) | ||
| 10837 | { | ||
| 10838 | struct lpfc_dmabuf *h_buf, *hnext; | ||
| 10839 | struct lpfc_dmabuf *d_buf, *dnext; | ||
| 10840 | struct hbq_dmabuf *dmabuf = NULL; | ||
| 10841 | |||
| 10842 | /* start with the oldest sequence on the rcv list */ | ||
| 10843 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { | ||
| 10844 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
| 10845 | list_del_init(&dmabuf->hbuf.list); | ||
| 10846 | list_for_each_entry_safe(d_buf, dnext, | ||
| 10847 | &dmabuf->dbuf.list, list) { | ||
| 10848 | list_del_init(&d_buf->list); | ||
| 10849 | lpfc_in_buf_free(vport->phba, d_buf); | ||
| 10850 | } | ||
| 10851 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); | ||
| 10852 | } | ||
| 10853 | } | ||
| 10854 | |||
| 10855 | /** | ||
| 10856 | * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. | ||
| 10857 | * @vport: The vport that the received sequences were sent to. | ||
| 10858 | * | ||
| 10859 | * This function determines whether any received sequences have timed out by | ||
| 10860 | * first checking the vport's rcv_buffer_time_stamp. If this time_stamp | ||
| 10861 | * indicates that there is at least one timed out sequence this routine will | ||
| 10862 | * go through the received sequences one at a time from most inactive to most | ||
| 10863 | * active to determine which ones need to be cleaned up. Once it has determined | ||
| 10864 | * that a sequence needs to be cleaned up it will simply free up the resources | ||
| 10865 | * without sending an abort. | ||
| 10866 | **/ | ||
| 10867 | void | ||
| 10868 | lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) | ||
| 10869 | { | ||
| 10870 | struct lpfc_dmabuf *h_buf, *hnext; | ||
| 10871 | struct lpfc_dmabuf *d_buf, *dnext; | ||
| 10872 | struct hbq_dmabuf *dmabuf = NULL; | ||
| 10873 | unsigned long timeout; | ||
| 10874 | int abort_count = 0; | ||
| 10875 | |||
| 10876 | timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + | ||
| 10877 | vport->rcv_buffer_time_stamp); | ||
| 10878 | if (list_empty(&vport->rcv_buffer_list) || | ||
| 10879 | time_before(jiffies, timeout)) | ||
| 10880 | return; | ||
| 10881 | /* start with the oldest sequence on the rcv list */ | ||
| 10882 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { | ||
| 10883 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
| 10884 | timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + | ||
| 10885 | dmabuf->time_stamp); | ||
| 10886 | if (time_before(jiffies, timeout)) | ||
| 10887 | break; | ||
| 10888 | abort_count++; | ||
| 10889 | list_del_init(&dmabuf->hbuf.list); | ||
| 10890 | list_for_each_entry_safe(d_buf, dnext, | ||
| 10891 | &dmabuf->dbuf.list, list) { | ||
| 10892 | list_del_init(&d_buf->list); | ||
| 10893 | lpfc_in_buf_free(vport->phba, d_buf); | ||
| 10894 | } | ||
| 10895 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); | ||
| 10896 | } | ||
| 10897 | if (abort_count) | ||
| 10898 | lpfc_update_rcv_time_stamp(vport); | ||
| 10899 | } | ||
| 10900 | |||
| 10901 | /** | ||
| 10805 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences | 10902 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences |
| 10806 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame | 10903 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame |
| 10807 | * | 10904 | * |
| @@ -10823,6 +10920,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10823 | struct hbq_dmabuf *seq_dmabuf = NULL; | 10920 | struct hbq_dmabuf *seq_dmabuf = NULL; |
| 10824 | struct hbq_dmabuf *temp_dmabuf = NULL; | 10921 | struct hbq_dmabuf *temp_dmabuf = NULL; |
| 10825 | 10922 | ||
| 10923 | INIT_LIST_HEAD(&dmabuf->dbuf.list); | ||
| 10924 | dmabuf->time_stamp = jiffies; | ||
| 10826 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | 10925 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 10827 | /* Use the hdr_buf to find the sequence that this frame belongs to */ | 10926 | /* Use the hdr_buf to find the sequence that this frame belongs to */ |
| 10828 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { | 10927 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { |
| @@ -10841,13 +10940,21 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10841 | * Queue the buffer on the vport's rcv_buffer_list. | 10940 | * Queue the buffer on the vport's rcv_buffer_list. |
| 10842 | */ | 10941 | */ |
| 10843 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | 10942 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); |
| 10943 | lpfc_update_rcv_time_stamp(vport); | ||
| 10844 | return dmabuf; | 10944 | return dmabuf; |
| 10845 | } | 10945 | } |
| 10846 | temp_hdr = seq_dmabuf->hbuf.virt; | 10946 | temp_hdr = seq_dmabuf->hbuf.virt; |
| 10847 | if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { | 10947 | if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { |
| 10848 | list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); | 10948 | list_del_init(&seq_dmabuf->hbuf.list); |
| 10949 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | ||
| 10950 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); | ||
| 10951 | lpfc_update_rcv_time_stamp(vport); | ||
| 10849 | return dmabuf; | 10952 | return dmabuf; |
| 10850 | } | 10953 | } |
| 10954 | /* move this sequence to the tail to indicate a young sequence */ | ||
| 10955 | list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); | ||
| 10956 | seq_dmabuf->time_stamp = jiffies; | ||
| 10957 | lpfc_update_rcv_time_stamp(vport); | ||
| 10851 | /* find the correct place in the sequence to insert this frame */ | 10958 | /* find the correct place in the sequence to insert this frame */ |
| 10852 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { | 10959 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { |
| 10853 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | 10960 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| @@ -10865,6 +10972,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10865 | } | 10972 | } |
| 10866 | 10973 | ||
| 10867 | /** | 10974 | /** |
| 10975 | * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence | ||
| 10976 | * @vport: pointer to a vitural port | ||
| 10977 | * @dmabuf: pointer to a dmabuf that describes the FC sequence | ||
| 10978 | * | ||
| 10979 | * This function tries to abort from the partially assembed sequence, described | ||
| 10980 | * by the information from basic abbort @dmabuf. It checks to see whether such | ||
| 10981 | * partially assembled sequence held by the driver. If so, it shall free up all | ||
| 10982 | * the frames from the partially assembled sequence. | ||
| 10983 | * | ||
| 10984 | * Return | ||
| 10985 | * true -- if there is matching partially assembled sequence present and all | ||
| 10986 | * the frames freed with the sequence; | ||
| 10987 | * false -- if there is no matching partially assembled sequence present so | ||
| 10988 | * nothing got aborted in the lower layer driver | ||
| 10989 | **/ | ||
| 10990 | static bool | ||
| 10991 | lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, | ||
| 10992 | struct hbq_dmabuf *dmabuf) | ||
| 10993 | { | ||
| 10994 | struct fc_frame_header *new_hdr; | ||
| 10995 | struct fc_frame_header *temp_hdr; | ||
| 10996 | struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; | ||
| 10997 | struct hbq_dmabuf *seq_dmabuf = NULL; | ||
| 10998 | |||
| 10999 | /* Use the hdr_buf to find the sequence that matches this frame */ | ||
| 11000 | INIT_LIST_HEAD(&dmabuf->dbuf.list); | ||
| 11001 | INIT_LIST_HEAD(&dmabuf->hbuf.list); | ||
| 11002 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | ||
| 11003 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { | ||
| 11004 | temp_hdr = (struct fc_frame_header *)h_buf->virt; | ||
| 11005 | if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || | ||
| 11006 | (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || | ||
| 11007 | (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) | ||
| 11008 | continue; | ||
| 11009 | /* found a pending sequence that matches this frame */ | ||
| 11010 | seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
| 11011 | break; | ||
| 11012 | } | ||
| 11013 | |||
| 11014 | /* Free up all the frames from the partially assembled sequence */ | ||
| 11015 | if (seq_dmabuf) { | ||
| 11016 | list_for_each_entry_safe(d_buf, n_buf, | ||
| 11017 | &seq_dmabuf->dbuf.list, list) { | ||
| 11018 | list_del_init(&d_buf->list); | ||
| 11019 | lpfc_in_buf_free(vport->phba, d_buf); | ||
| 11020 | } | ||
| 11021 | return true; | ||
| 11022 | } | ||
| 11023 | return false; | ||
| 11024 | } | ||
| 11025 | |||
| 11026 | /** | ||
| 11027 | * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler | ||
| 11028 | * @phba: Pointer to HBA context object. | ||
| 11029 | * @cmd_iocbq: pointer to the command iocbq structure. | ||
| 11030 | * @rsp_iocbq: pointer to the response iocbq structure. | ||
| 11031 | * | ||
| 11032 | * This function handles the sequence abort accept iocb command complete | ||
| 11033 | * event. It properly releases the memory allocated to the sequence abort | ||
| 11034 | * accept iocb. | ||
| 11035 | **/ | ||
| 11036 | static void | ||
| 11037 | lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | ||
| 11038 | struct lpfc_iocbq *cmd_iocbq, | ||
| 11039 | struct lpfc_iocbq *rsp_iocbq) | ||
| 11040 | { | ||
| 11041 | if (cmd_iocbq) | ||
| 11042 | lpfc_sli_release_iocbq(phba, cmd_iocbq); | ||
| 11043 | } | ||
| 11044 | |||
| 11045 | /** | ||
| 11046 | * lpfc_sli4_seq_abort_acc - Accept sequence abort | ||
| 11047 | * @phba: Pointer to HBA context object. | ||
| 11048 | * @fc_hdr: pointer to a FC frame header. | ||
| 11049 | * | ||
| 11050 | * This function sends a basic accept to a previous unsol sequence abort | ||
| 11051 | * event after aborting the sequence handling. | ||
| 11052 | **/ | ||
| 11053 | static void | ||
| 11054 | lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | ||
| 11055 | struct fc_frame_header *fc_hdr) | ||
| 11056 | { | ||
| 11057 | struct lpfc_iocbq *ctiocb = NULL; | ||
| 11058 | struct lpfc_nodelist *ndlp; | ||
| 11059 | uint16_t oxid, rxid; | ||
| 11060 | uint32_t sid, fctl; | ||
| 11061 | IOCB_t *icmd; | ||
| 11062 | |||
| 11063 | if (!lpfc_is_link_up(phba)) | ||
| 11064 | return; | ||
| 11065 | |||
| 11066 | sid = sli4_sid_from_fc_hdr(fc_hdr); | ||
| 11067 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); | ||
| 11068 | rxid = be16_to_cpu(fc_hdr->fh_rx_id); | ||
| 11069 | |||
| 11070 | ndlp = lpfc_findnode_did(phba->pport, sid); | ||
| 11071 | if (!ndlp) { | ||
| 11072 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | ||
| 11073 | "1268 Find ndlp returned NULL for oxid:x%x " | ||
| 11074 | "SID:x%x\n", oxid, sid); | ||
| 11075 | return; | ||
| 11076 | } | ||
| 11077 | |||
| 11078 | /* Allocate buffer for acc iocb */ | ||
| 11079 | ctiocb = lpfc_sli_get_iocbq(phba); | ||
| 11080 | if (!ctiocb) | ||
| 11081 | return; | ||
| 11082 | |||
| 11083 | /* Extract the F_CTL field from FC_HDR */ | ||
| 11084 | fctl = sli4_fctl_from_fc_hdr(fc_hdr); | ||
| 11085 | |||
| 11086 | icmd = &ctiocb->iocb; | ||
| 11087 | icmd->un.xseq64.bdl.bdeSize = 0; | ||
| 11088 | icmd->un.xseq64.bdl.ulpIoTag32 = 0; | ||
| 11089 | icmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
| 11090 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; | ||
| 11091 | icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; | ||
| 11092 | |||
| 11093 | /* Fill in the rest of iocb fields */ | ||
| 11094 | icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; | ||
| 11095 | icmd->ulpBdeCount = 0; | ||
| 11096 | icmd->ulpLe = 1; | ||
| 11097 | icmd->ulpClass = CLASS3; | ||
| 11098 | icmd->ulpContext = ndlp->nlp_rpi; | ||
| 11099 | |||
| 11100 | ctiocb->iocb_cmpl = NULL; | ||
| 11101 | ctiocb->vport = phba->pport; | ||
| 11102 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; | ||
| 11103 | |||
| 11104 | if (fctl & FC_FC_EX_CTX) { | ||
| 11105 | /* ABTS sent by responder to CT exchange, construction | ||
| 11106 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG | ||
| 11107 | * field and RX_ID from ABTS for RX_ID field. | ||
| 11108 | */ | ||
| 11109 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); | ||
| 11110 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); | ||
| 11111 | ctiocb->sli4_xritag = oxid; | ||
| 11112 | } else { | ||
| 11113 | /* ABTS sent by initiator to CT exchange, construction | ||
| 11114 | * of BA_ACC will need to allocate a new XRI as for the | ||
| 11115 | * XRI_TAG and RX_ID fields. | ||
| 11116 | */ | ||
| 11117 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); | ||
| 11118 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); | ||
| 11119 | ctiocb->sli4_xritag = NO_XRI; | ||
| 11120 | } | ||
| 11121 | bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); | ||
| 11122 | |||
| 11123 | /* Xmit CT abts accept on exchange <xid> */ | ||
| 11124 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
| 11125 | "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", | ||
| 11126 | CMD_XMIT_BLS_RSP64_CX, phba->link_state); | ||
| 11127 | lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
| 11128 | } | ||
| 11129 | |||
| 11130 | /** | ||
| 11131 | * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event | ||
| 11132 | * @vport: Pointer to the vport on which this sequence was received | ||
| 11133 | * @dmabuf: pointer to a dmabuf that describes the FC sequence | ||
| 11134 | * | ||
| 11135 | * This function handles an SLI-4 unsolicited abort event. If the unsolicited | ||
| 11136 | * receive sequence is only partially assembed by the driver, it shall abort | ||
| 11137 | * the partially assembled frames for the sequence. Otherwise, if the | ||
| 11138 | * unsolicited receive sequence has been completely assembled and passed to | ||
| 11139 | * the Upper Layer Protocol (UPL), it then mark the per oxid status for the | ||
| 11140 | * unsolicited sequence has been aborted. After that, it will issue a basic | ||
| 11141 | * accept to accept the abort. | ||
| 11142 | **/ | ||
| 11143 | void | ||
| 11144 | lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, | ||
| 11145 | struct hbq_dmabuf *dmabuf) | ||
| 11146 | { | ||
| 11147 | struct lpfc_hba *phba = vport->phba; | ||
| 11148 | struct fc_frame_header fc_hdr; | ||
| 11149 | uint32_t fctl; | ||
| 11150 | bool abts_par; | ||
| 11151 | |||
| 11152 | /* Make a copy of fc_hdr before the dmabuf being released */ | ||
| 11153 | memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); | ||
| 11154 | fctl = sli4_fctl_from_fc_hdr(&fc_hdr); | ||
| 11155 | |||
| 11156 | if (fctl & FC_FC_EX_CTX) { | ||
| 11157 | /* | ||
| 11158 | * ABTS sent by responder to exchange, just free the buffer | ||
| 11159 | */ | ||
| 11160 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
| 11161 | } else { | ||
| 11162 | /* | ||
| 11163 | * ABTS sent by initiator to exchange, need to do cleanup | ||
| 11164 | */ | ||
| 11165 | /* Try to abort partially assembled seq */ | ||
| 11166 | abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); | ||
| 11167 | |||
| 11168 | /* Send abort to ULP if partially seq abort failed */ | ||
| 11169 | if (abts_par == false) | ||
| 11170 | lpfc_sli4_send_seq_to_ulp(vport, dmabuf); | ||
| 11171 | else | ||
| 11172 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
| 11173 | } | ||
| 11174 | /* Send basic accept (BA_ACC) to the abort requester */ | ||
| 11175 | lpfc_sli4_seq_abort_acc(phba, &fc_hdr); | ||
| 11176 | } | ||
| 11177 | |||
| 11178 | /** | ||
| 10868 | * lpfc_seq_complete - Indicates if a sequence is complete | 11179 | * lpfc_seq_complete - Indicates if a sequence is complete |
| 10869 | * @dmabuf: pointer to a dmabuf that describes the FC sequence | 11180 | * @dmabuf: pointer to a dmabuf that describes the FC sequence |
| 10870 | * | 11181 | * |
| @@ -10935,10 +11246,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 10935 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | 11246 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
| 10936 | /* remove from receive buffer list */ | 11247 | /* remove from receive buffer list */ |
| 10937 | list_del_init(&seq_dmabuf->hbuf.list); | 11248 | list_del_init(&seq_dmabuf->hbuf.list); |
| 11249 | lpfc_update_rcv_time_stamp(vport); | ||
| 10938 | /* get the Remote Port's SID */ | 11250 | /* get the Remote Port's SID */ |
| 10939 | sid = (fc_hdr->fh_s_id[0] << 16 | | 11251 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
| 10940 | fc_hdr->fh_s_id[1] << 8 | | ||
| 10941 | fc_hdr->fh_s_id[2]); | ||
| 10942 | /* Get an iocbq struct to fill in. */ | 11252 | /* Get an iocbq struct to fill in. */ |
| 10943 | first_iocbq = lpfc_sli_get_iocbq(vport->phba); | 11253 | first_iocbq = lpfc_sli_get_iocbq(vport->phba); |
| 10944 | if (first_iocbq) { | 11254 | if (first_iocbq) { |
| @@ -10957,7 +11267,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 10957 | LPFC_DATA_BUF_SIZE; | 11267 | LPFC_DATA_BUF_SIZE; |
| 10958 | first_iocbq->iocb.un.rcvels.remoteID = sid; | 11268 | first_iocbq->iocb.un.rcvels.remoteID = sid; |
| 10959 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += | 11269 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += |
| 10960 | bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); | 11270 | bf_get(lpfc_rcqe_length, |
| 11271 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); | ||
| 10961 | } | 11272 | } |
| 10962 | iocbq = first_iocbq; | 11273 | iocbq = first_iocbq; |
| 10963 | /* | 11274 | /* |
| @@ -10975,7 +11286,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 10975 | iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = | 11286 | iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = |
| 10976 | LPFC_DATA_BUF_SIZE; | 11287 | LPFC_DATA_BUF_SIZE; |
| 10977 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += | 11288 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += |
| 10978 | bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); | 11289 | bf_get(lpfc_rcqe_length, |
| 11290 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); | ||
| 10979 | } else { | 11291 | } else { |
| 10980 | iocbq = lpfc_sli_get_iocbq(vport->phba); | 11292 | iocbq = lpfc_sli_get_iocbq(vport->phba); |
| 10981 | if (!iocbq) { | 11293 | if (!iocbq) { |
| @@ -10994,7 +11306,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 10994 | iocbq->iocb.un.cont64[0].tus.f.bdeSize = | 11306 | iocbq->iocb.un.cont64[0].tus.f.bdeSize = |
| 10995 | LPFC_DATA_BUF_SIZE; | 11307 | LPFC_DATA_BUF_SIZE; |
| 10996 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += | 11308 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += |
| 10997 | bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); | 11309 | bf_get(lpfc_rcqe_length, |
| 11310 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); | ||
| 10998 | iocbq->iocb.un.rcvels.remoteID = sid; | 11311 | iocbq->iocb.un.rcvels.remoteID = sid; |
| 10999 | list_add_tail(&iocbq->list, &first_iocbq->list); | 11312 | list_add_tail(&iocbq->list, &first_iocbq->list); |
| 11000 | } | 11313 | } |
| @@ -11002,6 +11315,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 11002 | return first_iocbq; | 11315 | return first_iocbq; |
| 11003 | } | 11316 | } |
| 11004 | 11317 | ||
| 11318 | static void | ||
| 11319 | lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, | ||
| 11320 | struct hbq_dmabuf *seq_dmabuf) | ||
| 11321 | { | ||
| 11322 | struct fc_frame_header *fc_hdr; | ||
| 11323 | struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; | ||
| 11324 | struct lpfc_hba *phba = vport->phba; | ||
| 11325 | |||
| 11326 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | ||
| 11327 | iocbq = lpfc_prep_seq(vport, seq_dmabuf); | ||
| 11328 | if (!iocbq) { | ||
| 11329 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
| 11330 | "2707 Ring %d handler: Failed to allocate " | ||
| 11331 | "iocb Rctl x%x Type x%x received\n", | ||
| 11332 | LPFC_ELS_RING, | ||
| 11333 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); | ||
| 11334 | return; | ||
| 11335 | } | ||
| 11336 | if (!lpfc_complete_unsol_iocb(phba, | ||
| 11337 | &phba->sli.ring[LPFC_ELS_RING], | ||
| 11338 | iocbq, fc_hdr->fh_r_ctl, | ||
| 11339 | fc_hdr->fh_type)) | ||
| 11340 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
| 11341 | "2540 Ring %d handler: unexpected Rctl " | ||
| 11342 | "x%x Type x%x received\n", | ||
| 11343 | LPFC_ELS_RING, | ||
| 11344 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); | ||
| 11345 | |||
| 11346 | /* Free iocb created in lpfc_prep_seq */ | ||
| 11347 | list_for_each_entry_safe(curr_iocb, next_iocb, | ||
| 11348 | &iocbq->list, list) { | ||
| 11349 | list_del_init(&curr_iocb->list); | ||
| 11350 | lpfc_sli_release_iocbq(phba, curr_iocb); | ||
| 11351 | } | ||
| 11352 | lpfc_sli_release_iocbq(phba, iocbq); | ||
| 11353 | } | ||
| 11354 | |||
| 11005 | /** | 11355 | /** |
| 11006 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware | 11356 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware |
| 11007 | * @phba: Pointer to HBA context object. | 11357 | * @phba: Pointer to HBA context object. |
| @@ -11014,67 +11364,54 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 11014 | * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the | 11364 | * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the |
| 11015 | * appropriate receive function when the final frame in a sequence is received. | 11365 | * appropriate receive function when the final frame in a sequence is received. |
| 11016 | **/ | 11366 | **/ |
| 11017 | int | 11367 | void |
| 11018 | lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) | 11368 | lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, |
| 11369 | struct hbq_dmabuf *dmabuf) | ||
| 11019 | { | 11370 | { |
| 11020 | LIST_HEAD(cmplq); | 11371 | struct hbq_dmabuf *seq_dmabuf; |
| 11021 | struct hbq_dmabuf *dmabuf, *seq_dmabuf; | ||
| 11022 | struct fc_frame_header *fc_hdr; | 11372 | struct fc_frame_header *fc_hdr; |
| 11023 | struct lpfc_vport *vport; | 11373 | struct lpfc_vport *vport; |
| 11024 | uint32_t fcfi; | 11374 | uint32_t fcfi; |
| 11025 | struct lpfc_iocbq *iocbq; | ||
| 11026 | |||
| 11027 | /* Clear hba flag and get all received buffers into the cmplq */ | ||
| 11028 | spin_lock_irq(&phba->hbalock); | ||
| 11029 | phba->hba_flag &= ~HBA_RECEIVE_BUFFER; | ||
| 11030 | list_splice_init(&phba->rb_pend_list, &cmplq); | ||
| 11031 | spin_unlock_irq(&phba->hbalock); | ||
| 11032 | 11375 | ||
| 11033 | /* Process each received buffer */ | 11376 | /* Process each received buffer */ |
| 11034 | while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { | 11377 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 11035 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | 11378 | /* check to see if this a valid type of frame */ |
| 11036 | /* check to see if this a valid type of frame */ | 11379 | if (lpfc_fc_frame_check(phba, fc_hdr)) { |
| 11037 | if (lpfc_fc_frame_check(phba, fc_hdr)) { | 11380 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 11038 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 11381 | return; |
| 11039 | continue; | 11382 | } |
| 11040 | } | 11383 | fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); |
| 11041 | fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); | 11384 | vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); |
| 11042 | vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); | 11385 | if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { |
| 11043 | if (!vport) { | 11386 | /* throw out the frame */ |
| 11044 | /* throw out the frame */ | 11387 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 11045 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 11388 | return; |
| 11046 | continue; | 11389 | } |
| 11047 | } | 11390 | /* Handle the basic abort sequence (BA_ABTS) event */ |
| 11048 | /* Link this frame */ | 11391 | if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { |
| 11049 | seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); | 11392 | lpfc_sli4_handle_unsol_abort(vport, dmabuf); |
| 11050 | if (!seq_dmabuf) { | 11393 | return; |
| 11051 | /* unable to add frame to vport - throw it out */ | 11394 | } |
| 11052 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 11395 | |
| 11053 | continue; | 11396 | /* Link this frame */ |
| 11054 | } | 11397 | seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); |
| 11055 | /* If not last frame in sequence continue processing frames. */ | 11398 | if (!seq_dmabuf) { |
| 11056 | if (!lpfc_seq_complete(seq_dmabuf)) { | 11399 | /* unable to add frame to vport - throw it out */ |
| 11057 | /* | 11400 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 11058 | * When saving off frames post a new one and mark this | 11401 | return; |
| 11059 | * frame to be freed when it is finished. | 11402 | } |
| 11060 | **/ | 11403 | /* If not last frame in sequence continue processing frames. */ |
| 11061 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); | 11404 | if (!lpfc_seq_complete(seq_dmabuf)) { |
| 11062 | dmabuf->tag = -1; | 11405 | /* |
| 11063 | continue; | 11406 | * When saving off frames post a new one and mark this |
| 11064 | } | 11407 | * frame to be freed when it is finished. |
| 11065 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | 11408 | **/ |
| 11066 | iocbq = lpfc_prep_seq(vport, seq_dmabuf); | 11409 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); |
| 11067 | if (!lpfc_complete_unsol_iocb(phba, | 11410 | dmabuf->tag = -1; |
| 11068 | &phba->sli.ring[LPFC_ELS_RING], | 11411 | return; |
| 11069 | iocbq, fc_hdr->fh_r_ctl, | 11412 | } |
| 11070 | fc_hdr->fh_type)) | 11413 | /* Send the complete sequence to the upper layer protocol */ |
| 11071 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 11414 | lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); |
| 11072 | "2540 Ring %d handler: unexpected Rctl " | ||
| 11073 | "x%x Type x%x received\n", | ||
| 11074 | LPFC_ELS_RING, | ||
| 11075 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); | ||
| 11076 | }; | ||
| 11077 | return 0; | ||
| 11078 | } | 11415 | } |
| 11079 | 11416 | ||
| 11080 | /** | 11417 | /** |
| @@ -11091,7 +11428,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) | |||
| 11091 | * sequential. | 11428 | * sequential. |
| 11092 | * | 11429 | * |
| 11093 | * Return codes | 11430 | * Return codes |
| 11094 | * 0 - sucessful | 11431 | * 0 - successful |
| 11095 | * EIO - The mailbox failed to complete successfully. | 11432 | * EIO - The mailbox failed to complete successfully. |
| 11096 | * When this error occurs, the driver is not guaranteed | 11433 | * When this error occurs, the driver is not guaranteed |
| 11097 | * to have any rpi regions posted to the device and | 11434 | * to have any rpi regions posted to the device and |
| @@ -11129,7 +11466,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) | |||
| 11129 | * maps up to 64 rpi context regions. | 11466 | * maps up to 64 rpi context regions. |
| 11130 | * | 11467 | * |
| 11131 | * Return codes | 11468 | * Return codes |
| 11132 | * 0 - sucessful | 11469 | * 0 - successful |
| 11133 | * ENOMEM - No available memory | 11470 | * ENOMEM - No available memory |
| 11134 | * EIO - The mailbox failed to complete successfully. | 11471 | * EIO - The mailbox failed to complete successfully. |
| 11135 | **/ | 11472 | **/ |
| @@ -11191,7 +11528,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) | |||
| 11191 | * PAGE_SIZE modulo 64 rpi context headers. | 11528 | * PAGE_SIZE modulo 64 rpi context headers. |
| 11192 | * | 11529 | * |
| 11193 | * Returns | 11530 | * Returns |
| 11194 | * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful | 11531 | * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful |
| 11195 | * LPFC_RPI_ALLOC_ERROR if no rpis are available. | 11532 | * LPFC_RPI_ALLOC_ERROR if no rpis are available. |
| 11196 | **/ | 11533 | **/ |
| 11197 | int | 11534 | int |
| @@ -11334,6 +11671,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) | |||
| 11334 | { | 11671 | { |
| 11335 | LPFC_MBOXQ_t *mboxq; | 11672 | LPFC_MBOXQ_t *mboxq; |
| 11336 | int rc = 0; | 11673 | int rc = 0; |
| 11674 | int retval = MBX_SUCCESS; | ||
| 11337 | uint32_t mbox_tmo; | 11675 | uint32_t mbox_tmo; |
| 11338 | 11676 | ||
| 11339 | if (vpi == 0) | 11677 | if (vpi == 0) |
| @@ -11344,16 +11682,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) | |||
| 11344 | lpfc_init_vpi(phba, mboxq, vpi); | 11682 | lpfc_init_vpi(phba, mboxq, vpi); |
| 11345 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); | 11683 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); |
| 11346 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | 11684 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); |
| 11347 | if (rc != MBX_TIMEOUT) | ||
| 11348 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
| 11349 | if (rc != MBX_SUCCESS) { | 11685 | if (rc != MBX_SUCCESS) { |
| 11350 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 11686 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 11351 | "2022 INIT VPI Mailbox failed " | 11687 | "2022 INIT VPI Mailbox failed " |
| 11352 | "status %d, mbxStatus x%x\n", rc, | 11688 | "status %d, mbxStatus x%x\n", rc, |
| 11353 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); | 11689 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
| 11354 | rc = -EIO; | 11690 | retval = -EIO; |
| 11355 | } | 11691 | } |
| 11356 | return rc; | 11692 | if (rc != MBX_TIMEOUT) |
| 11693 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
| 11694 | |||
| 11695 | return retval; | ||
| 11357 | } | 11696 | } |
| 11358 | 11697 | ||
| 11359 | /** | 11698 | /** |
| @@ -11438,13 +11777,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) | |||
| 11438 | */ | 11777 | */ |
| 11439 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); | 11778 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); |
| 11440 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); | 11779 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); |
| 11441 | if (unlikely(!mboxq->sge_array)) { | ||
| 11442 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
| 11443 | "2526 Failed to get the non-embedded SGE " | ||
| 11444 | "virtual address\n"); | ||
| 11445 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | ||
| 11446 | return -ENOMEM; | ||
| 11447 | } | ||
| 11448 | virt_addr = mboxq->sge_array->addr[0]; | 11780 | virt_addr = mboxq->sge_array->addr[0]; |
| 11449 | /* | 11781 | /* |
| 11450 | * Configure the FCF record for FCFI 0. This is the driver's | 11782 | * Configure the FCF record for FCFI 0. This is the driver's |
| @@ -11542,7 +11874,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
| 11542 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 11874 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 11543 | "2000 Failed to allocate mbox for " | 11875 | "2000 Failed to allocate mbox for " |
| 11544 | "READ_FCF cmd\n"); | 11876 | "READ_FCF cmd\n"); |
| 11545 | return -ENOMEM; | 11877 | error = -ENOMEM; |
| 11878 | goto fail_fcfscan; | ||
| 11546 | } | 11879 | } |
| 11547 | 11880 | ||
| 11548 | req_len = sizeof(struct fcf_record) + | 11881 | req_len = sizeof(struct fcf_record) + |
| @@ -11558,8 +11891,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
| 11558 | "0291 Allocated DMA memory size (x%x) is " | 11891 | "0291 Allocated DMA memory size (x%x) is " |
| 11559 | "less than the requested DMA memory " | 11892 | "less than the requested DMA memory " |
| 11560 | "size (x%x)\n", alloc_len, req_len); | 11893 | "size (x%x)\n", alloc_len, req_len); |
| 11561 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 11894 | error = -ENOMEM; |
| 11562 | return -ENOMEM; | 11895 | goto fail_fcfscan; |
| 11563 | } | 11896 | } |
| 11564 | 11897 | ||
| 11565 | /* Get the first SGE entry from the non-embedded DMA memory. This | 11898 | /* Get the first SGE entry from the non-embedded DMA memory. This |
| @@ -11567,13 +11900,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
| 11567 | */ | 11900 | */ |
| 11568 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); | 11901 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); |
| 11569 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); | 11902 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); |
| 11570 | if (unlikely(!mboxq->sge_array)) { | ||
| 11571 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
| 11572 | "2527 Failed to get the non-embedded SGE " | ||
| 11573 | "virtual address\n"); | ||
| 11574 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | ||
| 11575 | return -ENOMEM; | ||
| 11576 | } | ||
| 11577 | virt_addr = mboxq->sge_array->addr[0]; | 11903 | virt_addr = mboxq->sge_array->addr[0]; |
| 11578 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; | 11904 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; |
| 11579 | 11905 | ||
| @@ -11586,7 +11912,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
| 11586 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; | 11912 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; |
| 11587 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | 11913 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
| 11588 | if (rc == MBX_NOT_FINISHED) { | 11914 | if (rc == MBX_NOT_FINISHED) { |
| 11589 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | ||
| 11590 | error = -EIO; | 11915 | error = -EIO; |
| 11591 | } else { | 11916 | } else { |
| 11592 | spin_lock_irq(&phba->hbalock); | 11917 | spin_lock_irq(&phba->hbalock); |
| @@ -11594,6 +11919,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
| 11594 | spin_unlock_irq(&phba->hbalock); | 11919 | spin_unlock_irq(&phba->hbalock); |
| 11595 | error = 0; | 11920 | error = 0; |
| 11596 | } | 11921 | } |
| 11922 | fail_fcfscan: | ||
| 11923 | if (error) { | ||
| 11924 | if (mboxq) | ||
| 11925 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | ||
| 11926 | /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ | ||
| 11927 | spin_lock_irq(&phba->hbalock); | ||
| 11928 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
| 11929 | spin_unlock_irq(&phba->hbalock); | ||
| 11930 | } | ||
| 11597 | return error; | 11931 | return error; |
| 11598 | } | 11932 | } |
| 11599 | 11933 | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 3c53316cf6d0..ba38de3c28f1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
| @@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd { | |||
| 29 | LPFC_CTX_HOST | 29 | LPFC_CTX_HOST |
| 30 | } lpfc_ctx_cmd; | 30 | } lpfc_ctx_cmd; |
| 31 | 31 | ||
| 32 | /* This structure is used to carry the needed response IOCB states */ | 32 | struct lpfc_cq_event { |
| 33 | struct lpfc_sli4_rspiocb_info { | 33 | struct list_head list; |
| 34 | uint8_t hw_status; | 34 | union { |
| 35 | uint8_t bfield; | 35 | struct lpfc_mcqe mcqe_cmpl; |
| 36 | #define LPFC_XB 0x1 | 36 | struct lpfc_acqe_link acqe_link; |
| 37 | #define LPFC_PV 0x2 | 37 | struct lpfc_acqe_fcoe acqe_fcoe; |
| 38 | uint8_t priority; | 38 | struct lpfc_acqe_dcbx acqe_dcbx; |
| 39 | uint8_t reserved; | 39 | struct lpfc_rcqe rcqe_cmpl; |
| 40 | struct sli4_wcqe_xri_aborted wcqe_axri; | ||
| 41 | struct lpfc_wcqe_complete wcqe_cmpl; | ||
| 42 | } cqe; | ||
| 40 | }; | 43 | }; |
| 41 | 44 | ||
| 42 | /* This structure is used to handle IOCB requests / responses */ | 45 | /* This structure is used to handle IOCB requests / responses */ |
| @@ -46,6 +49,7 @@ struct lpfc_iocbq { | |||
| 46 | struct list_head clist; | 49 | struct list_head clist; |
| 47 | uint16_t iotag; /* pre-assigned IO tag */ | 50 | uint16_t iotag; /* pre-assigned IO tag */ |
| 48 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ | 51 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ |
| 52 | struct lpfc_cq_event cq_event; | ||
| 49 | 53 | ||
| 50 | IOCB_t iocb; /* IOCB cmd */ | 54 | IOCB_t iocb; /* IOCB cmd */ |
| 51 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ | 55 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ |
| @@ -56,11 +60,13 @@ struct lpfc_iocbq { | |||
| 56 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ | 60 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ |
| 57 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ | 61 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ |
| 58 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ | 62 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ |
| 59 | #define LPFC_FIP_ELS 0x40 | 63 | #define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */ |
| 64 | #define LPFC_FIP_ELS_ID_SHIFT 6 | ||
| 60 | 65 | ||
| 61 | uint8_t abort_count; | 66 | uint8_t abort_count; |
| 62 | uint8_t rsvd2; | 67 | uint8_t rsvd2; |
| 63 | uint32_t drvrTimeout; /* driver timeout in seconds */ | 68 | uint32_t drvrTimeout; /* driver timeout in seconds */ |
| 69 | uint32_t fcp_wqidx; /* index to FCP work queue */ | ||
| 64 | struct lpfc_vport *vport;/* virtual port pointer */ | 70 | struct lpfc_vport *vport;/* virtual port pointer */ |
| 65 | void *context1; /* caller context information */ | 71 | void *context1; /* caller context information */ |
| 66 | void *context2; /* caller context information */ | 72 | void *context2; /* caller context information */ |
| @@ -76,7 +82,6 @@ struct lpfc_iocbq { | |||
| 76 | struct lpfc_iocbq *); | 82 | struct lpfc_iocbq *); |
| 77 | void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, | 83 | void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, |
| 78 | struct lpfc_iocbq *); | 84 | struct lpfc_iocbq *); |
| 79 | struct lpfc_sli4_rspiocb_info sli4_info; | ||
| 80 | }; | 85 | }; |
| 81 | 86 | ||
| 82 | #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ | 87 | #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ |
| @@ -110,7 +115,7 @@ typedef struct lpfcMboxq { | |||
| 110 | return */ | 115 | return */ |
| 111 | #define MBX_NOWAIT 2 /* issue command then return immediately */ | 116 | #define MBX_NOWAIT 2 /* issue command then return immediately */ |
| 112 | 117 | ||
| 113 | #define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per | 118 | #define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per |
| 114 | ring */ | 119 | ring */ |
| 115 | #define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ | 120 | #define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ |
| 116 | 121 | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index b5f4ba1a5c27..25d66d070cf8 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
| @@ -58,6 +58,16 @@ | |||
| 58 | #define LPFC_FCOE_FKA_ADV_PER 0 | 58 | #define LPFC_FCOE_FKA_ADV_PER 0 |
| 59 | #define LPFC_FCOE_FIP_PRIORITY 0x80 | 59 | #define LPFC_FCOE_FIP_PRIORITY 0x80 |
| 60 | 60 | ||
| 61 | #define sli4_sid_from_fc_hdr(fc_hdr) \ | ||
| 62 | ((fc_hdr)->fh_s_id[0] << 16 | \ | ||
| 63 | (fc_hdr)->fh_s_id[1] << 8 | \ | ||
| 64 | (fc_hdr)->fh_s_id[2]) | ||
| 65 | |||
| 66 | #define sli4_fctl_from_fc_hdr(fc_hdr) \ | ||
| 67 | ((fc_hdr)->fh_f_ctl[0] << 16 | \ | ||
| 68 | (fc_hdr)->fh_f_ctl[1] << 8 | \ | ||
| 69 | (fc_hdr)->fh_f_ctl[2]) | ||
| 70 | |||
| 61 | enum lpfc_sli4_queue_type { | 71 | enum lpfc_sli4_queue_type { |
| 62 | LPFC_EQ, | 72 | LPFC_EQ, |
| 63 | LPFC_GCQ, | 73 | LPFC_GCQ, |
| @@ -110,18 +120,6 @@ struct lpfc_queue { | |||
| 110 | union sli4_qe qe[1]; /* array to index entries (must be last) */ | 120 | union sli4_qe qe[1]; /* array to index entries (must be last) */ |
| 111 | }; | 121 | }; |
| 112 | 122 | ||
| 113 | struct lpfc_cq_event { | ||
| 114 | struct list_head list; | ||
| 115 | union { | ||
| 116 | struct lpfc_mcqe mcqe_cmpl; | ||
| 117 | struct lpfc_acqe_link acqe_link; | ||
| 118 | struct lpfc_acqe_fcoe acqe_fcoe; | ||
| 119 | struct lpfc_acqe_dcbx acqe_dcbx; | ||
| 120 | struct lpfc_rcqe rcqe_cmpl; | ||
| 121 | struct sli4_wcqe_xri_aborted wcqe_axri; | ||
| 122 | } cqe; | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct lpfc_sli4_link { | 123 | struct lpfc_sli4_link { |
| 126 | uint8_t speed; | 124 | uint8_t speed; |
| 127 | uint8_t duplex; | 125 | uint8_t duplex; |
| @@ -166,7 +164,7 @@ struct lpfc_fip_param_hdr { | |||
| 166 | #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 | 164 | #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 |
| 167 | #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 | 165 | #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 |
| 168 | #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags | 166 | #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags |
| 169 | #define FIPP_MODE_ON 0x2 | 167 | #define FIPP_MODE_ON 0x1 |
| 170 | #define FIPP_MODE_OFF 0x0 | 168 | #define FIPP_MODE_OFF 0x0 |
| 171 | #define FIPP_VLAN_VALID 0x1 | 169 | #define FIPP_VLAN_VALID 0x1 |
| 172 | }; | 170 | }; |
| @@ -295,9 +293,8 @@ struct lpfc_sli4_hba { | |||
| 295 | /* BAR0 PCI config space register memory map */ | 293 | /* BAR0 PCI config space register memory map */ |
| 296 | void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ | 294 | void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ |
| 297 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ | 295 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ |
| 298 | void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ | 296 | void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ |
| 299 | void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ | 297 | void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ |
| 300 | #define LPFC_ONLINE_NERR 0xFFFFFFFF | ||
| 301 | void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ | 298 | void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ |
| 302 | /* BAR1 FCoE function CSR register memory map */ | 299 | /* BAR1 FCoE function CSR register memory map */ |
| 303 | void __iomem *STAregaddr; /* Address to HST_STATE register */ | 300 | void __iomem *STAregaddr; /* Address to HST_STATE register */ |
| @@ -311,6 +308,8 @@ struct lpfc_sli4_hba { | |||
| 311 | void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ | 308 | void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ |
| 312 | void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ | 309 | void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ |
| 313 | 310 | ||
| 311 | uint32_t ue_mask_lo; | ||
| 312 | uint32_t ue_mask_hi; | ||
| 314 | struct msix_entry *msix_entries; | 313 | struct msix_entry *msix_entries; |
| 315 | uint32_t cfg_eqn; | 314 | uint32_t cfg_eqn; |
| 316 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ | 315 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ |
| @@ -325,7 +324,6 @@ struct lpfc_sli4_hba { | |||
| 325 | struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ | 324 | struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ |
| 326 | struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ | 325 | struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ |
| 327 | struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ | 326 | struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ |
| 328 | struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ | ||
| 329 | 327 | ||
| 330 | /* Setup information for various queue parameters */ | 328 | /* Setup information for various queue parameters */ |
| 331 | int eq_esize; | 329 | int eq_esize; |
| @@ -360,7 +358,7 @@ struct lpfc_sli4_hba { | |||
| 360 | unsigned long *rpi_bmask; | 358 | unsigned long *rpi_bmask; |
| 361 | uint16_t rpi_count; | 359 | uint16_t rpi_count; |
| 362 | struct lpfc_sli4_flags sli4_flags; | 360 | struct lpfc_sli4_flags sli4_flags; |
| 363 | struct list_head sp_rspiocb_work_queue; | 361 | struct list_head sp_queue_event; |
| 364 | struct list_head sp_cqe_event_pool; | 362 | struct list_head sp_cqe_event_pool; |
| 365 | struct list_head sp_asynce_work_queue; | 363 | struct list_head sp_asynce_work_queue; |
| 366 | struct list_head sp_fcp_xri_aborted_work_queue; | 364 | struct list_head sp_fcp_xri_aborted_work_queue; |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 9ae20af4bdb7..c7f3aed2aab8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
| @@ -18,8 +18,7 @@ | |||
| 18 | * included with this package. * | 18 | * included with this package. * |
| 19 | *******************************************************************/ | 19 | *******************************************************************/ |
| 20 | 20 | ||
| 21 | #define LPFC_DRIVER_VERSION "8.3.4" | 21 | #define LPFC_DRIVER_VERSION "8.3.6" |
| 22 | |||
| 23 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
| 24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
| 25 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 606efa767548..7d6dd83d3592 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
| @@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
| 389 | * by the port. | 389 | * by the port. |
| 390 | */ | 390 | */ |
| 391 | if ((phba->sli_rev == LPFC_SLI_REV4) && | 391 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
| 392 | (pport->vfi_state & LPFC_VFI_REGISTERED)) { | 392 | (pport->vpi_state & LPFC_VPI_REGISTERED)) { |
| 393 | rc = lpfc_sli4_init_vpi(phba, vpi); | 393 | rc = lpfc_sli4_init_vpi(phba, vpi); |
| 394 | if (rc) { | 394 | if (rc) { |
| 395 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | 395 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, |
| @@ -700,6 +700,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
| 700 | } | 700 | } |
| 701 | spin_unlock_irq(&phba->ndlp_lock); | 701 | spin_unlock_irq(&phba->ndlp_lock); |
| 702 | } | 702 | } |
| 703 | if (vport->vpi_state != LPFC_VPI_REGISTERED) | ||
| 704 | goto skip_logo; | ||
| 703 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 705 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
| 704 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 706 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
| 705 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | 707 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) |
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 512c2cc1a33f..d310f49d077e 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h | |||
| @@ -381,7 +381,7 @@ typedef struct { | |||
| 381 | u8 battery_status; /* | 381 | u8 battery_status; /* |
| 382 | * BIT 0: battery module missing | 382 | * BIT 0: battery module missing |
| 383 | * BIT 1: VBAD | 383 | * BIT 1: VBAD |
| 384 | * BIT 2: temprature high | 384 | * BIT 2: temperature high |
| 385 | * BIT 3: battery pack missing | 385 | * BIT 3: battery pack missing |
| 386 | * BIT 4,5: | 386 | * BIT 4,5: |
| 387 | * 00 - charge complete | 387 | * 00 - charge complete |
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index b25b74764ec3..ce2487a888ed 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h | |||
| @@ -497,7 +497,7 @@ typedef struct { | |||
| 497 | * @inserted_drive : channel:Id of inserted drive | 497 | * @inserted_drive : channel:Id of inserted drive |
| 498 | * @battery_status : bit 0: battery module missing | 498 | * @battery_status : bit 0: battery module missing |
| 499 | * bit 1: VBAD | 499 | * bit 1: VBAD |
| 500 | * bit 2: temprature high | 500 | * bit 2: temperature high |
| 501 | * bit 3: battery pack missing | 501 | * bit 3: battery pack missing |
| 502 | * bit 4,5: | 502 | * bit 4,5: |
| 503 | * 00 - charge complete | 503 | * 00 - charge complete |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 234f0b7eb21c..7f977967b884 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
| @@ -335,12 +335,17 @@ static struct device_attribute *megaraid_sdev_attrs[] = { | |||
| 335 | * megaraid_change_queue_depth - Change the device's queue depth | 335 | * megaraid_change_queue_depth - Change the device's queue depth |
| 336 | * @sdev: scsi device struct | 336 | * @sdev: scsi device struct |
| 337 | * @qdepth: depth to set | 337 | * @qdepth: depth to set |
| 338 | * @reason: calling context | ||
| 338 | * | 339 | * |
| 339 | * Return value: | 340 | * Return value: |
| 340 | * actual depth set | 341 | * actual depth set |
| 341 | */ | 342 | */ |
| 342 | static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) | 343 | static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth, |
| 344 | int reason) | ||
| 343 | { | 345 | { |
| 346 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 347 | return -EOPNOTSUPP; | ||
| 348 | |||
| 344 | if (qdepth > MBOX_MAX_SCSI_CMDS) | 349 | if (qdepth > MBOX_MAX_SCSI_CMDS) |
| 345 | qdepth = MBOX_MAX_SCSI_CMDS; | 350 | qdepth = MBOX_MAX_SCSI_CMDS; |
| 346 | scsi_adjust_queue_depth(sdev, 0, qdepth); | 351 | scsi_adjust_queue_depth(sdev, 0, qdepth); |
| @@ -2704,7 +2709,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp) | |||
| 2704 | } | 2709 | } |
| 2705 | else { | 2710 | else { |
| 2706 | con_log(CL_ANN, (KERN_NOTICE | 2711 | con_log(CL_ANN, (KERN_NOTICE |
| 2707 | "megaraid mbox: reset sequence completed sucessfully\n")); | 2712 | "megaraid mbox: reset sequence completed successfully\n")); |
| 2708 | } | 2713 | } |
| 2709 | 2714 | ||
| 2710 | 2715 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index a39addc3a596..134c63ef6d38 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
| 11 | * | 11 | * |
| 12 | * FILE : megaraid_sas.c | 12 | * FILE : megaraid_sas.c |
| 13 | * Version : v00.00.04.01-rc1 | 13 | * Version : v00.00.04.12-rc1 |
| 14 | * | 14 | * |
| 15 | * Authors: | 15 | * Authors: |
| 16 | * (email-id : megaraidlinux@lsi.com) | 16 | * (email-id : megaraidlinux@lsi.com) |
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/compat.h> | 40 | #include <linux/compat.h> |
| 41 | #include <linux/blkdev.h> | 41 | #include <linux/blkdev.h> |
| 42 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> |
| 43 | #include <linux/poll.h> | ||
| 43 | 44 | ||
| 44 | #include <scsi/scsi.h> | 45 | #include <scsi/scsi.h> |
| 45 | #include <scsi/scsi_cmnd.h> | 46 | #include <scsi/scsi_cmnd.h> |
| @@ -75,6 +76,10 @@ static struct pci_device_id megasas_pci_table[] = { | |||
| 75 | /* gen2*/ | 76 | /* gen2*/ |
| 76 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, | 77 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, |
| 77 | /* gen2*/ | 78 | /* gen2*/ |
| 79 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, | ||
| 80 | /* skinny*/ | ||
| 81 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, | ||
| 82 | /* skinny*/ | ||
| 78 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, | 83 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, |
| 79 | /* xscale IOP, vega */ | 84 | /* xscale IOP, vega */ |
| 80 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, | 85 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, |
| @@ -89,8 +94,14 @@ static struct megasas_mgmt_info megasas_mgmt_info; | |||
| 89 | static struct fasync_struct *megasas_async_queue; | 94 | static struct fasync_struct *megasas_async_queue; |
| 90 | static DEFINE_MUTEX(megasas_async_queue_mutex); | 95 | static DEFINE_MUTEX(megasas_async_queue_mutex); |
| 91 | 96 | ||
| 97 | static int megasas_poll_wait_aen; | ||
| 98 | static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); | ||
| 99 | static u32 support_poll_for_event; | ||
| 92 | static u32 megasas_dbg_lvl; | 100 | static u32 megasas_dbg_lvl; |
| 93 | 101 | ||
| 102 | /* define lock for aen poll */ | ||
| 103 | spinlock_t poll_aen_lock; | ||
| 104 | |||
| 94 | static void | 105 | static void |
| 95 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | 106 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, |
| 96 | u8 alt_status); | 107 | u8 alt_status); |
| @@ -215,7 +226,10 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) | |||
| 215 | * @regs : MFI register set | 226 | * @regs : MFI register set |
| 216 | */ | 227 | */ |
| 217 | static inline void | 228 | static inline void |
| 218 | megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs) | 229 | megasas_fire_cmd_xscale(struct megasas_instance *instance, |
| 230 | dma_addr_t frame_phys_addr, | ||
| 231 | u32 frame_count, | ||
| 232 | struct megasas_register_set __iomem *regs) | ||
| 219 | { | 233 | { |
| 220 | writel((frame_phys_addr >> 3)|(frame_count), | 234 | writel((frame_phys_addr >> 3)|(frame_count), |
| 221 | &(regs)->inbound_queue_port); | 235 | &(regs)->inbound_queue_port); |
| @@ -312,7 +326,10 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) | |||
| 312 | * @regs : MFI register set | 326 | * @regs : MFI register set |
| 313 | */ | 327 | */ |
| 314 | static inline void | 328 | static inline void |
| 315 | megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) | 329 | megasas_fire_cmd_ppc(struct megasas_instance *instance, |
| 330 | dma_addr_t frame_phys_addr, | ||
| 331 | u32 frame_count, | ||
| 332 | struct megasas_register_set __iomem *regs) | ||
| 316 | { | 333 | { |
| 317 | writel((frame_phys_addr | (frame_count<<1))|1, | 334 | writel((frame_phys_addr | (frame_count<<1))|1, |
| 318 | &(regs)->inbound_queue_port); | 335 | &(regs)->inbound_queue_port); |
| @@ -328,6 +345,104 @@ static struct megasas_instance_template megasas_instance_template_ppc = { | |||
| 328 | }; | 345 | }; |
| 329 | 346 | ||
| 330 | /** | 347 | /** |
| 348 | * megasas_enable_intr_skinny - Enables interrupts | ||
| 349 | * @regs: MFI register set | ||
| 350 | */ | ||
| 351 | static inline void | ||
| 352 | megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs) | ||
| 353 | { | ||
| 354 | writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); | ||
| 355 | |||
| 356 | writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); | ||
| 357 | |||
| 358 | /* Dummy readl to force pci flush */ | ||
| 359 | readl(®s->outbound_intr_mask); | ||
| 360 | } | ||
| 361 | |||
| 362 | /** | ||
| 363 | * megasas_disable_intr_skinny - Disables interrupt | ||
| 364 | * @regs: MFI register set | ||
| 365 | */ | ||
| 366 | static inline void | ||
| 367 | megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs) | ||
| 368 | { | ||
| 369 | u32 mask = 0xFFFFFFFF; | ||
| 370 | writel(mask, ®s->outbound_intr_mask); | ||
| 371 | /* Dummy readl to force pci flush */ | ||
| 372 | readl(®s->outbound_intr_mask); | ||
| 373 | } | ||
| 374 | |||
| 375 | /** | ||
| 376 | * megasas_read_fw_status_reg_skinny - returns the current FW status value | ||
| 377 | * @regs: MFI register set | ||
| 378 | */ | ||
| 379 | static u32 | ||
| 380 | megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) | ||
| 381 | { | ||
| 382 | return readl(&(regs)->outbound_scratch_pad); | ||
| 383 | } | ||
| 384 | |||
| 385 | /** | ||
| 386 | * megasas_clear_interrupt_skinny - Check & clear interrupt | ||
| 387 | * @regs: MFI register set | ||
| 388 | */ | ||
| 389 | static int | ||
| 390 | megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) | ||
| 391 | { | ||
| 392 | u32 status; | ||
| 393 | /* | ||
| 394 | * Check if it is our interrupt | ||
| 395 | */ | ||
| 396 | status = readl(®s->outbound_intr_status); | ||
| 397 | |||
| 398 | if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { | ||
| 399 | return 1; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Clear the interrupt by writing back the same value | ||
| 404 | */ | ||
| 405 | writel(status, ®s->outbound_intr_status); | ||
| 406 | |||
| 407 | /* | ||
| 408 | * dummy read to flush PCI | ||
| 409 | */ | ||
| 410 | readl(®s->outbound_intr_status); | ||
| 411 | |||
| 412 | return 0; | ||
| 413 | } | ||
| 414 | |||
| 415 | /** | ||
| 416 | * megasas_fire_cmd_skinny - Sends command to the FW | ||
| 417 | * @frame_phys_addr : Physical address of cmd | ||
| 418 | * @frame_count : Number of frames for the command | ||
| 419 | * @regs : MFI register set | ||
| 420 | */ | ||
| 421 | static inline void | ||
| 422 | megasas_fire_cmd_skinny(struct megasas_instance *instance, | ||
| 423 | dma_addr_t frame_phys_addr, | ||
| 424 | u32 frame_count, | ||
| 425 | struct megasas_register_set __iomem *regs) | ||
| 426 | { | ||
| 427 | unsigned long flags; | ||
| 428 | spin_lock_irqsave(&instance->fire_lock, flags); | ||
| 429 | writel(0, &(regs)->inbound_high_queue_port); | ||
| 430 | writel((frame_phys_addr | (frame_count<<1))|1, | ||
| 431 | &(regs)->inbound_low_queue_port); | ||
| 432 | spin_unlock_irqrestore(&instance->fire_lock, flags); | ||
| 433 | } | ||
| 434 | |||
| 435 | static struct megasas_instance_template megasas_instance_template_skinny = { | ||
| 436 | |||
| 437 | .fire_cmd = megasas_fire_cmd_skinny, | ||
| 438 | .enable_intr = megasas_enable_intr_skinny, | ||
| 439 | .disable_intr = megasas_disable_intr_skinny, | ||
| 440 | .clear_intr = megasas_clear_intr_skinny, | ||
| 441 | .read_fw_status_reg = megasas_read_fw_status_reg_skinny, | ||
| 442 | }; | ||
| 443 | |||
| 444 | |||
| 445 | /** | ||
| 331 | * The following functions are defined for gen2 (deviceid : 0x78 0x79) | 446 | * The following functions are defined for gen2 (deviceid : 0x78 0x79) |
| 332 | * controllers | 447 | * controllers |
| 333 | */ | 448 | */ |
| @@ -404,7 +519,9 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) | |||
| 404 | * @regs : MFI register set | 519 | * @regs : MFI register set |
| 405 | */ | 520 | */ |
| 406 | static inline void | 521 | static inline void |
| 407 | megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count, | 522 | megasas_fire_cmd_gen2(struct megasas_instance *instance, |
| 523 | dma_addr_t frame_phys_addr, | ||
| 524 | u32 frame_count, | ||
| 408 | struct megasas_register_set __iomem *regs) | 525 | struct megasas_register_set __iomem *regs) |
| 409 | { | 526 | { |
| 410 | writel((frame_phys_addr | (frame_count<<1))|1, | 527 | writel((frame_phys_addr | (frame_count<<1))|1, |
| @@ -446,7 +563,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
| 446 | /* | 563 | /* |
| 447 | * Issue the frame using inbound queue port | 564 | * Issue the frame using inbound queue port |
| 448 | */ | 565 | */ |
| 449 | instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); | 566 | instance->instancet->fire_cmd(instance, |
| 567 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
| 450 | 568 | ||
| 451 | /* | 569 | /* |
| 452 | * Wait for cmd_status to change | 570 | * Wait for cmd_status to change |
| @@ -477,7 +595,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, | |||
| 477 | { | 595 | { |
| 478 | cmd->cmd_status = ENODATA; | 596 | cmd->cmd_status = ENODATA; |
| 479 | 597 | ||
| 480 | instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); | 598 | instance->instancet->fire_cmd(instance, |
| 599 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
| 481 | 600 | ||
| 482 | wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), | 601 | wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), |
| 483 | MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); | 602 | MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); |
| @@ -522,7 +641,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, | |||
| 522 | cmd->sync_cmd = 1; | 641 | cmd->sync_cmd = 1; |
| 523 | cmd->cmd_status = 0xFF; | 642 | cmd->cmd_status = 0xFF; |
| 524 | 643 | ||
| 525 | instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); | 644 | instance->instancet->fire_cmd(instance, |
| 645 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
| 526 | 646 | ||
| 527 | /* | 647 | /* |
| 528 | * Wait for this cmd to complete | 648 | * Wait for this cmd to complete |
| @@ -592,6 +712,35 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 592 | return sge_count; | 712 | return sge_count; |
| 593 | } | 713 | } |
| 594 | 714 | ||
| 715 | /** | ||
| 716 | * megasas_make_sgl_skinny - Prepares IEEE SGL | ||
| 717 | * @instance: Adapter soft state | ||
| 718 | * @scp: SCSI command from the mid-layer | ||
| 719 | * @mfi_sgl: SGL to be filled in | ||
| 720 | * | ||
| 721 | * If successful, this function returns the number of SG elements. Otherwise, | ||
| 722 | * it returnes -1. | ||
| 723 | */ | ||
| 724 | static int | ||
| 725 | megasas_make_sgl_skinny(struct megasas_instance *instance, | ||
| 726 | struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) | ||
| 727 | { | ||
| 728 | int i; | ||
| 729 | int sge_count; | ||
| 730 | struct scatterlist *os_sgl; | ||
| 731 | |||
| 732 | sge_count = scsi_dma_map(scp); | ||
| 733 | |||
| 734 | if (sge_count) { | ||
| 735 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | ||
| 736 | mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); | ||
| 737 | mfi_sgl->sge_skinny[i].phys_addr = | ||
| 738 | sg_dma_address(os_sgl); | ||
| 739 | } | ||
| 740 | } | ||
| 741 | return sge_count; | ||
| 742 | } | ||
| 743 | |||
| 595 | /** | 744 | /** |
| 596 | * megasas_get_frame_count - Computes the number of frames | 745 | * megasas_get_frame_count - Computes the number of frames |
| 597 | * @frame_type : type of frame- io or pthru frame | 746 | * @frame_type : type of frame- io or pthru frame |
| @@ -600,7 +749,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 600 | * Returns the number of frames required for numnber of sge's (sge_count) | 749 | * Returns the number of frames required for numnber of sge's (sge_count) |
| 601 | */ | 750 | */ |
| 602 | 751 | ||
| 603 | static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) | 752 | static u32 megasas_get_frame_count(struct megasas_instance *instance, |
| 753 | u8 sge_count, u8 frame_type) | ||
| 604 | { | 754 | { |
| 605 | int num_cnt; | 755 | int num_cnt; |
| 606 | int sge_bytes; | 756 | int sge_bytes; |
| @@ -610,6 +760,10 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) | |||
| 610 | sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : | 760 | sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : |
| 611 | sizeof(struct megasas_sge32); | 761 | sizeof(struct megasas_sge32); |
| 612 | 762 | ||
| 763 | if (instance->flag_ieee) { | ||
| 764 | sge_sz = sizeof(struct megasas_sge_skinny); | ||
| 765 | } | ||
| 766 | |||
| 613 | /* | 767 | /* |
| 614 | * Main frame can contain 2 SGEs for 64-bit SGLs and | 768 | * Main frame can contain 2 SGEs for 64-bit SGLs and |
| 615 | * 3 SGEs for 32-bit SGLs for ldio & | 769 | * 3 SGEs for 32-bit SGLs for ldio & |
| @@ -617,12 +771,16 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) | |||
| 617 | * 2 SGEs for 32-bit SGLs for pthru frame | 771 | * 2 SGEs for 32-bit SGLs for pthru frame |
| 618 | */ | 772 | */ |
| 619 | if (unlikely(frame_type == PTHRU_FRAME)) { | 773 | if (unlikely(frame_type == PTHRU_FRAME)) { |
| 620 | if (IS_DMA64) | 774 | if (instance->flag_ieee == 1) { |
| 775 | num_cnt = sge_count - 1; | ||
| 776 | } else if (IS_DMA64) | ||
| 621 | num_cnt = sge_count - 1; | 777 | num_cnt = sge_count - 1; |
| 622 | else | 778 | else |
| 623 | num_cnt = sge_count - 2; | 779 | num_cnt = sge_count - 2; |
| 624 | } else { | 780 | } else { |
| 625 | if (IS_DMA64) | 781 | if (instance->flag_ieee == 1) { |
| 782 | num_cnt = sge_count - 1; | ||
| 783 | } else if (IS_DMA64) | ||
| 626 | num_cnt = sge_count - 2; | 784 | num_cnt = sge_count - 2; |
| 627 | else | 785 | else |
| 628 | num_cnt = sge_count - 3; | 786 | num_cnt = sge_count - 3; |
| @@ -671,6 +829,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 671 | else if (scp->sc_data_direction == PCI_DMA_NONE) | 829 | else if (scp->sc_data_direction == PCI_DMA_NONE) |
| 672 | flags = MFI_FRAME_DIR_NONE; | 830 | flags = MFI_FRAME_DIR_NONE; |
| 673 | 831 | ||
| 832 | if (instance->flag_ieee == 1) { | ||
| 833 | flags |= MFI_FRAME_IEEE; | ||
| 834 | } | ||
| 835 | |||
| 674 | /* | 836 | /* |
| 675 | * Prepare the DCDB frame | 837 | * Prepare the DCDB frame |
| 676 | */ | 838 | */ |
| @@ -687,9 +849,24 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 687 | memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); | 849 | memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); |
| 688 | 850 | ||
| 689 | /* | 851 | /* |
| 852 | * If the command is for the tape device, set the | ||
| 853 | * pthru timeout to the os layer timeout value. | ||
| 854 | */ | ||
| 855 | if (scp->device->type == TYPE_TAPE) { | ||
| 856 | if ((scp->request->timeout / HZ) > 0xFFFF) | ||
| 857 | pthru->timeout = 0xFFFF; | ||
| 858 | else | ||
| 859 | pthru->timeout = scp->request->timeout / HZ; | ||
| 860 | } | ||
| 861 | |||
| 862 | /* | ||
| 690 | * Construct SGL | 863 | * Construct SGL |
| 691 | */ | 864 | */ |
| 692 | if (IS_DMA64) { | 865 | if (instance->flag_ieee == 1) { |
| 866 | pthru->flags |= MFI_FRAME_SGL64; | ||
| 867 | pthru->sge_count = megasas_make_sgl_skinny(instance, scp, | ||
| 868 | &pthru->sgl); | ||
| 869 | } else if (IS_DMA64) { | ||
| 693 | pthru->flags |= MFI_FRAME_SGL64; | 870 | pthru->flags |= MFI_FRAME_SGL64; |
| 694 | pthru->sge_count = megasas_make_sgl64(instance, scp, | 871 | pthru->sge_count = megasas_make_sgl64(instance, scp, |
| 695 | &pthru->sgl); | 872 | &pthru->sgl); |
| @@ -708,7 +885,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 708 | * Compute the total number of frames this command consumes. FW uses | 885 | * Compute the total number of frames this command consumes. FW uses |
| 709 | * this number to pull sufficient number of frames from host memory. | 886 | * this number to pull sufficient number of frames from host memory. |
| 710 | */ | 887 | */ |
| 711 | cmd->frame_count = megasas_get_frame_count(pthru->sge_count, | 888 | cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, |
| 712 | PTHRU_FRAME); | 889 | PTHRU_FRAME); |
| 713 | 890 | ||
| 714 | return cmd->frame_count; | 891 | return cmd->frame_count; |
| @@ -739,6 +916,10 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 739 | else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | 916 | else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) |
| 740 | flags = MFI_FRAME_DIR_READ; | 917 | flags = MFI_FRAME_DIR_READ; |
| 741 | 918 | ||
| 919 | if (instance->flag_ieee == 1) { | ||
| 920 | flags |= MFI_FRAME_IEEE; | ||
| 921 | } | ||
| 922 | |||
| 742 | /* | 923 | /* |
| 743 | * Prepare the Logical IO frame: 2nd bit is zero for all read cmds | 924 | * Prepare the Logical IO frame: 2nd bit is zero for all read cmds |
| 744 | */ | 925 | */ |
| @@ -809,7 +990,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 809 | /* | 990 | /* |
| 810 | * Construct SGL | 991 | * Construct SGL |
| 811 | */ | 992 | */ |
| 812 | if (IS_DMA64) { | 993 | if (instance->flag_ieee) { |
| 994 | ldio->flags |= MFI_FRAME_SGL64; | ||
| 995 | ldio->sge_count = megasas_make_sgl_skinny(instance, scp, | ||
| 996 | &ldio->sgl); | ||
| 997 | } else if (IS_DMA64) { | ||
| 813 | ldio->flags |= MFI_FRAME_SGL64; | 998 | ldio->flags |= MFI_FRAME_SGL64; |
| 814 | ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); | 999 | ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); |
| 815 | } else | 1000 | } else |
| @@ -826,7 +1011,8 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
| 826 | * Compute the total number of frames this command consumes. FW uses | 1011 | * Compute the total number of frames this command consumes. FW uses |
| 827 | * this number to pull sufficient number of frames from host memory. | 1012 | * this number to pull sufficient number of frames from host memory. |
| 828 | */ | 1013 | */ |
| 829 | cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME); | 1014 | cmd->frame_count = megasas_get_frame_count(instance, |
| 1015 | ldio->sge_count, IO_FRAME); | ||
| 830 | 1016 | ||
| 831 | return cmd->frame_count; | 1017 | return cmd->frame_count; |
| 832 | } | 1018 | } |
| @@ -983,7 +1169,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | |||
| 983 | */ | 1169 | */ |
| 984 | atomic_inc(&instance->fw_outstanding); | 1170 | atomic_inc(&instance->fw_outstanding); |
| 985 | 1171 | ||
| 986 | instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set); | 1172 | instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, |
| 1173 | cmd->frame_count-1, instance->reg_set); | ||
| 987 | /* | 1174 | /* |
| 988 | * Check if we have pend cmds to be completed | 1175 | * Check if we have pend cmds to be completed |
| 989 | */ | 1176 | */ |
| @@ -1000,24 +1187,76 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | |||
| 1000 | return 0; | 1187 | return 0; |
| 1001 | } | 1188 | } |
| 1002 | 1189 | ||
| 1190 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) | ||
| 1191 | { | ||
| 1192 | int i; | ||
| 1193 | |||
| 1194 | for (i = 0; i < megasas_mgmt_info.max_index; i++) { | ||
| 1195 | |||
| 1196 | if ((megasas_mgmt_info.instance[i]) && | ||
| 1197 | (megasas_mgmt_info.instance[i]->host->host_no == host_no)) | ||
| 1198 | return megasas_mgmt_info.instance[i]; | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | return NULL; | ||
| 1202 | } | ||
| 1203 | |||
| 1003 | static int megasas_slave_configure(struct scsi_device *sdev) | 1204 | static int megasas_slave_configure(struct scsi_device *sdev) |
| 1004 | { | 1205 | { |
| 1206 | u16 pd_index = 0; | ||
| 1207 | struct megasas_instance *instance ; | ||
| 1208 | |||
| 1209 | instance = megasas_lookup_instance(sdev->host->host_no); | ||
| 1210 | |||
| 1005 | /* | 1211 | /* |
| 1006 | * Don't export physical disk devices to the disk driver. | 1212 | * Don't export physical disk devices to the disk driver. |
| 1007 | * | 1213 | * |
| 1008 | * FIXME: Currently we don't export them to the midlayer at all. | 1214 | * FIXME: Currently we don't export them to the midlayer at all. |
| 1009 | * That will be fixed once LSI engineers have audited the | 1215 | * That will be fixed once LSI engineers have audited the |
| 1010 | * firmware for possible issues. | 1216 | * firmware for possible issues. |
| 1011 | */ | 1217 | */ |
| 1012 | if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) | 1218 | if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && |
| 1219 | sdev->type == TYPE_DISK) { | ||
| 1220 | pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + | ||
| 1221 | sdev->id; | ||
| 1222 | if (instance->pd_list[pd_index].driveState == | ||
| 1223 | MR_PD_STATE_SYSTEM) { | ||
| 1224 | blk_queue_rq_timeout(sdev->request_queue, | ||
| 1225 | MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); | ||
| 1226 | return 0; | ||
| 1227 | } | ||
| 1013 | return -ENXIO; | 1228 | return -ENXIO; |
| 1229 | } | ||
| 1014 | 1230 | ||
| 1015 | /* | 1231 | /* |
| 1016 | * The RAID firmware may require extended timeouts. | 1232 | * The RAID firmware may require extended timeouts. |
| 1017 | */ | 1233 | */ |
| 1018 | if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) | 1234 | blk_queue_rq_timeout(sdev->request_queue, |
| 1019 | blk_queue_rq_timeout(sdev->request_queue, | 1235 | MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); |
| 1020 | MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); | 1236 | return 0; |
| 1237 | } | ||
| 1238 | |||
| 1239 | static int megasas_slave_alloc(struct scsi_device *sdev) | ||
| 1240 | { | ||
| 1241 | u16 pd_index = 0; | ||
| 1242 | struct megasas_instance *instance ; | ||
| 1243 | instance = megasas_lookup_instance(sdev->host->host_no); | ||
| 1244 | if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) && | ||
| 1245 | (sdev->type == TYPE_DISK)) { | ||
| 1246 | /* | ||
| 1247 | * Open the OS scan to the SYSTEM PD | ||
| 1248 | */ | ||
| 1249 | pd_index = | ||
| 1250 | (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + | ||
| 1251 | sdev->id; | ||
| 1252 | if ((instance->pd_list[pd_index].driveState == | ||
| 1253 | MR_PD_STATE_SYSTEM) && | ||
| 1254 | (instance->pd_list[pd_index].driveType == | ||
| 1255 | TYPE_DISK)) { | ||
| 1256 | return 0; | ||
| 1257 | } | ||
| 1258 | return -ENXIO; | ||
| 1259 | } | ||
| 1021 | return 0; | 1260 | return 0; |
| 1022 | } | 1261 | } |
| 1023 | 1262 | ||
| @@ -1072,7 +1311,14 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) | |||
| 1072 | 1311 | ||
| 1073 | spin_lock_irqsave(instance->host->host_lock, flags); | 1312 | spin_lock_irqsave(instance->host->host_lock, flags); |
| 1074 | instance->flag &= ~MEGASAS_FW_BUSY; | 1313 | instance->flag &= ~MEGASAS_FW_BUSY; |
| 1075 | instance->host->can_queue = | 1314 | if ((instance->pdev->device == |
| 1315 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
| 1316 | (instance->pdev->device == | ||
| 1317 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 1318 | instance->host->can_queue = | ||
| 1319 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
| 1320 | } else | ||
| 1321 | instance->host->can_queue = | ||
| 1076 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | 1322 | instance->max_fw_cmds - MEGASAS_INT_CMDS; |
| 1077 | 1323 | ||
| 1078 | spin_unlock_irqrestore(instance->host->host_lock, flags); | 1324 | spin_unlock_irqrestore(instance->host->host_lock, flags); |
| @@ -1117,8 +1363,16 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) | |||
| 1117 | * Send signal to FW to stop processing any pending cmds. | 1363 | * Send signal to FW to stop processing any pending cmds. |
| 1118 | * The controller will be taken offline by the OS now. | 1364 | * The controller will be taken offline by the OS now. |
| 1119 | */ | 1365 | */ |
| 1120 | writel(MFI_STOP_ADP, | 1366 | if ((instance->pdev->device == |
| 1367 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
| 1368 | (instance->pdev->device == | ||
| 1369 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 1370 | writel(MFI_STOP_ADP, | ||
| 1371 | &instance->reg_set->reserved_0[0]); | ||
| 1372 | } else { | ||
| 1373 | writel(MFI_STOP_ADP, | ||
| 1121 | &instance->reg_set->inbound_doorbell); | 1374 | &instance->reg_set->inbound_doorbell); |
| 1375 | } | ||
| 1122 | megasas_dump_pending_frames(instance); | 1376 | megasas_dump_pending_frames(instance); |
| 1123 | instance->hw_crit_error = 1; | 1377 | instance->hw_crit_error = 1; |
| 1124 | return FAILED; | 1378 | return FAILED; |
| @@ -1266,6 +1520,8 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, | |||
| 1266 | return 0; | 1520 | return 0; |
| 1267 | } | 1521 | } |
| 1268 | 1522 | ||
| 1523 | static void megasas_aen_polling(struct work_struct *work); | ||
| 1524 | |||
| 1269 | /** | 1525 | /** |
| 1270 | * megasas_service_aen - Processes an event notification | 1526 | * megasas_service_aen - Processes an event notification |
| 1271 | * @instance: Adapter soft state | 1527 | * @instance: Adapter soft state |
| @@ -1281,16 +1537,36 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, | |||
| 1281 | static void | 1537 | static void |
| 1282 | megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) | 1538 | megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) |
| 1283 | { | 1539 | { |
| 1540 | unsigned long flags; | ||
| 1284 | /* | 1541 | /* |
| 1285 | * Don't signal app if it is just an aborted previously registered aen | 1542 | * Don't signal app if it is just an aborted previously registered aen |
| 1286 | */ | 1543 | */ |
| 1287 | if (!cmd->abort_aen) | 1544 | if ((!cmd->abort_aen) && (instance->unload == 0)) { |
| 1545 | spin_lock_irqsave(&poll_aen_lock, flags); | ||
| 1546 | megasas_poll_wait_aen = 1; | ||
| 1547 | spin_unlock_irqrestore(&poll_aen_lock, flags); | ||
| 1548 | wake_up(&megasas_poll_wait); | ||
| 1288 | kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); | 1549 | kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); |
| 1550 | } | ||
| 1289 | else | 1551 | else |
| 1290 | cmd->abort_aen = 0; | 1552 | cmd->abort_aen = 0; |
| 1291 | 1553 | ||
| 1292 | instance->aen_cmd = NULL; | 1554 | instance->aen_cmd = NULL; |
| 1293 | megasas_return_cmd(instance, cmd); | 1555 | megasas_return_cmd(instance, cmd); |
| 1556 | |||
| 1557 | if (instance->unload == 0) { | ||
| 1558 | struct megasas_aen_event *ev; | ||
| 1559 | ev = kzalloc(sizeof(*ev), GFP_ATOMIC); | ||
| 1560 | if (!ev) { | ||
| 1561 | printk(KERN_ERR "megasas_service_aen: out of memory\n"); | ||
| 1562 | } else { | ||
| 1563 | ev->instance = instance; | ||
| 1564 | instance->ev = ev; | ||
| 1565 | INIT_WORK(&ev->hotplug_work, megasas_aen_polling); | ||
| 1566 | schedule_delayed_work( | ||
| 1567 | (struct delayed_work *)&ev->hotplug_work, 0); | ||
| 1568 | } | ||
| 1569 | } | ||
| 1294 | } | 1570 | } |
| 1295 | 1571 | ||
| 1296 | /* | 1572 | /* |
| @@ -1302,6 +1578,7 @@ static struct scsi_host_template megasas_template = { | |||
| 1302 | .name = "LSI SAS based MegaRAID driver", | 1578 | .name = "LSI SAS based MegaRAID driver", |
| 1303 | .proc_name = "megaraid_sas", | 1579 | .proc_name = "megaraid_sas", |
| 1304 | .slave_configure = megasas_slave_configure, | 1580 | .slave_configure = megasas_slave_configure, |
| 1581 | .slave_alloc = megasas_slave_alloc, | ||
| 1305 | .queuecommand = megasas_queue_command, | 1582 | .queuecommand = megasas_queue_command, |
| 1306 | .eh_device_reset_handler = megasas_reset_device, | 1583 | .eh_device_reset_handler = megasas_reset_device, |
| 1307 | .eh_bus_reset_handler = megasas_reset_bus_host, | 1584 | .eh_bus_reset_handler = megasas_reset_bus_host, |
| @@ -1370,6 +1647,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
| 1370 | { | 1647 | { |
| 1371 | int exception = 0; | 1648 | int exception = 0; |
| 1372 | struct megasas_header *hdr = &cmd->frame->hdr; | 1649 | struct megasas_header *hdr = &cmd->frame->hdr; |
| 1650 | unsigned long flags; | ||
| 1373 | 1651 | ||
| 1374 | if (cmd->scmd) | 1652 | if (cmd->scmd) |
| 1375 | cmd->scmd->SCp.ptr = NULL; | 1653 | cmd->scmd->SCp.ptr = NULL; |
| @@ -1459,6 +1737,12 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
| 1459 | case MFI_CMD_SMP: | 1737 | case MFI_CMD_SMP: |
| 1460 | case MFI_CMD_STP: | 1738 | case MFI_CMD_STP: |
| 1461 | case MFI_CMD_DCMD: | 1739 | case MFI_CMD_DCMD: |
| 1740 | if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || | ||
| 1741 | cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { | ||
| 1742 | spin_lock_irqsave(&poll_aen_lock, flags); | ||
| 1743 | megasas_poll_wait_aen = 0; | ||
| 1744 | spin_unlock_irqrestore(&poll_aen_lock, flags); | ||
| 1745 | } | ||
| 1462 | 1746 | ||
| 1463 | /* | 1747 | /* |
| 1464 | * See if got an event notification | 1748 | * See if got an event notification |
| @@ -1536,6 +1820,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1536 | u8 max_wait; | 1820 | u8 max_wait; |
| 1537 | u32 fw_state; | 1821 | u32 fw_state; |
| 1538 | u32 cur_state; | 1822 | u32 cur_state; |
| 1823 | u32 abs_state, curr_abs_state; | ||
| 1539 | 1824 | ||
| 1540 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; | 1825 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; |
| 1541 | 1826 | ||
| @@ -1545,6 +1830,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1545 | 1830 | ||
| 1546 | while (fw_state != MFI_STATE_READY) { | 1831 | while (fw_state != MFI_STATE_READY) { |
| 1547 | 1832 | ||
| 1833 | abs_state = | ||
| 1834 | instance->instancet->read_fw_status_reg(instance->reg_set); | ||
| 1835 | |||
| 1548 | switch (fw_state) { | 1836 | switch (fw_state) { |
| 1549 | 1837 | ||
| 1550 | case MFI_STATE_FAULT: | 1838 | case MFI_STATE_FAULT: |
| @@ -1556,18 +1844,36 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1556 | /* | 1844 | /* |
| 1557 | * Set the CLR bit in inbound doorbell | 1845 | * Set the CLR bit in inbound doorbell |
| 1558 | */ | 1846 | */ |
| 1559 | writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, | 1847 | if ((instance->pdev->device == |
| 1560 | &instance->reg_set->inbound_doorbell); | 1848 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
| 1849 | (instance->pdev->device == | ||
| 1850 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 1851 | |||
| 1852 | writel( | ||
| 1853 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, | ||
| 1854 | &instance->reg_set->reserved_0[0]); | ||
| 1855 | } else { | ||
| 1856 | writel( | ||
| 1857 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, | ||
| 1858 | &instance->reg_set->inbound_doorbell); | ||
| 1859 | } | ||
| 1561 | 1860 | ||
| 1562 | max_wait = 2; | 1861 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1563 | cur_state = MFI_STATE_WAIT_HANDSHAKE; | 1862 | cur_state = MFI_STATE_WAIT_HANDSHAKE; |
| 1564 | break; | 1863 | break; |
| 1565 | 1864 | ||
| 1566 | case MFI_STATE_BOOT_MESSAGE_PENDING: | 1865 | case MFI_STATE_BOOT_MESSAGE_PENDING: |
| 1567 | writel(MFI_INIT_HOTPLUG, | 1866 | if ((instance->pdev->device == |
| 1568 | &instance->reg_set->inbound_doorbell); | 1867 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
| 1868 | (instance->pdev->device == | ||
| 1869 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 1870 | writel(MFI_INIT_HOTPLUG, | ||
| 1871 | &instance->reg_set->reserved_0[0]); | ||
| 1872 | } else | ||
| 1873 | writel(MFI_INIT_HOTPLUG, | ||
| 1874 | &instance->reg_set->inbound_doorbell); | ||
| 1569 | 1875 | ||
| 1570 | max_wait = 10; | 1876 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1571 | cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; | 1877 | cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; |
| 1572 | break; | 1878 | break; |
| 1573 | 1879 | ||
| @@ -1576,9 +1882,17 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1576 | * Bring it to READY state; assuming max wait 10 secs | 1882 | * Bring it to READY state; assuming max wait 10 secs |
| 1577 | */ | 1883 | */ |
| 1578 | instance->instancet->disable_intr(instance->reg_set); | 1884 | instance->instancet->disable_intr(instance->reg_set); |
| 1579 | writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); | 1885 | if ((instance->pdev->device == |
| 1886 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
| 1887 | (instance->pdev->device == | ||
| 1888 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 1889 | writel(MFI_RESET_FLAGS, | ||
| 1890 | &instance->reg_set->reserved_0[0]); | ||
| 1891 | } else | ||
| 1892 | writel(MFI_RESET_FLAGS, | ||
| 1893 | &instance->reg_set->inbound_doorbell); | ||
| 1580 | 1894 | ||
| 1581 | max_wait = 60; | 1895 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1582 | cur_state = MFI_STATE_OPERATIONAL; | 1896 | cur_state = MFI_STATE_OPERATIONAL; |
| 1583 | break; | 1897 | break; |
| 1584 | 1898 | ||
| @@ -1586,32 +1900,32 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1586 | /* | 1900 | /* |
| 1587 | * This state should not last for more than 2 seconds | 1901 | * This state should not last for more than 2 seconds |
| 1588 | */ | 1902 | */ |
| 1589 | max_wait = 2; | 1903 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1590 | cur_state = MFI_STATE_UNDEFINED; | 1904 | cur_state = MFI_STATE_UNDEFINED; |
| 1591 | break; | 1905 | break; |
| 1592 | 1906 | ||
| 1593 | case MFI_STATE_BB_INIT: | 1907 | case MFI_STATE_BB_INIT: |
| 1594 | max_wait = 2; | 1908 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1595 | cur_state = MFI_STATE_BB_INIT; | 1909 | cur_state = MFI_STATE_BB_INIT; |
| 1596 | break; | 1910 | break; |
| 1597 | 1911 | ||
| 1598 | case MFI_STATE_FW_INIT: | 1912 | case MFI_STATE_FW_INIT: |
| 1599 | max_wait = 20; | 1913 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1600 | cur_state = MFI_STATE_FW_INIT; | 1914 | cur_state = MFI_STATE_FW_INIT; |
| 1601 | break; | 1915 | break; |
| 1602 | 1916 | ||
| 1603 | case MFI_STATE_FW_INIT_2: | 1917 | case MFI_STATE_FW_INIT_2: |
| 1604 | max_wait = 20; | 1918 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1605 | cur_state = MFI_STATE_FW_INIT_2; | 1919 | cur_state = MFI_STATE_FW_INIT_2; |
| 1606 | break; | 1920 | break; |
| 1607 | 1921 | ||
| 1608 | case MFI_STATE_DEVICE_SCAN: | 1922 | case MFI_STATE_DEVICE_SCAN: |
| 1609 | max_wait = 20; | 1923 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1610 | cur_state = MFI_STATE_DEVICE_SCAN; | 1924 | cur_state = MFI_STATE_DEVICE_SCAN; |
| 1611 | break; | 1925 | break; |
| 1612 | 1926 | ||
| 1613 | case MFI_STATE_FLUSH_CACHE: | 1927 | case MFI_STATE_FLUSH_CACHE: |
| 1614 | max_wait = 20; | 1928 | max_wait = MEGASAS_RESET_WAIT_TIME; |
| 1615 | cur_state = MFI_STATE_FLUSH_CACHE; | 1929 | cur_state = MFI_STATE_FLUSH_CACHE; |
| 1616 | break; | 1930 | break; |
| 1617 | 1931 | ||
| @@ -1627,8 +1941,10 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1627 | for (i = 0; i < (max_wait * 1000); i++) { | 1941 | for (i = 0; i < (max_wait * 1000); i++) { |
| 1628 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & | 1942 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & |
| 1629 | MFI_STATE_MASK ; | 1943 | MFI_STATE_MASK ; |
| 1944 | curr_abs_state = | ||
| 1945 | instance->instancet->read_fw_status_reg(instance->reg_set); | ||
| 1630 | 1946 | ||
| 1631 | if (fw_state == cur_state) { | 1947 | if (abs_state == curr_abs_state) { |
| 1632 | msleep(1); | 1948 | msleep(1); |
| 1633 | } else | 1949 | } else |
| 1634 | break; | 1950 | break; |
| @@ -1637,7 +1953,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
| 1637 | /* | 1953 | /* |
| 1638 | * Return error if fw_state hasn't changed after max_wait | 1954 | * Return error if fw_state hasn't changed after max_wait |
| 1639 | */ | 1955 | */ |
| 1640 | if (fw_state == cur_state) { | 1956 | if (curr_abs_state == abs_state) { |
| 1641 | printk(KERN_DEBUG "FW state [%d] hasn't changed " | 1957 | printk(KERN_DEBUG "FW state [%d] hasn't changed " |
| 1642 | "in %d secs\n", fw_state, max_wait); | 1958 | "in %d secs\n", fw_state, max_wait); |
| 1643 | return -ENODEV; | 1959 | return -ENODEV; |
| @@ -1715,6 +2031,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
| 1715 | sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : | 2031 | sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : |
| 1716 | sizeof(struct megasas_sge32); | 2032 | sizeof(struct megasas_sge32); |
| 1717 | 2033 | ||
| 2034 | if (instance->flag_ieee) { | ||
| 2035 | sge_sz = sizeof(struct megasas_sge_skinny); | ||
| 2036 | } | ||
| 2037 | |||
| 1718 | /* | 2038 | /* |
| 1719 | * Calculated the number of 64byte frames required for SGL | 2039 | * Calculated the number of 64byte frames required for SGL |
| 1720 | */ | 2040 | */ |
| @@ -1777,6 +2097,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
| 1777 | } | 2097 | } |
| 1778 | 2098 | ||
| 1779 | cmd->frame->io.context = cmd->index; | 2099 | cmd->frame->io.context = cmd->index; |
| 2100 | cmd->frame->io.pad_0 = 0; | ||
| 1780 | } | 2101 | } |
| 1781 | 2102 | ||
| 1782 | return 0; | 2103 | return 0; |
| @@ -1882,6 +2203,97 @@ static int megasas_alloc_cmds(struct megasas_instance *instance) | |||
| 1882 | return 0; | 2203 | return 0; |
| 1883 | } | 2204 | } |
| 1884 | 2205 | ||
| 2206 | /* | ||
| 2207 | * megasas_get_pd_list_info - Returns FW's pd_list structure | ||
| 2208 | * @instance: Adapter soft state | ||
| 2209 | * @pd_list: pd_list structure | ||
| 2210 | * | ||
| 2211 | * Issues an internal command (DCMD) to get the FW's controller PD | ||
| 2212 | * list structure. This information is mainly used to find out SYSTEM | ||
| 2213 | * supported by the FW. | ||
| 2214 | */ | ||
| 2215 | static int | ||
| 2216 | megasas_get_pd_list(struct megasas_instance *instance) | ||
| 2217 | { | ||
| 2218 | int ret = 0, pd_index = 0; | ||
| 2219 | struct megasas_cmd *cmd; | ||
| 2220 | struct megasas_dcmd_frame *dcmd; | ||
| 2221 | struct MR_PD_LIST *ci; | ||
| 2222 | struct MR_PD_ADDRESS *pd_addr; | ||
| 2223 | dma_addr_t ci_h = 0; | ||
| 2224 | |||
| 2225 | cmd = megasas_get_cmd(instance); | ||
| 2226 | |||
| 2227 | if (!cmd) { | ||
| 2228 | printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n"); | ||
| 2229 | return -ENOMEM; | ||
| 2230 | } | ||
| 2231 | |||
| 2232 | dcmd = &cmd->frame->dcmd; | ||
| 2233 | |||
| 2234 | ci = pci_alloc_consistent(instance->pdev, | ||
| 2235 | MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h); | ||
| 2236 | |||
| 2237 | if (!ci) { | ||
| 2238 | printk(KERN_DEBUG "Failed to alloc mem for pd_list\n"); | ||
| 2239 | megasas_return_cmd(instance, cmd); | ||
| 2240 | return -ENOMEM; | ||
| 2241 | } | ||
| 2242 | |||
| 2243 | memset(ci, 0, sizeof(*ci)); | ||
| 2244 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | ||
| 2245 | |||
| 2246 | dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; | ||
| 2247 | dcmd->mbox.b[1] = 0; | ||
| 2248 | dcmd->cmd = MFI_CMD_DCMD; | ||
| 2249 | dcmd->cmd_status = 0xFF; | ||
| 2250 | dcmd->sge_count = 1; | ||
| 2251 | dcmd->flags = MFI_FRAME_DIR_READ; | ||
| 2252 | dcmd->timeout = 0; | ||
| 2253 | dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); | ||
| 2254 | dcmd->opcode = MR_DCMD_PD_LIST_QUERY; | ||
| 2255 | dcmd->sgl.sge32[0].phys_addr = ci_h; | ||
| 2256 | dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); | ||
| 2257 | |||
| 2258 | if (!megasas_issue_polled(instance, cmd)) { | ||
| 2259 | ret = 0; | ||
| 2260 | } else { | ||
| 2261 | ret = -1; | ||
| 2262 | } | ||
| 2263 | |||
| 2264 | /* | ||
| 2265 | * The following code populates the driver's pd_list from the FW reply. | ||
| 2266 | */ | ||
| 2267 | |||
| 2268 | pd_addr = ci->addr; | ||
| 2269 | |||
| 2270 | if ( ret == 0 && | ||
| 2271 | (ci->count < | ||
| 2272 | (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { | ||
| 2273 | |||
| 2274 | memset(instance->pd_list, 0, | ||
| 2275 | MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); | ||
| 2276 | |||
| 2277 | for (pd_index = 0; pd_index < ci->count; pd_index++) { | ||
| 2278 | |||
| 2279 | instance->pd_list[pd_addr->deviceId].tid = | ||
| 2280 | pd_addr->deviceId; | ||
| 2281 | instance->pd_list[pd_addr->deviceId].driveType = | ||
| 2282 | pd_addr->scsiDevType; | ||
| 2283 | instance->pd_list[pd_addr->deviceId].driveState = | ||
| 2284 | MR_PD_STATE_SYSTEM; | ||
| 2285 | pd_addr++; | ||
| 2286 | } | ||
| 2287 | } | ||
| 2288 | |||
| 2289 | pci_free_consistent(instance->pdev, | ||
| 2290 | MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), | ||
| 2291 | ci, ci_h); | ||
| 2292 | megasas_return_cmd(instance, cmd); | ||
| 2293 | |||
| 2294 | return ret; | ||
| 2295 | } | ||
| 2296 | |||
| 1885 | /** | 2297 | /** |
| 1886 | * megasas_get_controller_info - Returns FW's controller structure | 2298 | * megasas_get_controller_info - Returns FW's controller structure |
| 1887 | * @instance: Adapter soft state | 2299 | * @instance: Adapter soft state |
| @@ -2081,6 +2493,8 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
| 2081 | * Map the message registers | 2493 | * Map the message registers |
| 2082 | */ | 2494 | */ |
| 2083 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || | 2495 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || |
| 2496 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || | ||
| 2497 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
| 2084 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { | 2498 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { |
| 2085 | instance->base_addr = pci_resource_start(instance->pdev, 1); | 2499 | instance->base_addr = pci_resource_start(instance->pdev, 1); |
| 2086 | } else { | 2500 | } else { |
| @@ -2111,6 +2525,10 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
| 2111 | case PCI_DEVICE_ID_LSI_SAS0079GEN2: | 2525 | case PCI_DEVICE_ID_LSI_SAS0079GEN2: |
| 2112 | instance->instancet = &megasas_instance_template_gen2; | 2526 | instance->instancet = &megasas_instance_template_gen2; |
| 2113 | break; | 2527 | break; |
| 2528 | case PCI_DEVICE_ID_LSI_SAS0073SKINNY: | ||
| 2529 | case PCI_DEVICE_ID_LSI_SAS0071SKINNY: | ||
| 2530 | instance->instancet = &megasas_instance_template_skinny; | ||
| 2531 | break; | ||
| 2114 | case PCI_DEVICE_ID_LSI_SAS1064R: | 2532 | case PCI_DEVICE_ID_LSI_SAS1064R: |
| 2115 | case PCI_DEVICE_ID_DELL_PERC5: | 2533 | case PCI_DEVICE_ID_DELL_PERC5: |
| 2116 | default: | 2534 | default: |
| @@ -2166,6 +2584,10 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
| 2166 | if (megasas_issue_init_mfi(instance)) | 2584 | if (megasas_issue_init_mfi(instance)) |
| 2167 | goto fail_fw_init; | 2585 | goto fail_fw_init; |
| 2168 | 2586 | ||
| 2587 | memset(instance->pd_list, 0 , | ||
| 2588 | (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); | ||
| 2589 | megasas_get_pd_list(instance); | ||
| 2590 | |||
| 2169 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); | 2591 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); |
| 2170 | 2592 | ||
| 2171 | /* | 2593 | /* |
| @@ -2409,6 +2831,11 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
| 2409 | dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; | 2831 | dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; |
| 2410 | dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); | 2832 | dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); |
| 2411 | 2833 | ||
| 2834 | if (instance->aen_cmd != NULL) { | ||
| 2835 | megasas_return_cmd(instance, cmd); | ||
| 2836 | return 0; | ||
| 2837 | } | ||
| 2838 | |||
| 2412 | /* | 2839 | /* |
| 2413 | * Store reference to the cmd used to register for AEN. When an | 2840 | * Store reference to the cmd used to register for AEN. When an |
| 2414 | * application wants us to register for AEN, we have to abort this | 2841 | * application wants us to register for AEN, we have to abort this |
| @@ -2419,7 +2846,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
| 2419 | /* | 2846 | /* |
| 2420 | * Issue the aen registration frame | 2847 | * Issue the aen registration frame |
| 2421 | */ | 2848 | */ |
| 2422 | instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); | 2849 | instance->instancet->fire_cmd(instance, |
| 2850 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
| 2423 | 2851 | ||
| 2424 | return 0; | 2852 | return 0; |
| 2425 | } | 2853 | } |
| @@ -2465,7 +2893,13 @@ static int megasas_io_attach(struct megasas_instance *instance) | |||
| 2465 | */ | 2893 | */ |
| 2466 | host->irq = instance->pdev->irq; | 2894 | host->irq = instance->pdev->irq; |
| 2467 | host->unique_id = instance->unique_id; | 2895 | host->unique_id = instance->unique_id; |
| 2468 | host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; | 2896 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
| 2897 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 2898 | host->can_queue = | ||
| 2899 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
| 2900 | } else | ||
| 2901 | host->can_queue = | ||
| 2902 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
| 2469 | host->this_id = instance->init_id; | 2903 | host->this_id = instance->init_id; |
| 2470 | host->sg_tablesize = instance->max_num_sge; | 2904 | host->sg_tablesize = instance->max_num_sge; |
| 2471 | host->max_sectors = instance->max_sectors_per_req; | 2905 | host->max_sectors = instance->max_sectors_per_req; |
| @@ -2572,6 +3006,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2572 | 3006 | ||
| 2573 | *instance->producer = 0; | 3007 | *instance->producer = 0; |
| 2574 | *instance->consumer = 0; | 3008 | *instance->consumer = 0; |
| 3009 | megasas_poll_wait_aen = 0; | ||
| 3010 | instance->flag_ieee = 0; | ||
| 3011 | instance->ev = NULL; | ||
| 2575 | 3012 | ||
| 2576 | instance->evt_detail = pci_alloc_consistent(pdev, | 3013 | instance->evt_detail = pci_alloc_consistent(pdev, |
| 2577 | sizeof(struct | 3014 | sizeof(struct |
| @@ -2595,10 +3032,11 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2595 | init_waitqueue_head(&instance->abort_cmd_wait_q); | 3032 | init_waitqueue_head(&instance->abort_cmd_wait_q); |
| 2596 | 3033 | ||
| 2597 | spin_lock_init(&instance->cmd_pool_lock); | 3034 | spin_lock_init(&instance->cmd_pool_lock); |
| 3035 | spin_lock_init(&instance->fire_lock); | ||
| 2598 | spin_lock_init(&instance->completion_lock); | 3036 | spin_lock_init(&instance->completion_lock); |
| 3037 | spin_lock_init(&poll_aen_lock); | ||
| 2599 | 3038 | ||
| 2600 | mutex_init(&instance->aen_mutex); | 3039 | mutex_init(&instance->aen_mutex); |
| 2601 | sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); | ||
| 2602 | 3040 | ||
| 2603 | /* | 3041 | /* |
| 2604 | * Initialize PCI related and misc parameters | 3042 | * Initialize PCI related and misc parameters |
| @@ -2608,8 +3046,16 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2608 | instance->unique_id = pdev->bus->number << 8 | pdev->devfn; | 3046 | instance->unique_id = pdev->bus->number << 8 | pdev->devfn; |
| 2609 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; | 3047 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; |
| 2610 | 3048 | ||
| 3049 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
| 3050 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
| 3051 | instance->flag_ieee = 1; | ||
| 3052 | sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); | ||
| 3053 | } else | ||
| 3054 | sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); | ||
| 3055 | |||
| 2611 | megasas_dbg_lvl = 0; | 3056 | megasas_dbg_lvl = 0; |
| 2612 | instance->flag = 0; | 3057 | instance->flag = 0; |
| 3058 | instance->unload = 1; | ||
| 2613 | instance->last_time = 0; | 3059 | instance->last_time = 0; |
| 2614 | 3060 | ||
| 2615 | /* | 3061 | /* |
| @@ -2655,6 +3101,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2655 | if (megasas_io_attach(instance)) | 3101 | if (megasas_io_attach(instance)) |
| 2656 | goto fail_io_attach; | 3102 | goto fail_io_attach; |
| 2657 | 3103 | ||
| 3104 | instance->unload = 0; | ||
| 2658 | return 0; | 3105 | return 0; |
| 2659 | 3106 | ||
| 2660 | fail_start_aen: | 3107 | fail_start_aen: |
| @@ -2778,12 +3225,23 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 2778 | 3225 | ||
| 2779 | instance = pci_get_drvdata(pdev); | 3226 | instance = pci_get_drvdata(pdev); |
| 2780 | host = instance->host; | 3227 | host = instance->host; |
| 3228 | instance->unload = 1; | ||
| 2781 | 3229 | ||
| 2782 | if (poll_mode_io) | 3230 | if (poll_mode_io) |
| 2783 | del_timer_sync(&instance->io_completion_timer); | 3231 | del_timer_sync(&instance->io_completion_timer); |
| 2784 | 3232 | ||
| 2785 | megasas_flush_cache(instance); | 3233 | megasas_flush_cache(instance); |
| 2786 | megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); | 3234 | megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); |
| 3235 | |||
| 3236 | /* cancel the delayed work if this work is still in the queue */ | ||
| 3237 | if (instance->ev != NULL) { | ||
| 3238 | struct megasas_aen_event *ev = instance->ev; | ||
| 3239 | cancel_delayed_work( | ||
| 3240 | (struct delayed_work *)&ev->hotplug_work); | ||
| 3241 | flush_scheduled_work(); | ||
| 3242 | instance->ev = NULL; | ||
| 3243 | } | ||
| 3244 | |||
| 2787 | tasklet_kill(&instance->isr_tasklet); | 3245 | tasklet_kill(&instance->isr_tasklet); |
| 2788 | 3246 | ||
| 2789 | pci_set_drvdata(instance->pdev, instance); | 3247 | pci_set_drvdata(instance->pdev, instance); |
| @@ -2873,6 +3331,8 @@ megasas_resume(struct pci_dev *pdev) | |||
| 2873 | megasas_start_timer(instance, &instance->io_completion_timer, | 3331 | megasas_start_timer(instance, &instance->io_completion_timer, |
| 2874 | megasas_io_completion_timer, | 3332 | megasas_io_completion_timer, |
| 2875 | MEGASAS_COMPLETION_TIMER_INTERVAL); | 3333 | MEGASAS_COMPLETION_TIMER_INTERVAL); |
| 3334 | instance->unload = 0; | ||
| 3335 | |||
| 2876 | return 0; | 3336 | return 0; |
| 2877 | 3337 | ||
| 2878 | fail_irq: | 3338 | fail_irq: |
| @@ -2913,6 +3373,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) | |||
| 2913 | struct megasas_instance *instance; | 3373 | struct megasas_instance *instance; |
| 2914 | 3374 | ||
| 2915 | instance = pci_get_drvdata(pdev); | 3375 | instance = pci_get_drvdata(pdev); |
| 3376 | instance->unload = 1; | ||
| 2916 | host = instance->host; | 3377 | host = instance->host; |
| 2917 | 3378 | ||
| 2918 | if (poll_mode_io) | 3379 | if (poll_mode_io) |
| @@ -2921,6 +3382,16 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) | |||
| 2921 | scsi_remove_host(instance->host); | 3382 | scsi_remove_host(instance->host); |
| 2922 | megasas_flush_cache(instance); | 3383 | megasas_flush_cache(instance); |
| 2923 | megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); | 3384 | megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); |
| 3385 | |||
| 3386 | /* cancel the delayed work if this work is still in the queue */ | ||
| 3387 | if (instance->ev != NULL) { | ||
| 3388 | struct megasas_aen_event *ev = instance->ev; | ||
| 3389 | cancel_delayed_work( | ||
| 3390 | (struct delayed_work *)&ev->hotplug_work); | ||
| 3391 | flush_scheduled_work(); | ||
| 3392 | instance->ev = NULL; | ||
| 3393 | } | ||
| 3394 | |||
| 2924 | tasklet_kill(&instance->isr_tasklet); | 3395 | tasklet_kill(&instance->isr_tasklet); |
| 2925 | 3396 | ||
| 2926 | /* | 3397 | /* |
| @@ -2969,6 +3440,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) | |||
| 2969 | static void megasas_shutdown(struct pci_dev *pdev) | 3440 | static void megasas_shutdown(struct pci_dev *pdev) |
| 2970 | { | 3441 | { |
| 2971 | struct megasas_instance *instance = pci_get_drvdata(pdev); | 3442 | struct megasas_instance *instance = pci_get_drvdata(pdev); |
| 3443 | instance->unload = 1; | ||
| 2972 | megasas_flush_cache(instance); | 3444 | megasas_flush_cache(instance); |
| 2973 | megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); | 3445 | megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); |
| 2974 | } | 3446 | } |
| @@ -3016,6 +3488,23 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) | |||
| 3016 | } | 3488 | } |
| 3017 | 3489 | ||
| 3018 | /** | 3490 | /** |
| 3491 | * megasas_mgmt_poll - char node "poll" entry point | ||
| 3492 | */ | ||
| 3493 | static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) | ||
| 3494 | { | ||
| 3495 | unsigned int mask; | ||
| 3496 | unsigned long flags; | ||
| 3497 | poll_wait(file, &megasas_poll_wait, wait); | ||
| 3498 | spin_lock_irqsave(&poll_aen_lock, flags); | ||
| 3499 | if (megasas_poll_wait_aen) | ||
| 3500 | mask = (POLLIN | POLLRDNORM); | ||
| 3501 | else | ||
| 3502 | mask = 0; | ||
| 3503 | spin_unlock_irqrestore(&poll_aen_lock, flags); | ||
| 3504 | return mask; | ||
| 3505 | } | ||
| 3506 | |||
| 3507 | /** | ||
| 3019 | * megasas_mgmt_fw_ioctl - Issues management ioctls to FW | 3508 | * megasas_mgmt_fw_ioctl - Issues management ioctls to FW |
| 3020 | * @instance: Adapter soft state | 3509 | * @instance: Adapter soft state |
| 3021 | * @argp: User's ioctl packet | 3510 | * @argp: User's ioctl packet |
| @@ -3032,7 +3521,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
| 3032 | int error = 0, i; | 3521 | int error = 0, i; |
| 3033 | void *sense = NULL; | 3522 | void *sense = NULL; |
| 3034 | dma_addr_t sense_handle; | 3523 | dma_addr_t sense_handle; |
| 3035 | u32 *sense_ptr; | 3524 | unsigned long *sense_ptr; |
| 3036 | 3525 | ||
| 3037 | memset(kbuff_arr, 0, sizeof(kbuff_arr)); | 3526 | memset(kbuff_arr, 0, sizeof(kbuff_arr)); |
| 3038 | 3527 | ||
| @@ -3056,6 +3545,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
| 3056 | */ | 3545 | */ |
| 3057 | memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); | 3546 | memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); |
| 3058 | cmd->frame->hdr.context = cmd->index; | 3547 | cmd->frame->hdr.context = cmd->index; |
| 3548 | cmd->frame->hdr.pad_0 = 0; | ||
| 3059 | 3549 | ||
| 3060 | /* | 3550 | /* |
| 3061 | * The management interface between applications and the fw uses | 3551 | * The management interface between applications and the fw uses |
| @@ -3109,7 +3599,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
| 3109 | } | 3599 | } |
| 3110 | 3600 | ||
| 3111 | sense_ptr = | 3601 | sense_ptr = |
| 3112 | (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); | 3602 | (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); |
| 3113 | *sense_ptr = sense_handle; | 3603 | *sense_ptr = sense_handle; |
| 3114 | } | 3604 | } |
| 3115 | 3605 | ||
| @@ -3140,8 +3630,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
| 3140 | * sense_ptr points to the location that has the user | 3630 | * sense_ptr points to the location that has the user |
| 3141 | * sense buffer address | 3631 | * sense buffer address |
| 3142 | */ | 3632 | */ |
| 3143 | sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + | 3633 | sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + |
| 3144 | ioc->sense_off); | 3634 | ioc->sense_off); |
| 3145 | 3635 | ||
| 3146 | if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), | 3636 | if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), |
| 3147 | sense, ioc->sense_len)) { | 3637 | sense, ioc->sense_len)) { |
| @@ -3177,20 +3667,6 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
| 3177 | return error; | 3667 | return error; |
| 3178 | } | 3668 | } |
| 3179 | 3669 | ||
| 3180 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) | ||
| 3181 | { | ||
| 3182 | int i; | ||
| 3183 | |||
| 3184 | for (i = 0; i < megasas_mgmt_info.max_index; i++) { | ||
| 3185 | |||
| 3186 | if ((megasas_mgmt_info.instance[i]) && | ||
| 3187 | (megasas_mgmt_info.instance[i]->host->host_no == host_no)) | ||
| 3188 | return megasas_mgmt_info.instance[i]; | ||
| 3189 | } | ||
| 3190 | |||
| 3191 | return NULL; | ||
| 3192 | } | ||
| 3193 | |||
| 3194 | static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) | 3670 | static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) |
| 3195 | { | 3671 | { |
| 3196 | struct megasas_iocpacket __user *user_ioc = | 3672 | struct megasas_iocpacket __user *user_ioc = |
| @@ -3214,6 +3690,17 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) | |||
| 3214 | goto out_kfree_ioc; | 3690 | goto out_kfree_ioc; |
| 3215 | } | 3691 | } |
| 3216 | 3692 | ||
| 3693 | if (instance->hw_crit_error == 1) { | ||
| 3694 | printk(KERN_DEBUG "Controller in Crit ERROR\n"); | ||
| 3695 | error = -ENODEV; | ||
| 3696 | goto out_kfree_ioc; | ||
| 3697 | } | ||
| 3698 | |||
| 3699 | if (instance->unload == 1) { | ||
| 3700 | error = -ENODEV; | ||
| 3701 | goto out_kfree_ioc; | ||
| 3702 | } | ||
| 3703 | |||
| 3217 | /* | 3704 | /* |
| 3218 | * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds | 3705 | * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds |
| 3219 | */ | 3706 | */ |
| @@ -3249,6 +3736,14 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) | |||
| 3249 | if (!instance) | 3736 | if (!instance) |
| 3250 | return -ENODEV; | 3737 | return -ENODEV; |
| 3251 | 3738 | ||
| 3739 | if (instance->hw_crit_error == 1) { | ||
| 3740 | error = -ENODEV; | ||
| 3741 | } | ||
| 3742 | |||
| 3743 | if (instance->unload == 1) { | ||
| 3744 | return -ENODEV; | ||
| 3745 | } | ||
| 3746 | |||
| 3252 | mutex_lock(&instance->aen_mutex); | 3747 | mutex_lock(&instance->aen_mutex); |
| 3253 | error = megasas_register_aen(instance, aen.seq_num, | 3748 | error = megasas_register_aen(instance, aen.seq_num, |
| 3254 | aen.class_locale_word); | 3749 | aen.class_locale_word); |
| @@ -3337,6 +3832,7 @@ static const struct file_operations megasas_mgmt_fops = { | |||
| 3337 | .open = megasas_mgmt_open, | 3832 | .open = megasas_mgmt_open, |
| 3338 | .fasync = megasas_mgmt_fasync, | 3833 | .fasync = megasas_mgmt_fasync, |
| 3339 | .unlocked_ioctl = megasas_mgmt_ioctl, | 3834 | .unlocked_ioctl = megasas_mgmt_ioctl, |
| 3835 | .poll = megasas_mgmt_poll, | ||
| 3340 | #ifdef CONFIG_COMPAT | 3836 | #ifdef CONFIG_COMPAT |
| 3341 | .compat_ioctl = megasas_mgmt_compat_ioctl, | 3837 | .compat_ioctl = megasas_mgmt_compat_ioctl, |
| 3342 | #endif | 3838 | #endif |
| @@ -3378,6 +3874,15 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, | |||
| 3378 | NULL); | 3874 | NULL); |
| 3379 | 3875 | ||
| 3380 | static ssize_t | 3876 | static ssize_t |
| 3877 | megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) | ||
| 3878 | { | ||
| 3879 | return sprintf(buf, "%u\n", support_poll_for_event); | ||
| 3880 | } | ||
| 3881 | |||
| 3882 | static DRIVER_ATTR(support_poll_for_event, S_IRUGO, | ||
| 3883 | megasas_sysfs_show_support_poll_for_event, NULL); | ||
| 3884 | |||
| 3885 | static ssize_t | ||
| 3381 | megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) | 3886 | megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) |
| 3382 | { | 3887 | { |
| 3383 | return sprintf(buf, "%u\n", megasas_dbg_lvl); | 3888 | return sprintf(buf, "%u\n", megasas_dbg_lvl); |
| @@ -3451,6 +3956,92 @@ out: | |||
| 3451 | return retval; | 3956 | return retval; |
| 3452 | } | 3957 | } |
| 3453 | 3958 | ||
| 3959 | static void | ||
| 3960 | megasas_aen_polling(struct work_struct *work) | ||
| 3961 | { | ||
| 3962 | struct megasas_aen_event *ev = | ||
| 3963 | container_of(work, struct megasas_aen_event, hotplug_work); | ||
| 3964 | struct megasas_instance *instance = ev->instance; | ||
| 3965 | union megasas_evt_class_locale class_locale; | ||
| 3966 | struct Scsi_Host *host; | ||
| 3967 | struct scsi_device *sdev1; | ||
| 3968 | u16 pd_index = 0; | ||
| 3969 | int i, j, doscan = 0; | ||
| 3970 | u32 seq_num; | ||
| 3971 | int error; | ||
| 3972 | |||
| 3973 | if (!instance) { | ||
| 3974 | printk(KERN_ERR "invalid instance!\n"); | ||
| 3975 | kfree(ev); | ||
| 3976 | return; | ||
| 3977 | } | ||
| 3978 | instance->ev = NULL; | ||
| 3979 | host = instance->host; | ||
| 3980 | if (instance->evt_detail) { | ||
| 3981 | |||
| 3982 | switch (instance->evt_detail->code) { | ||
| 3983 | case MR_EVT_PD_INSERTED: | ||
| 3984 | case MR_EVT_PD_REMOVED: | ||
| 3985 | case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: | ||
| 3986 | doscan = 1; | ||
| 3987 | break; | ||
| 3988 | default: | ||
| 3989 | doscan = 0; | ||
| 3990 | break; | ||
| 3991 | } | ||
| 3992 | } else { | ||
| 3993 | printk(KERN_ERR "invalid evt_detail!\n"); | ||
| 3994 | kfree(ev); | ||
| 3995 | return; | ||
| 3996 | } | ||
| 3997 | |||
| 3998 | if (doscan) { | ||
| 3999 | printk(KERN_INFO "scanning ...\n"); | ||
| 4000 | megasas_get_pd_list(instance); | ||
| 4001 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { | ||
| 4002 | for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { | ||
| 4003 | pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; | ||
| 4004 | sdev1 = scsi_device_lookup(host, i, j, 0); | ||
| 4005 | if (instance->pd_list[pd_index].driveState == | ||
| 4006 | MR_PD_STATE_SYSTEM) { | ||
| 4007 | if (!sdev1) { | ||
| 4008 | scsi_add_device(host, i, j, 0); | ||
| 4009 | } | ||
| 4010 | if (sdev1) | ||
| 4011 | scsi_device_put(sdev1); | ||
| 4012 | } else { | ||
| 4013 | if (sdev1) { | ||
| 4014 | scsi_remove_device(sdev1); | ||
| 4015 | scsi_device_put(sdev1); | ||
| 4016 | } | ||
| 4017 | } | ||
| 4018 | } | ||
| 4019 | } | ||
| 4020 | } | ||
| 4021 | |||
| 4022 | if ( instance->aen_cmd != NULL ) { | ||
| 4023 | kfree(ev); | ||
| 4024 | return ; | ||
| 4025 | } | ||
| 4026 | |||
| 4027 | seq_num = instance->evt_detail->seq_num + 1; | ||
| 4028 | |||
| 4029 | /* Register AEN with FW for latest sequence number plus 1 */ | ||
| 4030 | class_locale.members.reserved = 0; | ||
| 4031 | class_locale.members.locale = MR_EVT_LOCALE_ALL; | ||
| 4032 | class_locale.members.class = MR_EVT_CLASS_DEBUG; | ||
| 4033 | mutex_lock(&instance->aen_mutex); | ||
| 4034 | error = megasas_register_aen(instance, seq_num, | ||
| 4035 | class_locale.word); | ||
| 4036 | mutex_unlock(&instance->aen_mutex); | ||
| 4037 | |||
| 4038 | if (error) | ||
| 4039 | printk(KERN_ERR "register aen failed error %x\n", error); | ||
| 4040 | |||
| 4041 | kfree(ev); | ||
| 4042 | } | ||
| 4043 | |||
| 4044 | |||
| 3454 | static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, | 4045 | static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, |
| 3455 | megasas_sysfs_show_poll_mode_io, | 4046 | megasas_sysfs_show_poll_mode_io, |
| 3456 | megasas_sysfs_set_poll_mode_io); | 4047 | megasas_sysfs_set_poll_mode_io); |
| @@ -3468,6 +4059,8 @@ static int __init megasas_init(void) | |||
| 3468 | printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, | 4059 | printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, |
| 3469 | MEGASAS_EXT_VERSION); | 4060 | MEGASAS_EXT_VERSION); |
| 3470 | 4061 | ||
| 4062 | support_poll_for_event = 2; | ||
| 4063 | |||
| 3471 | memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); | 4064 | memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); |
| 3472 | 4065 | ||
| 3473 | /* | 4066 | /* |
| @@ -3500,6 +4093,12 @@ static int __init megasas_init(void) | |||
| 3500 | &driver_attr_release_date); | 4093 | &driver_attr_release_date); |
| 3501 | if (rval) | 4094 | if (rval) |
| 3502 | goto err_dcf_rel_date; | 4095 | goto err_dcf_rel_date; |
| 4096 | |||
| 4097 | rval = driver_create_file(&megasas_pci_driver.driver, | ||
| 4098 | &driver_attr_support_poll_for_event); | ||
| 4099 | if (rval) | ||
| 4100 | goto err_dcf_support_poll_for_event; | ||
| 4101 | |||
| 3503 | rval = driver_create_file(&megasas_pci_driver.driver, | 4102 | rval = driver_create_file(&megasas_pci_driver.driver, |
| 3504 | &driver_attr_dbg_lvl); | 4103 | &driver_attr_dbg_lvl); |
| 3505 | if (rval) | 4104 | if (rval) |
| @@ -3516,7 +4115,12 @@ err_dcf_poll_mode_io: | |||
| 3516 | &driver_attr_dbg_lvl); | 4115 | &driver_attr_dbg_lvl); |
| 3517 | err_dcf_dbg_lvl: | 4116 | err_dcf_dbg_lvl: |
| 3518 | driver_remove_file(&megasas_pci_driver.driver, | 4117 | driver_remove_file(&megasas_pci_driver.driver, |
| 4118 | &driver_attr_support_poll_for_event); | ||
| 4119 | |||
| 4120 | err_dcf_support_poll_for_event: | ||
| 4121 | driver_remove_file(&megasas_pci_driver.driver, | ||
| 3519 | &driver_attr_release_date); | 4122 | &driver_attr_release_date); |
| 4123 | |||
| 3520 | err_dcf_rel_date: | 4124 | err_dcf_rel_date: |
| 3521 | driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); | 4125 | driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); |
| 3522 | err_dcf_attr_ver: | 4126 | err_dcf_attr_ver: |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0d033248fdf1..72b28e436e32 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
| @@ -18,9 +18,9 @@ | |||
| 18 | /* | 18 | /* |
| 19 | * MegaRAID SAS Driver meta data | 19 | * MegaRAID SAS Driver meta data |
| 20 | */ | 20 | */ |
| 21 | #define MEGASAS_VERSION "00.00.04.01" | 21 | #define MEGASAS_VERSION "00.00.04.12-rc1" |
| 22 | #define MEGASAS_RELDATE "July 24, 2008" | 22 | #define MEGASAS_RELDATE "Sep. 17, 2009" |
| 23 | #define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008" | 23 | #define MEGASAS_EXT_VERSION "Thu Sep. 17 11:41:51 PST 2009" |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * Device IDs | 26 | * Device IDs |
| @@ -30,6 +30,8 @@ | |||
| 30 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 | 30 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 |
| 31 | #define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 | 31 | #define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 |
| 32 | #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 | 32 | #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 |
| 33 | #define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 | ||
| 34 | #define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 | ||
| 33 | 35 | ||
| 34 | /* | 36 | /* |
| 35 | * ===================================== | 37 | * ===================================== |
| @@ -94,6 +96,7 @@ | |||
| 94 | #define MFI_FRAME_DIR_WRITE 0x0008 | 96 | #define MFI_FRAME_DIR_WRITE 0x0008 |
| 95 | #define MFI_FRAME_DIR_READ 0x0010 | 97 | #define MFI_FRAME_DIR_READ 0x0010 |
| 96 | #define MFI_FRAME_DIR_BOTH 0x0018 | 98 | #define MFI_FRAME_DIR_BOTH 0x0018 |
| 99 | #define MFI_FRAME_IEEE 0x0020 | ||
| 97 | 100 | ||
| 98 | /* | 101 | /* |
| 99 | * Definition for cmd_status | 102 | * Definition for cmd_status |
| @@ -131,6 +134,7 @@ | |||
| 131 | #define MR_DCMD_CLUSTER 0x08000000 | 134 | #define MR_DCMD_CLUSTER 0x08000000 |
| 132 | #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 | 135 | #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 |
| 133 | #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 | 136 | #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 |
| 137 | #define MR_DCMD_PD_LIST_QUERY 0x02010100 | ||
| 134 | 138 | ||
| 135 | /* | 139 | /* |
| 136 | * MFI command completion codes | 140 | * MFI command completion codes |
| @@ -251,9 +255,100 @@ enum MR_EVT_ARGS { | |||
| 251 | MR_EVT_ARGS_STR, | 255 | MR_EVT_ARGS_STR, |
| 252 | MR_EVT_ARGS_TIME, | 256 | MR_EVT_ARGS_TIME, |
| 253 | MR_EVT_ARGS_ECC, | 257 | MR_EVT_ARGS_ECC, |
| 258 | MR_EVT_ARGS_LD_PROP, | ||
| 259 | MR_EVT_ARGS_PD_SPARE, | ||
| 260 | MR_EVT_ARGS_PD_INDEX, | ||
| 261 | MR_EVT_ARGS_DIAG_PASS, | ||
| 262 | MR_EVT_ARGS_DIAG_FAIL, | ||
| 263 | MR_EVT_ARGS_PD_LBA_LBA, | ||
| 264 | MR_EVT_ARGS_PORT_PHY, | ||
| 265 | MR_EVT_ARGS_PD_MISSING, | ||
| 266 | MR_EVT_ARGS_PD_ADDRESS, | ||
| 267 | MR_EVT_ARGS_BITMAP, | ||
| 268 | MR_EVT_ARGS_CONNECTOR, | ||
| 269 | MR_EVT_ARGS_PD_PD, | ||
| 270 | MR_EVT_ARGS_PD_FRU, | ||
| 271 | MR_EVT_ARGS_PD_PATHINFO, | ||
| 272 | MR_EVT_ARGS_PD_POWER_STATE, | ||
| 273 | MR_EVT_ARGS_GENERIC, | ||
| 274 | }; | ||
| 254 | 275 | ||
| 276 | /* | ||
| 277 | * define constants for device list query options | ||
| 278 | */ | ||
| 279 | enum MR_PD_QUERY_TYPE { | ||
| 280 | MR_PD_QUERY_TYPE_ALL = 0, | ||
| 281 | MR_PD_QUERY_TYPE_STATE = 1, | ||
| 282 | MR_PD_QUERY_TYPE_POWER_STATE = 2, | ||
| 283 | MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, | ||
| 284 | MR_PD_QUERY_TYPE_SPEED = 4, | ||
| 285 | MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, | ||
| 255 | }; | 286 | }; |
| 256 | 287 | ||
| 288 | #define MR_EVT_CFG_CLEARED 0x0004 | ||
| 289 | #define MR_EVT_LD_STATE_CHANGE 0x0051 | ||
| 290 | #define MR_EVT_PD_INSERTED 0x005b | ||
| 291 | #define MR_EVT_PD_REMOVED 0x0070 | ||
| 292 | #define MR_EVT_LD_CREATED 0x008a | ||
| 293 | #define MR_EVT_LD_DELETED 0x008b | ||
| 294 | #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db | ||
| 295 | #define MR_EVT_LD_OFFLINE 0x00fc | ||
| 296 | #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 | ||
| 297 | #define MAX_LOGICAL_DRIVES 64 | ||
| 298 | |||
| 299 | enum MR_PD_STATE { | ||
| 300 | MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, | ||
| 301 | MR_PD_STATE_UNCONFIGURED_BAD = 0x01, | ||
| 302 | MR_PD_STATE_HOT_SPARE = 0x02, | ||
| 303 | MR_PD_STATE_OFFLINE = 0x10, | ||
| 304 | MR_PD_STATE_FAILED = 0x11, | ||
| 305 | MR_PD_STATE_REBUILD = 0x14, | ||
| 306 | MR_PD_STATE_ONLINE = 0x18, | ||
| 307 | MR_PD_STATE_COPYBACK = 0x20, | ||
| 308 | MR_PD_STATE_SYSTEM = 0x40, | ||
| 309 | }; | ||
| 310 | |||
| 311 | |||
| 312 | /* | ||
| 313 | * defines the physical drive address structure | ||
| 314 | */ | ||
| 315 | struct MR_PD_ADDRESS { | ||
| 316 | u16 deviceId; | ||
| 317 | u16 enclDeviceId; | ||
| 318 | |||
| 319 | union { | ||
| 320 | struct { | ||
| 321 | u8 enclIndex; | ||
| 322 | u8 slotNumber; | ||
| 323 | } mrPdAddress; | ||
| 324 | struct { | ||
| 325 | u8 enclPosition; | ||
| 326 | u8 enclConnectorIndex; | ||
| 327 | } mrEnclAddress; | ||
| 328 | }; | ||
| 329 | u8 scsiDevType; | ||
| 330 | union { | ||
| 331 | u8 connectedPortBitmap; | ||
| 332 | u8 connectedPortNumbers; | ||
| 333 | }; | ||
| 334 | u64 sasAddr[2]; | ||
| 335 | } __packed; | ||
| 336 | |||
| 337 | /* | ||
| 338 | * defines the physical drive list structure | ||
| 339 | */ | ||
| 340 | struct MR_PD_LIST { | ||
| 341 | u32 size; | ||
| 342 | u32 count; | ||
| 343 | struct MR_PD_ADDRESS addr[1]; | ||
| 344 | } __packed; | ||
| 345 | |||
| 346 | struct megasas_pd_list { | ||
| 347 | u16 tid; | ||
| 348 | u8 driveType; | ||
| 349 | u8 driveState; | ||
| 350 | } __packed; | ||
| 351 | |||
| 257 | /* | 352 | /* |
| 258 | * SAS controller properties | 353 | * SAS controller properties |
| 259 | */ | 354 | */ |
| @@ -282,7 +377,7 @@ struct megasas_ctrl_prop { | |||
| 282 | u8 expose_encl_devices; | 377 | u8 expose_encl_devices; |
| 283 | u8 reserved[38]; | 378 | u8 reserved[38]; |
| 284 | 379 | ||
| 285 | } __attribute__ ((packed)); | 380 | } __packed; |
| 286 | 381 | ||
| 287 | /* | 382 | /* |
| 288 | * SAS controller information | 383 | * SAS controller information |
| @@ -525,7 +620,7 @@ struct megasas_ctrl_info { | |||
| 525 | 620 | ||
| 526 | u8 pad[0x800 - 0x6a0]; | 621 | u8 pad[0x800 - 0x6a0]; |
| 527 | 622 | ||
| 528 | } __attribute__ ((packed)); | 623 | } __packed; |
| 529 | 624 | ||
| 530 | /* | 625 | /* |
| 531 | * =============================== | 626 | * =============================== |
| @@ -540,6 +635,8 @@ struct megasas_ctrl_info { | |||
| 540 | #define MEGASAS_DEFAULT_INIT_ID -1 | 635 | #define MEGASAS_DEFAULT_INIT_ID -1 |
| 541 | #define MEGASAS_MAX_LUN 8 | 636 | #define MEGASAS_MAX_LUN 8 |
| 542 | #define MEGASAS_MAX_LD 64 | 637 | #define MEGASAS_MAX_LD 64 |
| 638 | #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ | ||
| 639 | MEGASAS_MAX_DEV_PER_CHANNEL) | ||
| 543 | 640 | ||
| 544 | #define MEGASAS_DBG_LVL 1 | 641 | #define MEGASAS_DBG_LVL 1 |
| 545 | 642 | ||
| @@ -570,6 +667,7 @@ struct megasas_ctrl_info { | |||
| 570 | * is shown below | 667 | * is shown below |
| 571 | */ | 668 | */ |
| 572 | #define MEGASAS_INT_CMDS 32 | 669 | #define MEGASAS_INT_CMDS 32 |
| 670 | #define MEGASAS_SKINNY_INT_CMDS 5 | ||
| 573 | 671 | ||
| 574 | /* | 672 | /* |
| 575 | * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit | 673 | * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit |
| @@ -584,6 +682,8 @@ struct megasas_ctrl_info { | |||
| 584 | #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 | 682 | #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 |
| 585 | #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 | 683 | #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 |
| 586 | #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) | 684 | #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) |
| 685 | #define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 | ||
| 686 | #define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) | ||
| 587 | 687 | ||
| 588 | /* | 688 | /* |
| 589 | * register set for both 1068 and 1078 controllers | 689 | * register set for both 1068 and 1078 controllers |
| @@ -644,10 +744,17 @@ struct megasas_sge64 { | |||
| 644 | 744 | ||
| 645 | } __attribute__ ((packed)); | 745 | } __attribute__ ((packed)); |
| 646 | 746 | ||
| 747 | struct megasas_sge_skinny { | ||
| 748 | u64 phys_addr; | ||
| 749 | u32 length; | ||
| 750 | u32 flag; | ||
| 751 | } __packed; | ||
| 752 | |||
| 647 | union megasas_sgl { | 753 | union megasas_sgl { |
| 648 | 754 | ||
| 649 | struct megasas_sge32 sge32[1]; | 755 | struct megasas_sge32 sge32[1]; |
| 650 | struct megasas_sge64 sge64[1]; | 756 | struct megasas_sge64 sge64[1]; |
| 757 | struct megasas_sge_skinny sge_skinny[1]; | ||
| 651 | 758 | ||
| 652 | } __attribute__ ((packed)); | 759 | } __attribute__ ((packed)); |
| 653 | 760 | ||
| @@ -1061,16 +1168,10 @@ struct megasas_evt_detail { | |||
| 1061 | 1168 | ||
| 1062 | } __attribute__ ((packed)); | 1169 | } __attribute__ ((packed)); |
| 1063 | 1170 | ||
| 1064 | struct megasas_instance_template { | 1171 | struct megasas_aen_event { |
| 1065 | void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *); | 1172 | struct work_struct hotplug_work; |
| 1066 | 1173 | struct megasas_instance *instance; | |
| 1067 | void (*enable_intr)(struct megasas_register_set __iomem *) ; | 1174 | }; |
| 1068 | void (*disable_intr)(struct megasas_register_set __iomem *); | ||
| 1069 | |||
| 1070 | int (*clear_intr)(struct megasas_register_set __iomem *); | ||
| 1071 | |||
| 1072 | u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); | ||
| 1073 | }; | ||
| 1074 | 1175 | ||
| 1075 | struct megasas_instance { | 1176 | struct megasas_instance { |
| 1076 | 1177 | ||
| @@ -1085,17 +1186,21 @@ struct megasas_instance { | |||
| 1085 | unsigned long base_addr; | 1186 | unsigned long base_addr; |
| 1086 | struct megasas_register_set __iomem *reg_set; | 1187 | struct megasas_register_set __iomem *reg_set; |
| 1087 | 1188 | ||
| 1189 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; | ||
| 1088 | s8 init_id; | 1190 | s8 init_id; |
| 1089 | 1191 | ||
| 1090 | u16 max_num_sge; | 1192 | u16 max_num_sge; |
| 1091 | u16 max_fw_cmds; | 1193 | u16 max_fw_cmds; |
| 1092 | u32 max_sectors_per_req; | 1194 | u32 max_sectors_per_req; |
| 1195 | struct megasas_aen_event *ev; | ||
| 1093 | 1196 | ||
| 1094 | struct megasas_cmd **cmd_list; | 1197 | struct megasas_cmd **cmd_list; |
| 1095 | struct list_head cmd_pool; | 1198 | struct list_head cmd_pool; |
| 1096 | spinlock_t cmd_pool_lock; | 1199 | spinlock_t cmd_pool_lock; |
| 1097 | /* used to synch producer, consumer ptrs in dpc */ | 1200 | /* used to synch producer, consumer ptrs in dpc */ |
| 1098 | spinlock_t completion_lock; | 1201 | spinlock_t completion_lock; |
| 1202 | /* used to sync fire the cmd to fw */ | ||
| 1203 | spinlock_t fire_lock; | ||
| 1099 | struct dma_pool *frame_dma_pool; | 1204 | struct dma_pool *frame_dma_pool; |
| 1100 | struct dma_pool *sense_dma_pool; | 1205 | struct dma_pool *sense_dma_pool; |
| 1101 | 1206 | ||
| @@ -1120,11 +1225,25 @@ struct megasas_instance { | |||
| 1120 | struct tasklet_struct isr_tasklet; | 1225 | struct tasklet_struct isr_tasklet; |
| 1121 | 1226 | ||
| 1122 | u8 flag; | 1227 | u8 flag; |
| 1228 | u8 unload; | ||
| 1229 | u8 flag_ieee; | ||
| 1123 | unsigned long last_time; | 1230 | unsigned long last_time; |
| 1124 | 1231 | ||
| 1125 | struct timer_list io_completion_timer; | 1232 | struct timer_list io_completion_timer; |
| 1126 | }; | 1233 | }; |
| 1127 | 1234 | ||
| 1235 | struct megasas_instance_template { | ||
| 1236 | void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \ | ||
| 1237 | u32, struct megasas_register_set __iomem *); | ||
| 1238 | |||
| 1239 | void (*enable_intr)(struct megasas_register_set __iomem *) ; | ||
| 1240 | void (*disable_intr)(struct megasas_register_set __iomem *); | ||
| 1241 | |||
| 1242 | int (*clear_intr)(struct megasas_register_set __iomem *); | ||
| 1243 | |||
| 1244 | u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); | ||
| 1245 | }; | ||
| 1246 | |||
| 1128 | #define MEGASAS_IS_LOGICAL(scp) \ | 1247 | #define MEGASAS_IS_LOGICAL(scp) \ |
| 1129 | (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 | 1248 | (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 |
| 1130 | 1249 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index f9f6c0839276..914168105297 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
| 9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
| 10 | * | 10 | * |
| 11 | * mpi2.h Version: 02.00.12 | 11 | * mpi2.h Version: 02.00.13 |
| 12 | * | 12 | * |
| 13 | * Version History | 13 | * Version History |
| 14 | * --------------- | 14 | * --------------- |
| @@ -52,6 +52,7 @@ | |||
| 52 | * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those | 52 | * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those |
| 53 | * bytes reserved. | 53 | * bytes reserved. |
| 54 | * Added RAID Accelerator functionality. | 54 | * Added RAID Accelerator functionality. |
| 55 | * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. | ||
| 55 | * -------------------------------------------------------------------------- | 56 | * -------------------------------------------------------------------------- |
| 56 | */ | 57 | */ |
| 57 | 58 | ||
| @@ -77,7 +78,7 @@ | |||
| 77 | #define MPI2_VERSION_02_00 (0x0200) | 78 | #define MPI2_VERSION_02_00 (0x0200) |
| 78 | 79 | ||
| 79 | /* versioning for this MPI header set */ | 80 | /* versioning for this MPI header set */ |
| 80 | #define MPI2_HEADER_VERSION_UNIT (0x0C) | 81 | #define MPI2_HEADER_VERSION_UNIT (0x0D) |
| 81 | #define MPI2_HEADER_VERSION_DEV (0x00) | 82 | #define MPI2_HEADER_VERSION_DEV (0x00) |
| 82 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 83 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
| 83 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 84 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index ab47c4679640..1611c57a6fdf 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
| 7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
| 8 | * | 8 | * |
| 9 | * mpi2_cnfg.h Version: 02.00.11 | 9 | * mpi2_cnfg.h Version: 02.00.12 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -100,6 +100,13 @@ | |||
| 100 | * Added expander reduced functionality data to SAS | 100 | * Added expander reduced functionality data to SAS |
| 101 | * Expander Page 0. | 101 | * Expander Page 0. |
| 102 | * Added SAS PHY Page 2 and SAS PHY Page 3. | 102 | * Added SAS PHY Page 2 and SAS PHY Page 3. |
| 103 | * 07-30-09 02.00.12 Added IO Unit Page 7. | ||
| 104 | * Added new device ids. | ||
| 105 | * Added SAS IO Unit Page 5. | ||
| 106 | * Added partial and slumber power management capable flags | ||
| 107 | * to SAS Device Page 0 Flags field. | ||
| 108 | * Added PhyInfo defines for power condition. | ||
| 109 | * Added Ethernet configuration pages. | ||
| 103 | * -------------------------------------------------------------------------- | 110 | * -------------------------------------------------------------------------- |
| 104 | */ | 111 | */ |
| 105 | 112 | ||
| @@ -182,6 +189,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION | |||
| 182 | #define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) | 189 | #define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) |
| 183 | #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) | 190 | #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) |
| 184 | #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) | 191 | #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) |
| 192 | #define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) | ||
| 185 | 193 | ||
| 186 | 194 | ||
| 187 | /***************************************************************************** | 195 | /***************************************************************************** |
| @@ -268,6 +276,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION | |||
| 268 | #define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) | 276 | #define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) |
| 269 | 277 | ||
| 270 | 278 | ||
| 279 | /* Ethernet PageAddress format */ | ||
| 280 | #define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) | ||
| 281 | #define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) | ||
| 282 | |||
| 283 | #define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) | ||
| 284 | |||
| 285 | |||
| 286 | |||
| 271 | /**************************************************************************** | 287 | /**************************************************************************** |
| 272 | * Configuration messages | 288 | * Configuration messages |
| 273 | ****************************************************************************/ | 289 | ****************************************************************************/ |
| @@ -349,6 +365,15 @@ typedef struct _MPI2_CONFIG_REPLY | |||
| 349 | #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) | 365 | #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) |
| 350 | #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) | 366 | #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) |
| 351 | 367 | ||
| 368 | #define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) | ||
| 369 | #define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) | ||
| 370 | #define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) | ||
| 371 | #define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) | ||
| 372 | #define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) | ||
| 373 | #define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) | ||
| 374 | #define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086) | ||
| 375 | #define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087) | ||
| 376 | |||
| 352 | 377 | ||
| 353 | /* Manufacturing Page 0 */ | 378 | /* Manufacturing Page 0 */ |
| 354 | 379 | ||
| @@ -787,6 +812,56 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { | |||
| 787 | #define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) | 812 | #define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) |
| 788 | 813 | ||
| 789 | 814 | ||
| 815 | /* IO Unit Page 7 */ | ||
| 816 | |||
| 817 | typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { | ||
| 818 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | ||
| 819 | U16 Reserved1; /* 0x04 */ | ||
| 820 | U8 PCIeWidth; /* 0x06 */ | ||
| 821 | U8 PCIeSpeed; /* 0x07 */ | ||
| 822 | U32 ProcessorState; /* 0x08 */ | ||
| 823 | U32 Reserved2; /* 0x0C */ | ||
| 824 | U16 IOCTemperature; /* 0x10 */ | ||
| 825 | U8 IOCTemperatureUnits; /* 0x12 */ | ||
| 826 | U8 IOCSpeed; /* 0x13 */ | ||
| 827 | U32 Reserved3; /* 0x14 */ | ||
| 828 | } MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, | ||
| 829 | Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; | ||
| 830 | |||
| 831 | #define MPI2_IOUNITPAGE7_PAGEVERSION (0x00) | ||
| 832 | |||
| 833 | /* defines for IO Unit Page 7 PCIeWidth field */ | ||
| 834 | #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) | ||
| 835 | #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) | ||
| 836 | #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) | ||
| 837 | #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) | ||
| 838 | |||
| 839 | /* defines for IO Unit Page 7 PCIeSpeed field */ | ||
| 840 | #define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) | ||
| 841 | #define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) | ||
| 842 | #define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) | ||
| 843 | |||
| 844 | /* defines for IO Unit Page 7 ProcessorState field */ | ||
| 845 | #define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) | ||
| 846 | #define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) | ||
| 847 | |||
| 848 | #define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) | ||
| 849 | #define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) | ||
| 850 | #define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) | ||
| 851 | |||
| 852 | /* defines for IO Unit Page 7 IOCTemperatureUnits field */ | ||
| 853 | #define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) | ||
| 854 | #define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) | ||
| 855 | #define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) | ||
| 856 | |||
| 857 | /* defines for IO Unit Page 7 IOCSpeed field */ | ||
| 858 | #define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) | ||
| 859 | #define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) | ||
| 860 | #define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) | ||
| 861 | #define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) | ||
| 862 | |||
| 863 | |||
| 864 | |||
| 790 | /**************************************************************************** | 865 | /**************************************************************************** |
| 791 | * IOC Config Pages | 866 | * IOC Config Pages |
| 792 | ****************************************************************************/ | 867 | ****************************************************************************/ |
| @@ -1470,6 +1545,12 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 | |||
| 1470 | 1545 | ||
| 1471 | /* values for PhyInfo fields */ | 1546 | /* values for PhyInfo fields */ |
| 1472 | #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) | 1547 | #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) |
| 1548 | |||
| 1549 | #define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) | ||
| 1550 | #define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) | ||
| 1551 | #define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) | ||
| 1552 | #define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) | ||
| 1553 | |||
| 1473 | #define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) | 1554 | #define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) |
| 1474 | #define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) | 1555 | #define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) |
| 1475 | #define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) | 1556 | #define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) |
| @@ -1682,11 +1763,11 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 | |||
| 1682 | /* values for SAS IO Unit Page 1 PortFlags */ | 1763 | /* values for SAS IO Unit Page 1 PortFlags */ |
| 1683 | #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) | 1764 | #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) |
| 1684 | 1765 | ||
| 1685 | /* values for SAS IO Unit Page 2 PhyFlags */ | 1766 | /* values for SAS IO Unit Page 1 PhyFlags */ |
| 1686 | #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) | 1767 | #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) |
| 1687 | #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) | 1768 | #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) |
| 1688 | 1769 | ||
| 1689 | /* values for SAS IO Unit Page 0 MaxMinLinkRate */ | 1770 | /* values for SAS IO Unit Page 1 MaxMinLinkRate */ |
| 1690 | #define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) | 1771 | #define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) |
| 1691 | #define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) | 1772 | #define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) |
| 1692 | #define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) | 1773 | #define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) |
| @@ -1745,6 +1826,74 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 | |||
| 1745 | #define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) | 1826 | #define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) |
| 1746 | 1827 | ||
| 1747 | 1828 | ||
| 1829 | /* SAS IO Unit Page 5 */ | ||
| 1830 | |||
| 1831 | typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { | ||
| 1832 | U8 ControlFlags; /* 0x00 */ | ||
| 1833 | U8 Reserved1; /* 0x01 */ | ||
| 1834 | U16 InactivityTimerExponent; /* 0x02 */ | ||
| 1835 | U8 SATAPartialTimeout; /* 0x04 */ | ||
| 1836 | U8 Reserved2; /* 0x05 */ | ||
| 1837 | U8 SATASlumberTimeout; /* 0x06 */ | ||
| 1838 | U8 Reserved3; /* 0x07 */ | ||
| 1839 | U8 SASPartialTimeout; /* 0x08 */ | ||
| 1840 | U8 Reserved4; /* 0x09 */ | ||
| 1841 | U8 SASSlumberTimeout; /* 0x0A */ | ||
| 1842 | U8 Reserved5; /* 0x0B */ | ||
| 1843 | } MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, | ||
| 1844 | MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, | ||
| 1845 | Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t; | ||
| 1846 | |||
| 1847 | /* defines for ControlFlags field */ | ||
| 1848 | #define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) | ||
| 1849 | #define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) | ||
| 1850 | #define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) | ||
| 1851 | #define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) | ||
| 1852 | |||
| 1853 | /* defines for InactivityTimerExponent field */ | ||
| 1854 | #define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) | ||
| 1855 | #define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) | ||
| 1856 | #define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) | ||
| 1857 | #define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) | ||
| 1858 | #define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) | ||
| 1859 | #define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) | ||
| 1860 | #define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) | ||
| 1861 | #define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) | ||
| 1862 | |||
| 1863 | #define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) | ||
| 1864 | #define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) | ||
| 1865 | #define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) | ||
| 1866 | #define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) | ||
| 1867 | #define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) | ||
| 1868 | #define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) | ||
| 1869 | #define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) | ||
| 1870 | #define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) | ||
| 1871 | |||
| 1872 | /* | ||
| 1873 | * Host code (drivers, BIOS, utilities, etc.) should leave this define set to | ||
| 1874 | * one and check Header.ExtPageLength or NumPhys at runtime. | ||
| 1875 | */ | ||
| 1876 | #ifndef MPI2_SAS_IOUNIT5_PHY_MAX | ||
| 1877 | #define MPI2_SAS_IOUNIT5_PHY_MAX (1) | ||
| 1878 | #endif | ||
| 1879 | |||
| 1880 | typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { | ||
| 1881 | MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ | ||
| 1882 | U8 NumPhys; /* 0x08 */ | ||
| 1883 | U8 Reserved1; /* 0x09 */ | ||
| 1884 | U16 Reserved2; /* 0x0A */ | ||
| 1885 | U32 Reserved3; /* 0x0C */ | ||
| 1886 | MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings | ||
| 1887 | [MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */ | ||
| 1888 | } MPI2_CONFIG_PAGE_SASIOUNIT_5, | ||
| 1889 | MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, | ||
| 1890 | Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; | ||
| 1891 | |||
| 1892 | #define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x00) | ||
| 1893 | |||
| 1894 | |||
| 1895 | |||
| 1896 | |||
| 1748 | /**************************************************************************** | 1897 | /**************************************************************************** |
| 1749 | * SAS Expander Config Pages | 1898 | * SAS Expander Config Pages |
| 1750 | ****************************************************************************/ | 1899 | ****************************************************************************/ |
| @@ -1927,6 +2076,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
| 1927 | /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ | 2076 | /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ |
| 1928 | 2077 | ||
| 1929 | /* values for SAS Device Page 0 Flags field */ | 2078 | /* values for SAS Device Page 0 Flags field */ |
| 2079 | #define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) | ||
| 2080 | #define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) | ||
| 1930 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) | 2081 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) |
| 1931 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) | 2082 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) |
| 1932 | #define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) | 2083 | #define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) |
| @@ -2343,5 +2494,122 @@ typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 | |||
| 2343 | #define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) | 2494 | #define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) |
| 2344 | 2495 | ||
| 2345 | 2496 | ||
| 2497 | /**************************************************************************** | ||
| 2498 | * Ethernet Config Pages | ||
| 2499 | ****************************************************************************/ | ||
| 2500 | |||
| 2501 | /* Ethernet Page 0 */ | ||
| 2502 | |||
| 2503 | /* IP address (union of IPv4 and IPv6) */ | ||
| 2504 | typedef union _MPI2_ETHERNET_IP_ADDR { | ||
| 2505 | U32 IPv4Addr; | ||
| 2506 | U32 IPv6Addr[4]; | ||
| 2507 | } MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR, | ||
| 2508 | Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t; | ||
| 2509 | |||
| 2510 | #define MPI2_ETHERNET_HOST_NAME_LENGTH (32) | ||
| 2511 | |||
| 2512 | typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { | ||
| 2513 | MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ | ||
| 2514 | U8 NumInterfaces; /* 0x08 */ | ||
| 2515 | U8 Reserved0; /* 0x09 */ | ||
| 2516 | U16 Reserved1; /* 0x0A */ | ||
| 2517 | U32 Status; /* 0x0C */ | ||
| 2518 | U8 MediaState; /* 0x10 */ | ||
| 2519 | U8 Reserved2; /* 0x11 */ | ||
| 2520 | U16 Reserved3; /* 0x12 */ | ||
| 2521 | U8 MacAddress[6]; /* 0x14 */ | ||
| 2522 | U8 Reserved4; /* 0x1A */ | ||
| 2523 | U8 Reserved5; /* 0x1B */ | ||
| 2524 | MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */ | ||
| 2525 | MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */ | ||
| 2526 | MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */ | ||
| 2527 | MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */ | ||
| 2528 | MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */ | ||
| 2529 | MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */ | ||
| 2530 | U8 HostName | ||
| 2531 | [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ | ||
| 2532 | } MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0, | ||
| 2533 | Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t; | ||
| 2534 | |||
| 2535 | #define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) | ||
| 2536 | |||
| 2537 | /* values for Ethernet Page 0 Status field */ | ||
| 2538 | #define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) | ||
| 2539 | #define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) | ||
| 2540 | #define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) | ||
| 2541 | #define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) | ||
| 2542 | #define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) | ||
| 2543 | #define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) | ||
| 2544 | #define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) | ||
| 2545 | #define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) | ||
| 2546 | #define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) | ||
| 2547 | #define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) | ||
| 2548 | #define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) | ||
| 2549 | #define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) | ||
| 2550 | |||
| 2551 | /* values for Ethernet Page 0 MediaState field */ | ||
| 2552 | #define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) | ||
| 2553 | #define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) | ||
| 2554 | #define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) | ||
| 2555 | |||
| 2556 | #define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) | ||
| 2557 | #define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) | ||
| 2558 | #define MPI2_ETHPG0_MS_10MBIT (0x01) | ||
| 2559 | #define MPI2_ETHPG0_MS_100MBIT (0x02) | ||
| 2560 | #define MPI2_ETHPG0_MS_1GBIT (0x03) | ||
| 2561 | |||
| 2562 | |||
| 2563 | /* Ethernet Page 1 */ | ||
| 2564 | |||
| 2565 | typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { | ||
| 2566 | MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ | ||
| 2567 | U32 Reserved0; /* 0x08 */ | ||
| 2568 | U32 Flags; /* 0x0C */ | ||
| 2569 | U8 MediaState; /* 0x10 */ | ||
| 2570 | U8 Reserved1; /* 0x11 */ | ||
| 2571 | U16 Reserved2; /* 0x12 */ | ||
| 2572 | U8 MacAddress[6]; /* 0x14 */ | ||
| 2573 | U8 Reserved3; /* 0x1A */ | ||
| 2574 | U8 Reserved4; /* 0x1B */ | ||
| 2575 | MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */ | ||
| 2576 | MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */ | ||
| 2577 | MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */ | ||
| 2578 | MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */ | ||
| 2579 | MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */ | ||
| 2580 | U32 Reserved5; /* 0x6C */ | ||
| 2581 | U32 Reserved6; /* 0x70 */ | ||
| 2582 | U32 Reserved7; /* 0x74 */ | ||
| 2583 | U32 Reserved8; /* 0x78 */ | ||
| 2584 | U8 HostName | ||
| 2585 | [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ | ||
| 2586 | } MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1, | ||
| 2587 | Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t; | ||
| 2588 | |||
| 2589 | #define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) | ||
| 2590 | |||
| 2591 | /* values for Ethernet Page 1 Flags field */ | ||
| 2592 | #define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) | ||
| 2593 | #define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) | ||
| 2594 | #define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) | ||
| 2595 | #define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) | ||
| 2596 | #define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) | ||
| 2597 | #define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) | ||
| 2598 | #define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) | ||
| 2599 | #define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) | ||
| 2600 | #define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) | ||
| 2601 | |||
| 2602 | /* values for Ethernet Page 1 MediaState field */ | ||
| 2603 | #define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) | ||
| 2604 | #define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) | ||
| 2605 | #define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) | ||
| 2606 | |||
| 2607 | #define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) | ||
| 2608 | #define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) | ||
| 2609 | #define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) | ||
| 2610 | #define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) | ||
| 2611 | #define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) | ||
| 2612 | |||
| 2613 | |||
| 2346 | #endif | 2614 | #endif |
| 2347 | 2615 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index c294128bdeb4..ea51ce868690 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
| 7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
| 8 | * | 8 | * |
| 9 | * mpi2_ioc.h Version: 02.00.11 | 9 | * mpi2_ioc.h Version: 02.00.12 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -84,6 +84,9 @@ | |||
| 84 | * Added two new reason codes for SAS Device Status Change | 84 | * Added two new reason codes for SAS Device Status Change |
| 85 | * Event. | 85 | * Event. |
| 86 | * Added new event: SAS PHY Counter. | 86 | * Added new event: SAS PHY Counter. |
| 87 | * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. | ||
| 88 | * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. | ||
| 89 | * Added new product id family for 2208. | ||
| 87 | * -------------------------------------------------------------------------- | 90 | * -------------------------------------------------------------------------- |
| 88 | */ | 91 | */ |
| 89 | 92 | ||
| @@ -274,6 +277,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY | |||
| 274 | #define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) | 277 | #define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) |
| 275 | #define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) | 278 | #define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) |
| 276 | #define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) | 279 | #define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) |
| 280 | #define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) | ||
| 277 | #define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) | 281 | #define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) |
| 278 | #define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) | 282 | #define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) |
| 279 | #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) | 283 | #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) |
| @@ -448,6 +452,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY | |||
| 448 | #define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) | 452 | #define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) |
| 449 | #define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) | 453 | #define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) |
| 450 | #define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) | 454 | #define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) |
| 455 | #define MPI2_EVENT_GPIO_INTERRUPT (0x0023) | ||
| 451 | 456 | ||
| 452 | 457 | ||
| 453 | /* Log Entry Added Event data */ | 458 | /* Log Entry Added Event data */ |
| @@ -469,6 +474,16 @@ typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED | |||
| 469 | MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, | 474 | MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, |
| 470 | Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; | 475 | Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; |
| 471 | 476 | ||
| 477 | /* GPIO Interrupt Event data */ | ||
| 478 | |||
| 479 | typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { | ||
| 480 | U8 GPIONum; /* 0x00 */ | ||
| 481 | U8 Reserved1; /* 0x01 */ | ||
| 482 | U16 Reserved2; /* 0x02 */ | ||
| 483 | } MPI2_EVENT_DATA_GPIO_INTERRUPT, | ||
| 484 | MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, | ||
| 485 | Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; | ||
| 486 | |||
| 472 | /* Hard Reset Received Event data */ | 487 | /* Hard Reset Received Event data */ |
| 473 | 488 | ||
| 474 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED | 489 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED |
| @@ -1117,6 +1132,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER | |||
| 1117 | #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) | 1132 | #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) |
| 1118 | /* SAS */ | 1133 | /* SAS */ |
| 1119 | #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010) | 1134 | #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010) |
| 1135 | #define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0011) | ||
| 1120 | 1136 | ||
| 1121 | /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ | 1137 | /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ |
| 1122 | 1138 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 7134816d9046..5160c33d2a00 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * Title: MPI Integrated RAID messages and structures | 6 | * Title: MPI Integrated RAID messages and structures |
| 7 | * Creation Date: April 26, 2007 | 7 | * Creation Date: April 26, 2007 |
| 8 | * | 8 | * |
| 9 | * mpi2_raid.h Version: 02.00.03 | 9 | * mpi2_raid.h Version: 02.00.04 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -20,6 +20,8 @@ | |||
| 20 | * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that | 20 | * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that |
| 21 | * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT | 21 | * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT |
| 22 | * can be sized by the build environment. | 22 | * can be sized by the build environment. |
| 23 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of | ||
| 24 | * VolumeCreationFlags and marked the old one as obsolete. | ||
| 23 | * -------------------------------------------------------------------------- | 25 | * -------------------------------------------------------------------------- |
| 24 | */ | 26 | */ |
| 25 | 27 | ||
| @@ -217,10 +219,14 @@ typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT | |||
| 217 | /* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ | 219 | /* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ |
| 218 | 220 | ||
| 219 | /* defines for the VolumeCreationFlags field */ | 221 | /* defines for the VolumeCreationFlags field */ |
| 222 | #define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) | ||
| 223 | #define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) | ||
| 224 | #define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) | ||
| 225 | #define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) | ||
| 226 | /* The following is an obsolete define. | ||
| 227 | * It must be shifted left 24 bits in order to set the proper bit. | ||
| 228 | */ | ||
| 220 | #define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) | 229 | #define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) |
| 221 | #define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x04) | ||
| 222 | #define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x02) | ||
| 223 | #define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x01) | ||
| 224 | 230 | ||
| 225 | 231 | ||
| 226 | /* RAID Online Capacity Expansion Structure */ | 232 | /* RAID Online Capacity Expansion Structure */ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 007e950f7bfa..73fcdbf92632 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * Title: MPI diagnostic tool structures and definitions | 6 | * Title: MPI diagnostic tool structures and definitions |
| 7 | * Creation Date: March 26, 2007 | 7 | * Creation Date: March 26, 2007 |
| 8 | * | 8 | * |
| 9 | * mpi2_tool.h Version: 02.00.03 | 9 | * mpi2_tool.h Version: 02.00.04 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -18,6 +18,10 @@ | |||
| 18 | * structures and defines. | 18 | * structures and defines. |
| 19 | * 02-29-08 02.00.02 Modified various names to make them 32-character unique. | 19 | * 02-29-08 02.00.02 Modified various names to make them 32-character unique. |
| 20 | * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. | 20 | * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. |
| 21 | * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request | ||
| 22 | * and reply messages. | ||
| 23 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. | ||
| 24 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. | ||
| 21 | * -------------------------------------------------------------------------- | 25 | * -------------------------------------------------------------------------- |
| 22 | */ | 26 | */ |
| 23 | 27 | ||
| @@ -282,7 +286,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { | |||
| 282 | 286 | ||
| 283 | typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST | 287 | typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST |
| 284 | { | 288 | { |
| 285 | U8 Reserved1; /* 0x00 */ | 289 | U8 ExtendedType; /* 0x00 */ |
| 286 | U8 BufferType; /* 0x01 */ | 290 | U8 BufferType; /* 0x01 */ |
| 287 | U8 ChainOffset; /* 0x02 */ | 291 | U8 ChainOffset; /* 0x02 */ |
| 288 | U8 Function; /* 0x03 */ | 292 | U8 Function; /* 0x03 */ |
| @@ -301,11 +305,15 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST | |||
| 301 | } MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, | 305 | } MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, |
| 302 | Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; | 306 | Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; |
| 303 | 307 | ||
| 308 | /* values for the ExtendedType field */ | ||
| 309 | #define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) | ||
| 310 | |||
| 304 | /* values for the BufferType field */ | 311 | /* values for the BufferType field */ |
| 305 | #define MPI2_DIAG_BUF_TYPE_TRACE (0x00) | 312 | #define MPI2_DIAG_BUF_TYPE_TRACE (0x00) |
| 306 | #define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) | 313 | #define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) |
| 314 | #define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) | ||
| 307 | /* count of the number of buffer types */ | 315 | /* count of the number of buffer types */ |
| 308 | #define MPI2_DIAG_BUF_TYPE_COUNT (0x02) | 316 | #define MPI2_DIAG_BUF_TYPE_COUNT (0x03) |
| 309 | 317 | ||
| 310 | 318 | ||
| 311 | /**************************************************************************** | 319 | /**************************************************************************** |
| @@ -314,7 +322,7 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST | |||
| 314 | 322 | ||
| 315 | typedef struct _MPI2_DIAG_BUFFER_POST_REPLY | 323 | typedef struct _MPI2_DIAG_BUFFER_POST_REPLY |
| 316 | { | 324 | { |
| 317 | U8 Reserved1; /* 0x00 */ | 325 | U8 ExtendedType; /* 0x00 */ |
| 318 | U8 BufferType; /* 0x01 */ | 326 | U8 BufferType; /* 0x01 */ |
| 319 | U8 MsgLength; /* 0x02 */ | 327 | U8 MsgLength; /* 0x02 */ |
| 320 | U8 Function; /* 0x03 */ | 328 | U8 Function; /* 0x03 */ |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 670241efa4b5..6422e258fd52 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
| @@ -57,6 +57,7 @@ | |||
| 57 | #include <linux/dma-mapping.h> | 57 | #include <linux/dma-mapping.h> |
| 58 | #include <linux/sort.h> | 58 | #include <linux/sort.h> |
| 59 | #include <linux/io.h> | 59 | #include <linux/io.h> |
| 60 | #include <linux/time.h> | ||
| 60 | 61 | ||
| 61 | #include "mpt2sas_base.h" | 62 | #include "mpt2sas_base.h" |
| 62 | 63 | ||
| @@ -77,6 +78,44 @@ static int msix_disable = -1; | |||
| 77 | module_param(msix_disable, int, 0); | 78 | module_param(msix_disable, int, 0); |
| 78 | MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); | 79 | MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); |
| 79 | 80 | ||
| 81 | /* diag_buffer_enable is bitwise | ||
| 82 | * bit 0 set = TRACE | ||
| 83 | * bit 1 set = SNAPSHOT | ||
| 84 | * bit 2 set = EXTENDED | ||
| 85 | * | ||
| 86 | * Any combination of these bits may be set. | ||
| 87 | */ | ||
| 88 | static int diag_buffer_enable; | ||
| 89 | module_param(diag_buffer_enable, int, 0); | ||
| 90 | MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " | ||
| 91 | "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); | ||
| 92 | |||
| 93 | int mpt2sas_fwfault_debug; | ||
| 94 | MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " | ||
| 95 | "and halt firmware - (default=0)"); | ||
| 96 | |||
| 97 | /** | ||
| 98 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. | ||
| 99 | * | ||
| 100 | */ | ||
| 101 | static int | ||
| 102 | _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) | ||
| 103 | { | ||
| 104 | int ret = param_set_int(val, kp); | ||
| 105 | struct MPT2SAS_ADAPTER *ioc; | ||
| 106 | |||
| 107 | if (ret) | ||
| 108 | return ret; | ||
| 109 | |||
| 110 | printk(KERN_INFO "setting logging_level(0x%08x)\n", | ||
| 111 | mpt2sas_fwfault_debug); | ||
| 112 | list_for_each_entry(ioc, &mpt2sas_ioc_list, list) | ||
| 113 | ioc->fwfault_debug = mpt2sas_fwfault_debug; | ||
| 114 | return 0; | ||
| 115 | } | ||
| 116 | module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug, | ||
| 117 | param_get_int, &mpt2sas_fwfault_debug, 0644); | ||
| 118 | |||
| 80 | /** | 119 | /** |
| 81 | * _base_fault_reset_work - workq handling ioc fault conditions | 120 | * _base_fault_reset_work - workq handling ioc fault conditions |
| 82 | * @work: input argument, used to derive ioc | 121 | * @work: input argument, used to derive ioc |
| @@ -121,7 +160,7 @@ _base_fault_reset_work(struct work_struct *work) | |||
| 121 | 160 | ||
| 122 | /** | 161 | /** |
| 123 | * mpt2sas_base_start_watchdog - start the fault_reset_work_q | 162 | * mpt2sas_base_start_watchdog - start the fault_reset_work_q |
| 124 | * @ioc: pointer to scsi command object | 163 | * @ioc: per adapter object |
| 125 | * Context: sleep. | 164 | * Context: sleep. |
| 126 | * | 165 | * |
| 127 | * Return nothing. | 166 | * Return nothing. |
| @@ -155,7 +194,7 @@ mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc) | |||
| 155 | 194 | ||
| 156 | /** | 195 | /** |
| 157 | * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q | 196 | * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q |
| 158 | * @ioc: pointer to scsi command object | 197 | * @ioc: per adapter object |
| 159 | * Context: sleep. | 198 | * Context: sleep. |
| 160 | * | 199 | * |
| 161 | * Return nothing. | 200 | * Return nothing. |
| @@ -177,10 +216,55 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) | |||
| 177 | } | 216 | } |
| 178 | } | 217 | } |
| 179 | 218 | ||
| 219 | /** | ||
| 220 | * mpt2sas_base_fault_info - verbose translation of firmware FAULT code | ||
| 221 | * @ioc: per adapter object | ||
| 222 | * @fault_code: fault code | ||
| 223 | * | ||
| 224 | * Return nothing. | ||
| 225 | */ | ||
| 226 | void | ||
| 227 | mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code) | ||
| 228 | { | ||
| 229 | printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n", | ||
| 230 | ioc->name, fault_code); | ||
| 231 | } | ||
| 232 | |||
| 233 | /** | ||
| 234 | * mpt2sas_halt_firmware - halts the mpt controller firmware | ||
| 235 | * @ioc: per adapter object | ||
| 236 | * | ||
| 237 | * For debugging timeout related issues. Writing 0xC0FFEE00 | ||
| 238 | * to the doorbell register will halt controller firmware. This | ||
| 239 | * stops both the driver and the firmware, so the end user can | ||
| 240 | * obtain a ring buffer from the controller UART. | ||
| 241 | */ | ||
| 242 | void | ||
| 243 | mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc) | ||
| 244 | { | ||
| 245 | u32 doorbell; | ||
| 246 | |||
| 247 | if (!ioc->fwfault_debug) | ||
| 248 | return; | ||
| 249 | |||
| 250 | dump_stack(); | ||
| 251 | |||
| 252 | doorbell = readl(&ioc->chip->Doorbell); | ||
| 253 | if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) | ||
| 254 | mpt2sas_base_fault_info(ioc , doorbell); | ||
| 255 | else { | ||
| 256 | writel(0xC0FFEE00, &ioc->chip->Doorbell); | ||
| 257 | printk(MPT2SAS_ERR_FMT "Firmware is halted due to command " | ||
| 258 | "timeout\n", ioc->name); | ||
| 259 | } | ||
| 260 | |||
| 261 | panic("panic in %s\n", __func__); | ||
| 262 | } | ||
| 263 | |||
| 180 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 264 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 181 | /** | 265 | /** |
| 182 | * _base_sas_ioc_info - verbose translation of the ioc status | 266 | * _base_sas_ioc_info - verbose translation of the ioc status |
| 183 | * @ioc: pointer to scsi command object | 267 | * @ioc: per adapter object |
| 184 | * @mpi_reply: reply mf payload returned from firmware | 268 | * @mpi_reply: reply mf payload returned from firmware |
| 185 | * @request_hdr: request mf | 269 | * @request_hdr: request mf |
| 186 | * | 270 | * |
| @@ -394,7 +478,7 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, | |||
| 394 | 478 | ||
| 395 | /** | 479 | /** |
| 396 | * _base_display_event_data - verbose translation of firmware async events | 480 | * _base_display_event_data - verbose translation of firmware async events |
| 397 | * @ioc: pointer to scsi command object | 481 | * @ioc: per adapter object |
| 398 | * @mpi_reply: reply mf payload returned from firmware | 482 | * @mpi_reply: reply mf payload returned from firmware |
| 399 | * | 483 | * |
| 400 | * Return nothing. | 484 | * Return nothing. |
| @@ -474,7 +558,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, | |||
| 474 | 558 | ||
| 475 | /** | 559 | /** |
| 476 | * _base_sas_log_info - verbose translation of firmware log info | 560 | * _base_sas_log_info - verbose translation of firmware log info |
| 477 | * @ioc: pointer to scsi command object | 561 | * @ioc: per adapter object |
| 478 | * @log_info: log info | 562 | * @log_info: log info |
| 479 | * | 563 | * |
| 480 | * Return nothing. | 564 | * Return nothing. |
| @@ -526,22 +610,8 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) | |||
| 526 | } | 610 | } |
| 527 | 611 | ||
| 528 | /** | 612 | /** |
| 529 | * mpt2sas_base_fault_info - verbose translation of firmware FAULT code | ||
| 530 | * @ioc: pointer to scsi command object | ||
| 531 | * @fault_code: fault code | ||
| 532 | * | ||
| 533 | * Return nothing. | ||
| 534 | */ | ||
| 535 | void | ||
| 536 | mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code) | ||
| 537 | { | ||
| 538 | printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n", | ||
| 539 | ioc->name, fault_code); | ||
| 540 | } | ||
| 541 | |||
| 542 | /** | ||
| 543 | * _base_display_reply_info - | 613 | * _base_display_reply_info - |
| 544 | * @ioc: pointer to scsi command object | 614 | * @ioc: per adapter object |
| 545 | * @smid: system request message index | 615 | * @smid: system request message index |
| 546 | * @msix_index: MSIX table index supplied by the OS | 616 | * @msix_index: MSIX table index supplied by the OS |
| 547 | * @reply: reply message frame(lower 32bit addr) | 617 | * @reply: reply message frame(lower 32bit addr) |
| @@ -570,7 +640,7 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 570 | 640 | ||
| 571 | /** | 641 | /** |
| 572 | * mpt2sas_base_done - base internal command completion routine | 642 | * mpt2sas_base_done - base internal command completion routine |
| 573 | * @ioc: pointer to scsi command object | 643 | * @ioc: per adapter object |
| 574 | * @smid: system request message index | 644 | * @smid: system request message index |
| 575 | * @msix_index: MSIX table index supplied by the OS | 645 | * @msix_index: MSIX table index supplied by the OS |
| 576 | * @reply: reply message frame(lower 32bit addr) | 646 | * @reply: reply message frame(lower 32bit addr) |
| @@ -603,7 +673,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 603 | 673 | ||
| 604 | /** | 674 | /** |
| 605 | * _base_async_event - main callback handler for firmware async events | 675 | * _base_async_event - main callback handler for firmware async events |
| 606 | * @ioc: pointer to scsi command object | 676 | * @ioc: per adapter object |
| 607 | * @msix_index: MSIX table index supplied by the OS | 677 | * @msix_index: MSIX table index supplied by the OS |
| 608 | * @reply: reply message frame(lower 32bit addr) | 678 | * @reply: reply message frame(lower 32bit addr) |
| 609 | * | 679 | * |
| @@ -684,7 +754,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
| 684 | 754 | ||
| 685 | /** | 755 | /** |
| 686 | * _base_mask_interrupts - disable interrupts | 756 | * _base_mask_interrupts - disable interrupts |
| 687 | * @ioc: pointer to scsi command object | 757 | * @ioc: per adapter object |
| 688 | * | 758 | * |
| 689 | * Disabling ResetIRQ, Reply and Doorbell Interrupts | 759 | * Disabling ResetIRQ, Reply and Doorbell Interrupts |
| 690 | * | 760 | * |
| @@ -704,7 +774,7 @@ _base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc) | |||
| 704 | 774 | ||
| 705 | /** | 775 | /** |
| 706 | * _base_unmask_interrupts - enable interrupts | 776 | * _base_unmask_interrupts - enable interrupts |
| 707 | * @ioc: pointer to scsi command object | 777 | * @ioc: per adapter object |
| 708 | * | 778 | * |
| 709 | * Enabling only Reply Interrupts | 779 | * Enabling only Reply Interrupts |
| 710 | * | 780 | * |
| @@ -1258,12 +1328,13 @@ mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
| 1258 | * @ioc: per adapter object | 1328 | * @ioc: per adapter object |
| 1259 | * @smid: system request message index | 1329 | * @smid: system request message index |
| 1260 | * | 1330 | * |
| 1261 | * Returns phys pointer to sense buffer. | 1331 | * Returns phys pointer to the low 32bit address of the sense buffer. |
| 1262 | */ | 1332 | */ |
| 1263 | dma_addr_t | 1333 | __le32 |
| 1264 | mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) | 1334 | mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) |
| 1265 | { | 1335 | { |
| 1266 | return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE); | 1336 | return cpu_to_le32(ioc->sense_dma + |
| 1337 | ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); | ||
| 1267 | } | 1338 | } |
| 1268 | 1339 | ||
| 1269 | /** | 1340 | /** |
| @@ -1697,6 +1768,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
| 1697 | } | 1768 | } |
| 1698 | 1769 | ||
| 1699 | if (ioc->facts.IOCCapabilities & | 1770 | if (ioc->facts.IOCCapabilities & |
| 1771 | MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { | ||
| 1772 | printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : ""); | ||
| 1773 | i++; | ||
| 1774 | } | ||
| 1775 | |||
| 1776 | if (ioc->facts.IOCCapabilities & | ||
| 1700 | MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { | 1777 | MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { |
| 1701 | printk("%sTask Set Full", i ? "," : ""); | 1778 | printk("%sTask Set Full", i ? "," : ""); |
| 1702 | i++; | 1779 | i++; |
| @@ -2871,6 +2948,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 2871 | Mpi2IOCInitRequest_t mpi_request; | 2948 | Mpi2IOCInitRequest_t mpi_request; |
| 2872 | Mpi2IOCInitReply_t mpi_reply; | 2949 | Mpi2IOCInitReply_t mpi_reply; |
| 2873 | int r; | 2950 | int r; |
| 2951 | struct timeval current_time; | ||
| 2952 | u16 ioc_status; | ||
| 2874 | 2953 | ||
| 2875 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, | 2954 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
| 2876 | __func__)); | 2955 | __func__)); |
| @@ -2921,6 +3000,13 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 2921 | cpu_to_le32(ioc->reply_post_free_dma); | 3000 | cpu_to_le32(ioc->reply_post_free_dma); |
| 2922 | #endif | 3001 | #endif |
| 2923 | 3002 | ||
| 3003 | /* This time stamp specifies number of milliseconds | ||
| 3004 | * since epoch ~ midnight January 1, 1970. | ||
| 3005 | */ | ||
| 3006 | do_gettimeofday(¤t_time); | ||
| 3007 | mpi_request.TimeStamp = (current_time.tv_sec * 1000) + | ||
| 3008 | (current_time.tv_usec >> 3); | ||
| 3009 | |||
| 2924 | if (ioc->logging_level & MPT_DEBUG_INIT) { | 3010 | if (ioc->logging_level & MPT_DEBUG_INIT) { |
| 2925 | u32 *mfp; | 3011 | u32 *mfp; |
| 2926 | int i; | 3012 | int i; |
| @@ -2943,7 +3029,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 2943 | return r; | 3029 | return r; |
| 2944 | } | 3030 | } |
| 2945 | 3031 | ||
| 2946 | if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS || | 3032 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
| 3033 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS || | ||
| 2947 | mpi_reply.IOCLogInfo) { | 3034 | mpi_reply.IOCLogInfo) { |
| 2948 | printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); | 3035 | printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); |
| 2949 | r = -EIO; | 3036 | r = -EIO; |
| @@ -3461,11 +3548,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 3461 | return r; | 3548 | return r; |
| 3462 | 3549 | ||
| 3463 | pci_set_drvdata(ioc->pdev, ioc->shost); | 3550 | pci_set_drvdata(ioc->pdev, ioc->shost); |
| 3464 | r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); | 3551 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); |
| 3465 | if (r) | 3552 | if (r) |
| 3466 | goto out_free_resources; | 3553 | goto out_free_resources; |
| 3467 | 3554 | ||
| 3468 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); | 3555 | r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); |
| 3469 | if (r) | 3556 | if (r) |
| 3470 | goto out_free_resources; | 3557 | goto out_free_resources; |
| 3471 | 3558 | ||
| @@ -3531,6 +3618,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 3531 | goto out_free_resources; | 3618 | goto out_free_resources; |
| 3532 | 3619 | ||
| 3533 | mpt2sas_base_start_watchdog(ioc); | 3620 | mpt2sas_base_start_watchdog(ioc); |
| 3621 | if (diag_buffer_enable != 0) | ||
| 3622 | mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); | ||
| 3534 | return 0; | 3623 | return 0; |
| 3535 | 3624 | ||
| 3536 | out_free_resources: | 3625 | out_free_resources: |
| @@ -3684,6 +3773,9 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, | |||
| 3684 | dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, | 3773 | dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, |
| 3685 | __func__)); | 3774 | __func__)); |
| 3686 | 3775 | ||
| 3776 | if (mpt2sas_fwfault_debug) | ||
| 3777 | mpt2sas_halt_firmware(ioc); | ||
| 3778 | |||
| 3687 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 3779 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
| 3688 | if (ioc->shost_recovery) { | 3780 | if (ioc->shost_recovery) { |
| 3689 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 3781 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 0cf6bc236e4d..bb4f14656afa 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
| @@ -69,8 +69,8 @@ | |||
| 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
| 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
| 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
| 72 | #define MPT2SAS_DRIVER_VERSION "02.100.03.00" | 72 | #define MPT2SAS_DRIVER_VERSION "03.100.03.00" |
| 73 | #define MPT2SAS_MAJOR_VERSION 02 | 73 | #define MPT2SAS_MAJOR_VERSION 03 |
| 74 | #define MPT2SAS_MINOR_VERSION 100 | 74 | #define MPT2SAS_MINOR_VERSION 100 |
| 75 | #define MPT2SAS_BUILD_VERSION 03 | 75 | #define MPT2SAS_BUILD_VERSION 03 |
| 76 | #define MPT2SAS_RELEASE_VERSION 00 | 76 | #define MPT2SAS_RELEASE_VERSION 00 |
| @@ -278,7 +278,7 @@ struct _internal_cmd { | |||
| 278 | * @sas_address: device sas address | 278 | * @sas_address: device sas address |
| 279 | * @device_name: retrieved from the SAS IDENTIFY frame. | 279 | * @device_name: retrieved from the SAS IDENTIFY frame. |
| 280 | * @handle: device handle | 280 | * @handle: device handle |
| 281 | * @parent_handle: handle to parent device | 281 | * @sas_address_parent: sas address of parent expander or sas host |
| 282 | * @enclosure_handle: enclosure handle | 282 | * @enclosure_handle: enclosure handle |
| 283 | * @enclosure_logical_id: enclosure logical identifier | 283 | * @enclosure_logical_id: enclosure logical identifier |
| 284 | * @volume_handle: volume handle (valid when hidden raid member) | 284 | * @volume_handle: volume handle (valid when hidden raid member) |
| @@ -296,7 +296,7 @@ struct _sas_device { | |||
| 296 | u64 sas_address; | 296 | u64 sas_address; |
| 297 | u64 device_name; | 297 | u64 device_name; |
| 298 | u16 handle; | 298 | u16 handle; |
| 299 | u16 parent_handle; | 299 | u64 sas_address_parent; |
| 300 | u16 enclosure_handle; | 300 | u16 enclosure_handle; |
| 301 | u64 enclosure_logical_id; | 301 | u64 enclosure_logical_id; |
| 302 | u16 volume_handle; | 302 | u16 volume_handle; |
| @@ -352,8 +352,6 @@ struct _boot_device { | |||
| 352 | /** | 352 | /** |
| 353 | * struct _sas_port - wide/narrow sas port information | 353 | * struct _sas_port - wide/narrow sas port information |
| 354 | * @port_list: list of ports belonging to expander | 354 | * @port_list: list of ports belonging to expander |
| 355 | * @handle: device handle for this port | ||
| 356 | * @sas_address: sas address of this port | ||
| 357 | * @num_phys: number of phys belonging to this port | 355 | * @num_phys: number of phys belonging to this port |
| 358 | * @remote_identify: attached device identification | 356 | * @remote_identify: attached device identification |
| 359 | * @rphy: sas transport rphy object | 357 | * @rphy: sas transport rphy object |
| @@ -362,8 +360,6 @@ struct _boot_device { | |||
| 362 | */ | 360 | */ |
| 363 | struct _sas_port { | 361 | struct _sas_port { |
| 364 | struct list_head port_list; | 362 | struct list_head port_list; |
| 365 | u16 handle; | ||
| 366 | u64 sas_address; | ||
| 367 | u8 num_phys; | 363 | u8 num_phys; |
| 368 | struct sas_identify remote_identify; | 364 | struct sas_identify remote_identify; |
| 369 | struct sas_rphy *rphy; | 365 | struct sas_rphy *rphy; |
| @@ -398,7 +394,7 @@ struct _sas_phy { | |||
| 398 | * @num_phys: number phys belonging to this sas_host/expander | 394 | * @num_phys: number phys belonging to this sas_host/expander |
| 399 | * @sas_address: sas address of this sas_host/expander | 395 | * @sas_address: sas address of this sas_host/expander |
| 400 | * @handle: handle for this sas_host/expander | 396 | * @handle: handle for this sas_host/expander |
| 401 | * @parent_handle: parent handle | 397 | * @sas_address_parent: sas address of parent expander or sas host |
| 402 | * @enclosure_handle: handle for this a member of an enclosure | 398 | * @enclosure_handle: handle for this a member of an enclosure |
| 403 | * @device_info: bitwise defining capabilities of this sas_host/expander | 399 | * @device_info: bitwise defining capabilities of this sas_host/expander |
| 404 | * @responding: used in _scsih_expander_device_mark_responding | 400 | * @responding: used in _scsih_expander_device_mark_responding |
| @@ -411,7 +407,7 @@ struct _sas_node { | |||
| 411 | u8 num_phys; | 407 | u8 num_phys; |
| 412 | u64 sas_address; | 408 | u64 sas_address; |
| 413 | u16 handle; | 409 | u16 handle; |
| 414 | u16 parent_handle; | 410 | u64 sas_address_parent; |
| 415 | u16 enclosure_handle; | 411 | u16 enclosure_handle; |
| 416 | u64 enclosure_logical_id; | 412 | u64 enclosure_logical_id; |
| 417 | u8 responding; | 413 | u8 responding; |
| @@ -470,6 +466,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); | |||
| 470 | * @chip_phys: physical address prior to mapping | 466 | * @chip_phys: physical address prior to mapping |
| 471 | * @pio_chip: I/O mapped register space | 467 | * @pio_chip: I/O mapped register space |
| 472 | * @logging_level: see mpt2sas_debug.h | 468 | * @logging_level: see mpt2sas_debug.h |
| 469 | * @fwfault_debug: debugging FW timeouts | ||
| 473 | * @ir_firmware: IR firmware present | 470 | * @ir_firmware: IR firmware present |
| 474 | * @bars: bitmask of BAR's that must be configured | 471 | * @bars: bitmask of BAR's that must be configured |
| 475 | * @mask_interrupts: ignore interrupt | 472 | * @mask_interrupts: ignore interrupt |
| @@ -495,12 +492,14 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); | |||
| 495 | * @msix_table_backup: backup msix table | 492 | * @msix_table_backup: backup msix table |
| 496 | * @scsi_io_cb_idx: shost generated commands | 493 | * @scsi_io_cb_idx: shost generated commands |
| 497 | * @tm_cb_idx: task management commands | 494 | * @tm_cb_idx: task management commands |
| 495 | * @scsih_cb_idx: scsih internal commands | ||
| 498 | * @transport_cb_idx: transport internal commands | 496 | * @transport_cb_idx: transport internal commands |
| 499 | * @ctl_cb_idx: ctl internal commands | 497 | * @ctl_cb_idx: ctl internal commands |
| 500 | * @base_cb_idx: base internal commands | 498 | * @base_cb_idx: base internal commands |
| 501 | * @config_cb_idx: base internal commands | 499 | * @config_cb_idx: base internal commands |
| 502 | * @base_cmds: | 500 | * @base_cmds: |
| 503 | * @transport_cmds: | 501 | * @transport_cmds: |
| 502 | * @scsih_cmds: | ||
| 504 | * @tm_cmds: | 503 | * @tm_cmds: |
| 505 | * @ctl_cmds: | 504 | * @ctl_cmds: |
| 506 | * @config_cmds: | 505 | * @config_cmds: |
| @@ -591,6 +590,7 @@ struct MPT2SAS_ADAPTER { | |||
| 591 | unsigned long chip_phys; | 590 | unsigned long chip_phys; |
| 592 | unsigned long pio_chip; | 591 | unsigned long pio_chip; |
| 593 | int logging_level; | 592 | int logging_level; |
| 593 | int fwfault_debug; | ||
| 594 | u8 ir_firmware; | 594 | u8 ir_firmware; |
| 595 | int bars; | 595 | int bars; |
| 596 | u8 mask_interrupts; | 596 | u8 mask_interrupts; |
| @@ -626,6 +626,7 @@ struct MPT2SAS_ADAPTER { | |||
| 626 | u8 scsi_io_cb_idx; | 626 | u8 scsi_io_cb_idx; |
| 627 | u8 tm_cb_idx; | 627 | u8 tm_cb_idx; |
| 628 | u8 transport_cb_idx; | 628 | u8 transport_cb_idx; |
| 629 | u8 scsih_cb_idx; | ||
| 629 | u8 ctl_cb_idx; | 630 | u8 ctl_cb_idx; |
| 630 | u8 base_cb_idx; | 631 | u8 base_cb_idx; |
| 631 | u8 config_cb_idx; | 632 | u8 config_cb_idx; |
| @@ -633,6 +634,7 @@ struct MPT2SAS_ADAPTER { | |||
| 633 | u8 tm_sas_control_cb_idx; | 634 | u8 tm_sas_control_cb_idx; |
| 634 | struct _internal_cmd base_cmds; | 635 | struct _internal_cmd base_cmds; |
| 635 | struct _internal_cmd transport_cmds; | 636 | struct _internal_cmd transport_cmds; |
| 637 | struct _internal_cmd scsih_cmds; | ||
| 636 | struct _internal_cmd tm_cmds; | 638 | struct _internal_cmd tm_cmds; |
| 637 | struct _internal_cmd ctl_cmds; | 639 | struct _internal_cmd ctl_cmds; |
| 638 | struct _internal_cmd config_cmds; | 640 | struct _internal_cmd config_cmds; |
| @@ -773,7 +775,7 @@ int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, | |||
| 773 | void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid); | 775 | void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid); |
| 774 | void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid); | 776 | void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid); |
| 775 | void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); | 777 | void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); |
| 776 | dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, | 778 | __le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, |
| 777 | u16 smid); | 779 | u16 smid); |
| 778 | 780 | ||
| 779 | /* hi-priority queue */ | 781 | /* hi-priority queue */ |
| @@ -807,6 +809,8 @@ int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, | |||
| 807 | Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); | 809 | Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); |
| 808 | void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type); | 810 | void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type); |
| 809 | 811 | ||
| 812 | void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); | ||
| 813 | |||
| 810 | /* scsih shared API */ | 814 | /* scsih shared API */ |
| 811 | u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | 815 | u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, |
| 812 | u32 reply); | 816 | u32 reply); |
| @@ -886,19 +890,22 @@ u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
| 886 | void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, | 890 | void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, |
| 887 | Mpi2EventNotificationReply_t *mpi_reply); | 891 | Mpi2EventNotificationReply_t *mpi_reply); |
| 888 | 892 | ||
| 893 | void mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, | ||
| 894 | u8 bits_to_register); | ||
| 895 | |||
| 889 | /* transport shared API */ | 896 | /* transport shared API */ |
| 890 | u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 897 | u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
| 891 | u32 reply); | 898 | u32 reply); |
| 892 | struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, | 899 | struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, |
| 893 | u16 handle, u16 parent_handle); | 900 | u16 handle, u64 sas_address); |
| 894 | void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, | 901 | void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, |
| 895 | u16 parent_handle); | 902 | u64 sas_address_parent); |
| 896 | int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy | 903 | int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy |
| 897 | *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); | 904 | *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); |
| 898 | int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy | 905 | int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy |
| 899 | *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); | 906 | *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); |
| 900 | void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, | 907 | void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, |
| 901 | u16 attached_handle, u8 phy_number, u8 link_rate); | 908 | u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); |
| 902 | extern struct sas_function_template mpt2sas_transport_functions; | 909 | extern struct sas_function_template mpt2sas_transport_functions; |
| 903 | extern struct scsi_transport_template *mpt2sas_transport_template; | 910 | extern struct scsi_transport_template *mpt2sas_transport_template; |
| 904 | extern int scsi_internal_device_block(struct scsi_device *sdev); | 911 | extern int scsi_internal_device_block(struct scsi_device *sdev); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 57d724633906..84a124f8e21f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
| @@ -740,7 +740,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, | |||
| 740 | Mpi2SCSIIORequest_t *scsiio_request = | 740 | Mpi2SCSIIORequest_t *scsiio_request = |
| 741 | (Mpi2SCSIIORequest_t *)mpi_request; | 741 | (Mpi2SCSIIORequest_t *)mpi_request; |
| 742 | scsiio_request->SenseBufferLowAddress = | 742 | scsiio_request->SenseBufferLowAddress = |
| 743 | (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); | 743 | mpt2sas_base_get_sense_buffer_dma(ioc, smid); |
| 744 | priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid); | 744 | priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid); |
| 745 | memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE); | 745 | memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE); |
| 746 | mpt2sas_base_put_smid_scsi_io(ioc, smid, | 746 | mpt2sas_base_put_smid_scsi_io(ioc, smid, |
| @@ -848,8 +848,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, | |||
| 848 | printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: " | 848 | printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: " |
| 849 | "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " | 849 | "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " |
| 850 | "TerminationCount(0x%08x)\n", ioc->name, | 850 | "TerminationCount(0x%08x)\n", ioc->name, |
| 851 | tm_reply->IOCStatus, tm_reply->IOCLogInfo, | 851 | le16_to_cpu(tm_reply->IOCStatus), |
| 852 | tm_reply->TerminationCount); | 852 | le32_to_cpu(tm_reply->IOCLogInfo), |
| 853 | le32_to_cpu(tm_reply->TerminationCount)); | ||
| 853 | } | 854 | } |
| 854 | #endif | 855 | #endif |
| 855 | /* copy out xdata to user */ | 856 | /* copy out xdata to user */ |
| @@ -896,6 +897,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, | |||
| 896 | printk(MPT2SAS_INFO_FMT "issue target reset: handle " | 897 | printk(MPT2SAS_INFO_FMT "issue target reset: handle " |
| 897 | "= (0x%04x)\n", ioc->name, | 898 | "= (0x%04x)\n", ioc->name, |
| 898 | mpi_request->FunctionDependent1); | 899 | mpi_request->FunctionDependent1); |
| 900 | mpt2sas_halt_firmware(ioc); | ||
| 899 | mutex_lock(&ioc->tm_cmds.mutex); | 901 | mutex_lock(&ioc->tm_cmds.mutex); |
| 900 | mpt2sas_scsih_issue_tm(ioc, | 902 | mpt2sas_scsih_issue_tm(ioc, |
| 901 | mpi_request->FunctionDependent1, 0, | 903 | mpi_request->FunctionDependent1, 0, |
| @@ -1229,7 +1231,7 @@ _ctl_btdh_mapping(void __user *arg) | |||
| 1229 | /** | 1231 | /** |
| 1230 | * _ctl_diag_capability - return diag buffer capability | 1232 | * _ctl_diag_capability - return diag buffer capability |
| 1231 | * @ioc: per adapter object | 1233 | * @ioc: per adapter object |
| 1232 | * @buffer_type: specifies either TRACE or SNAPSHOT | 1234 | * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED |
| 1233 | * | 1235 | * |
| 1234 | * returns 1 when diag buffer support is enabled in firmware | 1236 | * returns 1 when diag buffer support is enabled in firmware |
| 1235 | */ | 1237 | */ |
| @@ -1249,24 +1251,25 @@ _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type) | |||
| 1249 | MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) | 1251 | MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) |
| 1250 | rc = 1; | 1252 | rc = 1; |
| 1251 | break; | 1253 | break; |
| 1254 | case MPI2_DIAG_BUF_TYPE_EXTENDED: | ||
| 1255 | if (ioc->facts.IOCCapabilities & | ||
| 1256 | MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) | ||
| 1257 | rc = 1; | ||
| 1252 | } | 1258 | } |
| 1253 | 1259 | ||
| 1254 | return rc; | 1260 | return rc; |
| 1255 | } | 1261 | } |
| 1256 | 1262 | ||
| 1257 | /** | 1263 | /** |
| 1258 | * _ctl_diag_register - application register with driver | 1264 | * _ctl_diag_register_2 - wrapper for registering diag buffer support |
| 1259 | * @arg - user space buffer containing ioctl content | 1265 | * @ioc: per adapter object |
| 1260 | * @state - NON_BLOCKING or BLOCKING | 1266 | * @diag_register: the diag_register struct passed in from user space |
| 1261 | * | 1267 | * |
| 1262 | * This will allow the driver to setup any required buffers that will be | ||
| 1263 | * needed by firmware to communicate with the driver. | ||
| 1264 | */ | 1268 | */ |
| 1265 | static long | 1269 | static long |
| 1266 | _ctl_diag_register(void __user *arg, enum block_state state) | 1270 | _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc, |
| 1271 | struct mpt2_diag_register *diag_register) | ||
| 1267 | { | 1272 | { |
| 1268 | struct mpt2_diag_register karg; | ||
| 1269 | struct MPT2SAS_ADAPTER *ioc; | ||
| 1270 | int rc, i; | 1273 | int rc, i; |
| 1271 | void *request_data = NULL; | 1274 | void *request_data = NULL; |
| 1272 | dma_addr_t request_data_dma; | 1275 | dma_addr_t request_data_dma; |
| @@ -1279,18 +1282,17 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1279 | u16 ioc_status; | 1282 | u16 ioc_status; |
| 1280 | u8 issue_reset = 0; | 1283 | u8 issue_reset = 0; |
| 1281 | 1284 | ||
| 1282 | if (copy_from_user(&karg, arg, sizeof(karg))) { | ||
| 1283 | printk(KERN_ERR "failure at %s:%d/%s()!\n", | ||
| 1284 | __FILE__, __LINE__, __func__); | ||
| 1285 | return -EFAULT; | ||
| 1286 | } | ||
| 1287 | if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) | ||
| 1288 | return -ENODEV; | ||
| 1289 | |||
| 1290 | dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, | 1285 | dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
| 1291 | __func__)); | 1286 | __func__)); |
| 1292 | 1287 | ||
| 1293 | buffer_type = karg.buffer_type; | 1288 | if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { |
| 1289 | printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", | ||
| 1290 | ioc->name, __func__); | ||
| 1291 | rc = -EAGAIN; | ||
| 1292 | goto out; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | buffer_type = diag_register->buffer_type; | ||
| 1294 | if (!_ctl_diag_capability(ioc, buffer_type)) { | 1296 | if (!_ctl_diag_capability(ioc, buffer_type)) { |
| 1295 | printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " | 1297 | printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " |
| 1296 | "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); | 1298 | "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); |
| @@ -1305,24 +1307,12 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1305 | return -EINVAL; | 1307 | return -EINVAL; |
| 1306 | } | 1308 | } |
| 1307 | 1309 | ||
| 1308 | if (karg.requested_buffer_size % 4) { | 1310 | if (diag_register->requested_buffer_size % 4) { |
| 1309 | printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size " | 1311 | printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size " |
| 1310 | "is not 4 byte aligned\n", ioc->name, __func__); | 1312 | "is not 4 byte aligned\n", ioc->name, __func__); |
| 1311 | return -EINVAL; | 1313 | return -EINVAL; |
| 1312 | } | 1314 | } |
| 1313 | 1315 | ||
| 1314 | if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) | ||
| 1315 | return -EAGAIN; | ||
| 1316 | else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) | ||
| 1317 | return -ERESTARTSYS; | ||
| 1318 | |||
| 1319 | if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { | ||
| 1320 | printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", | ||
| 1321 | ioc->name, __func__); | ||
| 1322 | rc = -EAGAIN; | ||
| 1323 | goto out; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx); | 1316 | smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx); |
| 1327 | if (!smid) { | 1317 | if (!smid) { |
| 1328 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", | 1318 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", |
| @@ -1338,12 +1328,12 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1338 | ioc->ctl_cmds.smid = smid; | 1328 | ioc->ctl_cmds.smid = smid; |
| 1339 | 1329 | ||
| 1340 | request_data = ioc->diag_buffer[buffer_type]; | 1330 | request_data = ioc->diag_buffer[buffer_type]; |
| 1341 | request_data_sz = karg.requested_buffer_size; | 1331 | request_data_sz = diag_register->requested_buffer_size; |
| 1342 | ioc->unique_id[buffer_type] = karg.unique_id; | 1332 | ioc->unique_id[buffer_type] = diag_register->unique_id; |
| 1343 | ioc->diag_buffer_status[buffer_type] = 0; | 1333 | ioc->diag_buffer_status[buffer_type] = 0; |
| 1344 | memcpy(ioc->product_specific[buffer_type], karg.product_specific, | 1334 | memcpy(ioc->product_specific[buffer_type], |
| 1345 | MPT2_PRODUCT_SPECIFIC_DWORDS); | 1335 | diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS); |
| 1346 | ioc->diagnostic_flags[buffer_type] = karg.diagnostic_flags; | 1336 | ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; |
| 1347 | 1337 | ||
| 1348 | if (request_data) { | 1338 | if (request_data) { |
| 1349 | request_data_dma = ioc->diag_buffer_dma[buffer_type]; | 1339 | request_data_dma = ioc->diag_buffer_dma[buffer_type]; |
| @@ -1373,8 +1363,8 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1373 | } | 1363 | } |
| 1374 | 1364 | ||
| 1375 | mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; | 1365 | mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; |
| 1376 | mpi_request->BufferType = karg.buffer_type; | 1366 | mpi_request->BufferType = diag_register->buffer_type; |
| 1377 | mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags); | 1367 | mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); |
| 1378 | mpi_request->BufferAddress = cpu_to_le64(request_data_dma); | 1368 | mpi_request->BufferAddress = cpu_to_le64(request_data_dma); |
| 1379 | mpi_request->BufferLength = cpu_to_le32(request_data_sz); | 1369 | mpi_request->BufferLength = cpu_to_le32(request_data_sz); |
| 1380 | mpi_request->VF_ID = 0; /* TODO */ | 1370 | mpi_request->VF_ID = 0; /* TODO */ |
| @@ -1422,7 +1412,7 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1422 | } else { | 1412 | } else { |
| 1423 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " | 1413 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " |
| 1424 | "log_info(0x%08x)\n", ioc->name, __func__, | 1414 | "log_info(0x%08x)\n", ioc->name, __func__, |
| 1425 | ioc_status, mpi_reply->IOCLogInfo); | 1415 | ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); |
| 1426 | rc = -EFAULT; | 1416 | rc = -EFAULT; |
| 1427 | } | 1417 | } |
| 1428 | 1418 | ||
| @@ -1438,6 +1428,83 @@ _ctl_diag_register(void __user *arg, enum block_state state) | |||
| 1438 | request_data, request_data_dma); | 1428 | request_data, request_data_dma); |
| 1439 | 1429 | ||
| 1440 | ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; | 1430 | ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; |
| 1431 | return rc; | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | /** | ||
| 1435 | * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time | ||
| 1436 | * @ioc: per adapter object | ||
| 1437 | * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 | ||
| 1438 | * | ||
| 1439 | * This is called when command line option diag_buffer_enable is enabled | ||
| 1440 | * at driver load time. | ||
| 1441 | */ | ||
| 1442 | void | ||
| 1443 | mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register) | ||
| 1444 | { | ||
| 1445 | struct mpt2_diag_register diag_register; | ||
| 1446 | |||
| 1447 | memset(&diag_register, 0, sizeof(struct mpt2_diag_register)); | ||
| 1448 | |||
| 1449 | if (bits_to_register & 1) { | ||
| 1450 | printk(MPT2SAS_INFO_FMT "registering trace buffer support\n", | ||
| 1451 | ioc->name); | ||
| 1452 | diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; | ||
| 1453 | /* register for 1MB buffers */ | ||
| 1454 | diag_register.requested_buffer_size = (1024 * 1024); | ||
| 1455 | diag_register.unique_id = 0x7075900; | ||
| 1456 | _ctl_diag_register_2(ioc, &diag_register); | ||
| 1457 | } | ||
| 1458 | |||
| 1459 | if (bits_to_register & 2) { | ||
| 1460 | printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n", | ||
| 1461 | ioc->name); | ||
| 1462 | diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; | ||
| 1463 | /* register for 2MB buffers */ | ||
| 1464 | diag_register.requested_buffer_size = 2 * (1024 * 1024); | ||
| 1465 | diag_register.unique_id = 0x7075901; | ||
| 1466 | _ctl_diag_register_2(ioc, &diag_register); | ||
| 1467 | } | ||
| 1468 | |||
| 1469 | if (bits_to_register & 4) { | ||
| 1470 | printk(MPT2SAS_INFO_FMT "registering extended buffer support\n", | ||
| 1471 | ioc->name); | ||
| 1472 | diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; | ||
| 1473 | /* register for 2MB buffers */ | ||
| 1474 | diag_register.requested_buffer_size = 2 * (1024 * 1024); | ||
| 1475 | diag_register.unique_id = 0x7075901; | ||
| 1476 | _ctl_diag_register_2(ioc, &diag_register); | ||
| 1477 | } | ||
| 1478 | } | ||
| 1479 | |||
| 1480 | /** | ||
| 1481 | * _ctl_diag_register - application register with driver | ||
| 1482 | * @arg - user space buffer containing ioctl content | ||
| 1483 | * @state - NON_BLOCKING or BLOCKING | ||
| 1484 | * | ||
| 1485 | * This will allow the driver to setup any required buffers that will be | ||
| 1486 | * needed by firmware to communicate with the driver. | ||
| 1487 | */ | ||
| 1488 | static long | ||
| 1489 | _ctl_diag_register(void __user *arg, enum block_state state) | ||
| 1490 | { | ||
| 1491 | struct mpt2_diag_register karg; | ||
| 1492 | struct MPT2SAS_ADAPTER *ioc; | ||
| 1493 | long rc; | ||
| 1494 | |||
| 1495 | if (copy_from_user(&karg, arg, sizeof(karg))) { | ||
| 1496 | printk(KERN_ERR "failure at %s:%d/%s()!\n", | ||
| 1497 | __FILE__, __LINE__, __func__); | ||
| 1498 | return -EFAULT; | ||
| 1499 | } | ||
| 1500 | if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) | ||
| 1501 | return -ENODEV; | ||
| 1502 | |||
| 1503 | if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) | ||
| 1504 | return -EAGAIN; | ||
| 1505 | else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) | ||
| 1506 | return -ERESTARTSYS; | ||
| 1507 | rc = _ctl_diag_register_2(ioc, &karg); | ||
| 1441 | mutex_unlock(&ioc->ctl_cmds.mutex); | 1508 | mutex_unlock(&ioc->ctl_cmds.mutex); |
| 1442 | return rc; | 1509 | return rc; |
| 1443 | } | 1510 | } |
| @@ -1600,7 +1667,7 @@ _ctl_diag_query(void __user *arg) | |||
| 1600 | /** | 1667 | /** |
| 1601 | * _ctl_send_release - Diag Release Message | 1668 | * _ctl_send_release - Diag Release Message |
| 1602 | * @ioc: per adapter object | 1669 | * @ioc: per adapter object |
| 1603 | * @buffer_type - specifies either TRACE or SNAPSHOT | 1670 | * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED |
| 1604 | * @issue_reset - specifies whether host reset is required. | 1671 | * @issue_reset - specifies whether host reset is required. |
| 1605 | * | 1672 | * |
| 1606 | */ | 1673 | */ |
| @@ -1690,7 +1757,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset) | |||
| 1690 | } else { | 1757 | } else { |
| 1691 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " | 1758 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " |
| 1692 | "log_info(0x%08x)\n", ioc->name, __func__, | 1759 | "log_info(0x%08x)\n", ioc->name, __func__, |
| 1693 | ioc_status, mpi_reply->IOCLogInfo); | 1760 | ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); |
| 1694 | rc = -EFAULT; | 1761 | rc = -EFAULT; |
| 1695 | } | 1762 | } |
| 1696 | 1763 | ||
| @@ -1951,7 +2018,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) | |||
| 1951 | } else { | 2018 | } else { |
| 1952 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " | 2019 | printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " |
| 1953 | "log_info(0x%08x)\n", ioc->name, __func__, | 2020 | "log_info(0x%08x)\n", ioc->name, __func__, |
| 1954 | ioc_status, mpi_reply->IOCLogInfo); | 2021 | ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); |
| 1955 | rc = -EFAULT; | 2022 | rc = -EFAULT; |
| 1956 | } | 2023 | } |
| 1957 | 2024 | ||
| @@ -2474,6 +2541,43 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, | |||
| 2474 | static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, | 2541 | static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, |
| 2475 | _ctl_logging_level_show, _ctl_logging_level_store); | 2542 | _ctl_logging_level_show, _ctl_logging_level_store); |
| 2476 | 2543 | ||
| 2544 | /* device attributes */ | ||
| 2545 | /* | ||
| 2546 | * _ctl_fwfault_debug_show - show/store fwfault_debug | ||
| 2547 | * @cdev - pointer to embedded class device | ||
| 2548 | * @buf - the buffer returned | ||
| 2549 | * | ||
| 2550 | * mpt2sas_fwfault_debug is command line option | ||
| 2551 | * A sysfs 'read/write' shost attribute. | ||
| 2552 | */ | ||
| 2553 | static ssize_t | ||
| 2554 | _ctl_fwfault_debug_show(struct device *cdev, | ||
| 2555 | struct device_attribute *attr, char *buf) | ||
| 2556 | { | ||
| 2557 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 2558 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 2559 | |||
| 2560 | return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); | ||
| 2561 | } | ||
| 2562 | static ssize_t | ||
| 2563 | _ctl_fwfault_debug_store(struct device *cdev, | ||
| 2564 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 2565 | { | ||
| 2566 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 2567 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 2568 | int val = 0; | ||
| 2569 | |||
| 2570 | if (sscanf(buf, "%d", &val) != 1) | ||
| 2571 | return -EINVAL; | ||
| 2572 | |||
| 2573 | ioc->fwfault_debug = val; | ||
| 2574 | printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name, | ||
| 2575 | ioc->fwfault_debug); | ||
| 2576 | return strlen(buf); | ||
| 2577 | } | ||
| 2578 | static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, | ||
| 2579 | _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); | ||
| 2580 | |||
| 2477 | struct device_attribute *mpt2sas_host_attrs[] = { | 2581 | struct device_attribute *mpt2sas_host_attrs[] = { |
| 2478 | &dev_attr_version_fw, | 2582 | &dev_attr_version_fw, |
| 2479 | &dev_attr_version_bios, | 2583 | &dev_attr_version_bios, |
| @@ -2487,13 +2591,12 @@ struct device_attribute *mpt2sas_host_attrs[] = { | |||
| 2487 | &dev_attr_io_delay, | 2591 | &dev_attr_io_delay, |
| 2488 | &dev_attr_device_delay, | 2592 | &dev_attr_device_delay, |
| 2489 | &dev_attr_logging_level, | 2593 | &dev_attr_logging_level, |
| 2594 | &dev_attr_fwfault_debug, | ||
| 2490 | &dev_attr_fw_queue_depth, | 2595 | &dev_attr_fw_queue_depth, |
| 2491 | &dev_attr_host_sas_address, | 2596 | &dev_attr_host_sas_address, |
| 2492 | NULL, | 2597 | NULL, |
| 2493 | }; | 2598 | }; |
| 2494 | 2599 | ||
| 2495 | /* device attributes */ | ||
| 2496 | |||
| 2497 | /** | 2600 | /** |
| 2498 | * _ctl_device_sas_address_show - sas address | 2601 | * _ctl_device_sas_address_show - sas address |
| 2499 | * @cdev - pointer to embedded class device | 2602 | * @cdev - pointer to embedded class device |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 211f296dd191..8a5eeb1a5c84 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h | |||
| @@ -313,7 +313,7 @@ struct mpt2_ioctl_btdh_mapping { | |||
| 313 | * struct mpt2_diag_register - application register with driver | 313 | * struct mpt2_diag_register - application register with driver |
| 314 | * @hdr - generic header | 314 | * @hdr - generic header |
| 315 | * @reserved - | 315 | * @reserved - |
| 316 | * @buffer_type - specifies either TRACE or SNAPSHOT | 316 | * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED |
| 317 | * @application_flags - misc flags | 317 | * @application_flags - misc flags |
| 318 | * @diagnostic_flags - specifies flags affecting command processing | 318 | * @diagnostic_flags - specifies flags affecting command processing |
| 319 | * @product_specific - product specific information | 319 | * @product_specific - product specific information |
| @@ -352,7 +352,7 @@ struct mpt2_diag_unregister { | |||
| 352 | * struct mpt2_diag_query - query relevant info associated with diag buffers | 352 | * struct mpt2_diag_query - query relevant info associated with diag buffers |
| 353 | * @hdr - generic header | 353 | * @hdr - generic header |
| 354 | * @reserved - | 354 | * @reserved - |
| 355 | * @buffer_type - specifies either TRACE or SNAPSHOT | 355 | * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED |
| 356 | * @application_flags - misc flags | 356 | * @application_flags - misc flags |
| 357 | * @diagnostic_flags - specifies flags affecting command processing | 357 | * @diagnostic_flags - specifies flags affecting command processing |
| 358 | * @product_specific - product specific information | 358 | * @product_specific - product specific information |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 86ab32d7ab15..efabea1a3ce4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
| @@ -76,6 +76,7 @@ static u8 tm_cb_idx = -1; | |||
| 76 | static u8 ctl_cb_idx = -1; | 76 | static u8 ctl_cb_idx = -1; |
| 77 | static u8 base_cb_idx = -1; | 77 | static u8 base_cb_idx = -1; |
| 78 | static u8 transport_cb_idx = -1; | 78 | static u8 transport_cb_idx = -1; |
| 79 | static u8 scsih_cb_idx = -1; | ||
| 79 | static u8 config_cb_idx = -1; | 80 | static u8 config_cb_idx = -1; |
| 80 | static int mpt_ids; | 81 | static int mpt_ids; |
| 81 | 82 | ||
| @@ -196,10 +197,28 @@ static struct pci_device_id scsih_pci_table[] = { | |||
| 196 | PCI_ANY_ID, PCI_ANY_ID }, | 197 | PCI_ANY_ID, PCI_ANY_ID }, |
| 197 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, | 198 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, |
| 198 | PCI_ANY_ID, PCI_ANY_ID }, | 199 | PCI_ANY_ID, PCI_ANY_ID }, |
| 200 | /* Meteor ~ 2116 */ | ||
| 199 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, | 201 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, |
| 200 | PCI_ANY_ID, PCI_ANY_ID }, | 202 | PCI_ANY_ID, PCI_ANY_ID }, |
| 201 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, | 203 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, |
| 202 | PCI_ANY_ID, PCI_ANY_ID }, | 204 | PCI_ANY_ID, PCI_ANY_ID }, |
| 205 | /* Thunderbolt ~ 2208 */ | ||
| 206 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, | ||
| 207 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 208 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, | ||
| 209 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 210 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, | ||
| 211 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 212 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, | ||
| 213 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 214 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, | ||
| 215 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 216 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, | ||
| 217 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 218 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7, | ||
| 219 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 220 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8, | ||
| 221 | PCI_ANY_ID, PCI_ANY_ID }, | ||
| 203 | {0} /* Terminating entry */ | 222 | {0} /* Terminating entry */ |
| 204 | }; | 223 | }; |
| 205 | MODULE_DEVICE_TABLE(pci, scsih_pci_table); | 224 | MODULE_DEVICE_TABLE(pci, scsih_pci_table); |
| @@ -317,6 +336,47 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name, | |||
| 317 | } | 336 | } |
| 318 | 337 | ||
| 319 | /** | 338 | /** |
| 339 | * _scsih_get_sas_address - set the sas_address for given device handle | ||
| 340 | * @handle: device handle | ||
| 341 | * @sas_address: sas address | ||
| 342 | * | ||
| 343 | * Returns 0 success, non-zero when failure | ||
| 344 | */ | ||
| 345 | static int | ||
| 346 | _scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle, | ||
| 347 | u64 *sas_address) | ||
| 348 | { | ||
| 349 | Mpi2SasDevicePage0_t sas_device_pg0; | ||
| 350 | Mpi2ConfigReply_t mpi_reply; | ||
| 351 | u32 ioc_status; | ||
| 352 | |||
| 353 | if (handle <= ioc->sas_hba.num_phys) { | ||
| 354 | *sas_address = ioc->sas_hba.sas_address; | ||
| 355 | return 0; | ||
| 356 | } else | ||
| 357 | *sas_address = 0; | ||
| 358 | |||
| 359 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, | ||
| 360 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { | ||
| 361 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 362 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 363 | return -ENXIO; | ||
| 364 | } | ||
| 365 | |||
| 366 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
| 367 | MPI2_IOCSTATUS_MASK; | ||
| 368 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
| 369 | printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" | ||
| 370 | "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, | ||
| 371 | __FILE__, __LINE__, __func__); | ||
| 372 | return -EIO; | ||
| 373 | } | ||
| 374 | |||
| 375 | *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); | ||
| 376 | return 0; | ||
| 377 | } | ||
| 378 | |||
| 379 | /** | ||
| 320 | * _scsih_determine_boot_device - determine boot device. | 380 | * _scsih_determine_boot_device - determine boot device. |
| 321 | * @ioc: per adapter object | 381 | * @ioc: per adapter object |
| 322 | * @device: either sas_device or raid_device object | 382 | * @device: either sas_device or raid_device object |
| @@ -510,8 +570,6 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 510 | struct _sas_device *sas_device) | 570 | struct _sas_device *sas_device) |
| 511 | { | 571 | { |
| 512 | unsigned long flags; | 572 | unsigned long flags; |
| 513 | u16 handle, parent_handle; | ||
| 514 | u64 sas_address; | ||
| 515 | 573 | ||
| 516 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" | 574 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" |
| 517 | "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, | 575 | "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, |
| @@ -521,10 +579,8 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 521 | list_add_tail(&sas_device->list, &ioc->sas_device_list); | 579 | list_add_tail(&sas_device->list, &ioc->sas_device_list); |
| 522 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 580 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 523 | 581 | ||
| 524 | handle = sas_device->handle; | 582 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, |
| 525 | parent_handle = sas_device->parent_handle; | 583 | sas_device->sas_address_parent)) |
| 526 | sas_address = sas_device->sas_address; | ||
| 527 | if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) | ||
| 528 | _scsih_sas_device_remove(ioc, sas_device); | 584 | _scsih_sas_device_remove(ioc, sas_device); |
| 529 | } | 585 | } |
| 530 | 586 | ||
| @@ -553,31 +609,6 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 553 | } | 609 | } |
| 554 | 610 | ||
| 555 | /** | 611 | /** |
| 556 | * mpt2sas_scsih_expander_find_by_handle - expander device search | ||
| 557 | * @ioc: per adapter object | ||
| 558 | * @handle: expander handle (assigned by firmware) | ||
| 559 | * Context: Calling function should acquire ioc->sas_device_lock | ||
| 560 | * | ||
| 561 | * This searches for expander device based on handle, then returns the | ||
| 562 | * sas_node object. | ||
| 563 | */ | ||
| 564 | struct _sas_node * | ||
| 565 | mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) | ||
| 566 | { | ||
| 567 | struct _sas_node *sas_expander, *r; | ||
| 568 | |||
| 569 | r = NULL; | ||
| 570 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { | ||
| 571 | if (sas_expander->handle != handle) | ||
| 572 | continue; | ||
| 573 | r = sas_expander; | ||
| 574 | goto out; | ||
| 575 | } | ||
| 576 | out: | ||
| 577 | return r; | ||
| 578 | } | ||
| 579 | |||
| 580 | /** | ||
| 581 | * _scsih_raid_device_find_by_id - raid device search | 612 | * _scsih_raid_device_find_by_id - raid device search |
| 582 | * @ioc: per adapter object | 613 | * @ioc: per adapter object |
| 583 | * @id: sas device target id | 614 | * @id: sas device target id |
| @@ -699,6 +730,31 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc, | |||
| 699 | } | 730 | } |
| 700 | 731 | ||
| 701 | /** | 732 | /** |
| 733 | * mpt2sas_scsih_expander_find_by_handle - expander device search | ||
| 734 | * @ioc: per adapter object | ||
| 735 | * @handle: expander handle (assigned by firmware) | ||
| 736 | * Context: Calling function should acquire ioc->sas_device_lock | ||
| 737 | * | ||
| 738 | * This searches for expander device based on handle, then returns the | ||
| 739 | * sas_node object. | ||
| 740 | */ | ||
| 741 | struct _sas_node * | ||
| 742 | mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) | ||
| 743 | { | ||
| 744 | struct _sas_node *sas_expander, *r; | ||
| 745 | |||
| 746 | r = NULL; | ||
| 747 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { | ||
| 748 | if (sas_expander->handle != handle) | ||
| 749 | continue; | ||
| 750 | r = sas_expander; | ||
| 751 | goto out; | ||
| 752 | } | ||
| 753 | out: | ||
| 754 | return r; | ||
| 755 | } | ||
| 756 | |||
| 757 | /** | ||
| 702 | * mpt2sas_scsih_expander_find_by_sas_address - expander device search | 758 | * mpt2sas_scsih_expander_find_by_sas_address - expander device search |
| 703 | * @ioc: per adapter object | 759 | * @ioc: per adapter object |
| 704 | * @sas_address: sas address | 760 | * @sas_address: sas address |
| @@ -1043,17 +1099,46 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, | |||
| 1043 | * _scsih_change_queue_depth - setting device queue depth | 1099 | * _scsih_change_queue_depth - setting device queue depth |
| 1044 | * @sdev: scsi device struct | 1100 | * @sdev: scsi device struct |
| 1045 | * @qdepth: requested queue depth | 1101 | * @qdepth: requested queue depth |
| 1102 | * @reason: calling context | ||
| 1046 | * | 1103 | * |
| 1047 | * Returns queue depth. | 1104 | * Returns queue depth. |
| 1048 | */ | 1105 | */ |
| 1049 | static int | 1106 | static int |
| 1050 | _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) | 1107 | _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) |
| 1051 | { | 1108 | { |
| 1052 | struct Scsi_Host *shost = sdev->host; | 1109 | struct Scsi_Host *shost = sdev->host; |
| 1053 | int max_depth; | 1110 | int max_depth; |
| 1054 | int tag_type; | 1111 | int tag_type; |
| 1112 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 1113 | struct MPT2SAS_DEVICE *sas_device_priv_data; | ||
| 1114 | struct MPT2SAS_TARGET *sas_target_priv_data; | ||
| 1115 | struct _sas_device *sas_device; | ||
| 1116 | unsigned long flags; | ||
| 1117 | |||
| 1118 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 1119 | return -EOPNOTSUPP; | ||
| 1055 | 1120 | ||
| 1056 | max_depth = shost->can_queue; | 1121 | max_depth = shost->can_queue; |
| 1122 | |||
| 1123 | /* limit max device queue for SATA to 32 */ | ||
| 1124 | sas_device_priv_data = sdev->hostdata; | ||
| 1125 | if (!sas_device_priv_data) | ||
| 1126 | goto not_sata; | ||
| 1127 | sas_target_priv_data = sas_device_priv_data->sas_target; | ||
| 1128 | if (!sas_target_priv_data) | ||
| 1129 | goto not_sata; | ||
| 1130 | if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) | ||
| 1131 | goto not_sata; | ||
| 1132 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 1133 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
| 1134 | sas_device_priv_data->sas_target->sas_address); | ||
| 1135 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 1136 | if (sas_device && sas_device->device_info & | ||
| 1137 | MPI2_SAS_DEVICE_INFO_SATA_DEVICE) | ||
| 1138 | max_depth = MPT2SAS_SATA_QUEUE_DEPTH; | ||
| 1139 | |||
| 1140 | not_sata: | ||
| 1141 | |||
| 1057 | if (!sdev->tagged_supported) | 1142 | if (!sdev->tagged_supported) |
| 1058 | max_depth = 1; | 1143 | max_depth = 1; |
| 1059 | if (qdepth > max_depth) | 1144 | if (qdepth > max_depth) |
| @@ -1488,7 +1573,7 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
| 1488 | r_level, raid_device->handle, | 1573 | r_level, raid_device->handle, |
| 1489 | (unsigned long long)raid_device->wwid, | 1574 | (unsigned long long)raid_device->wwid, |
| 1490 | raid_device->num_pds, ds); | 1575 | raid_device->num_pds, ds); |
| 1491 | _scsih_change_queue_depth(sdev, qdepth); | 1576 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); |
| 1492 | return 0; | 1577 | return 0; |
| 1493 | } | 1578 | } |
| 1494 | 1579 | ||
| @@ -1534,7 +1619,7 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
| 1534 | _scsih_display_sata_capabilities(ioc, sas_device, sdev); | 1619 | _scsih_display_sata_capabilities(ioc, sas_device, sdev); |
| 1535 | } | 1620 | } |
| 1536 | 1621 | ||
| 1537 | _scsih_change_queue_depth(sdev, qdepth); | 1622 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); |
| 1538 | 1623 | ||
| 1539 | if (ssp_target) | 1624 | if (ssp_target) |
| 1540 | sas_read_port_mode_page(sdev); | 1625 | sas_read_port_mode_page(sdev); |
| @@ -1874,6 +1959,8 @@ _scsih_abort(struct scsi_cmnd *scmd) | |||
| 1874 | goto out; | 1959 | goto out; |
| 1875 | } | 1960 | } |
| 1876 | 1961 | ||
| 1962 | mpt2sas_halt_firmware(ioc); | ||
| 1963 | |||
| 1877 | mutex_lock(&ioc->tm_cmds.mutex); | 1964 | mutex_lock(&ioc->tm_cmds.mutex); |
| 1878 | handle = sas_device_priv_data->sas_target->handle; | 1965 | handle = sas_device_priv_data->sas_target->handle; |
| 1879 | mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun, | 1966 | mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun, |
| @@ -2297,7 +2384,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, | |||
| 2297 | u16 handle; | 2384 | u16 handle; |
| 2298 | u16 reason_code; | 2385 | u16 reason_code; |
| 2299 | u8 phy_number; | 2386 | u8 phy_number; |
| 2300 | u8 link_rate; | ||
| 2301 | 2387 | ||
| 2302 | for (i = 0; i < event_data->NumEntries; i++) { | 2388 | for (i = 0; i < event_data->NumEntries; i++) { |
| 2303 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); | 2389 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
| @@ -2308,11 +2394,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, | |||
| 2308 | MPI2_EVENT_SAS_TOPO_RC_MASK; | 2394 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
| 2309 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) | 2395 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) |
| 2310 | _scsih_block_io_device(ioc, handle); | 2396 | _scsih_block_io_device(ioc, handle); |
| 2311 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) { | ||
| 2312 | link_rate = event_data->PHY[i].LinkRate >> 4; | ||
| 2313 | if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5) | ||
| 2314 | _scsih_ublock_io_device(ioc, handle); | ||
| 2315 | } | ||
| 2316 | } | 2397 | } |
| 2317 | } | 2398 | } |
| 2318 | 2399 | ||
| @@ -2349,16 +2430,10 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2349 | 2430 | ||
| 2350 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 2431 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
| 2351 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | 2432 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); |
| 2352 | if (!sas_device) { | ||
| 2353 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 2354 | printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", | ||
| 2355 | ioc->name, __func__); | ||
| 2356 | return; | ||
| 2357 | } | ||
| 2358 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 2433 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 2359 | 2434 | ||
| 2360 | /* skip is hidden raid component */ | 2435 | /* skip is hidden raid component */ |
| 2361 | if (sas_device->hidden_raid_component) | 2436 | if (sas_device && sas_device->hidden_raid_component) |
| 2362 | return; | 2437 | return; |
| 2363 | 2438 | ||
| 2364 | smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); | 2439 | smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); |
| @@ -2371,18 +2446,31 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2371 | delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL; | 2446 | delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL; |
| 2372 | list_add_tail(&delayed_tr->list, | 2447 | list_add_tail(&delayed_tr->list, |
| 2373 | &ioc->delayed_tr_list); | 2448 | &ioc->delayed_tr_list); |
| 2374 | if (sas_device->starget) | 2449 | if (sas_device && sas_device->starget) { |
| 2375 | dewtprintk(ioc, starget_printk(KERN_INFO, | 2450 | dewtprintk(ioc, starget_printk(KERN_INFO, |
| 2376 | sas_device->starget, "DELAYED:tr:handle(0x%04x), " | 2451 | sas_device->starget, "DELAYED:tr:handle(0x%04x), " |
| 2377 | "(open)\n", sas_device->handle)); | 2452 | "(open)\n", handle)); |
| 2453 | } else { | ||
| 2454 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | ||
| 2455 | "DELAYED:tr:handle(0x%04x), (open)\n", | ||
| 2456 | ioc->name, handle)); | ||
| 2457 | } | ||
| 2378 | return; | 2458 | return; |
| 2379 | } | 2459 | } |
| 2380 | 2460 | ||
| 2381 | if (sas_device->starget && sas_device->starget->hostdata) { | 2461 | if (sas_device) { |
| 2382 | sas_target_priv_data = sas_device->starget->hostdata; | 2462 | sas_device->state |= MPTSAS_STATE_TR_SEND; |
| 2383 | sas_target_priv_data->tm_busy = 1; | 2463 | sas_device->state |= MPT2SAS_REQ_SAS_CNTRL; |
| 2384 | dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, | 2464 | if (sas_device->starget && sas_device->starget->hostdata) { |
| 2385 | "tr:handle(0x%04x), (open)\n", sas_device->handle)); | 2465 | sas_target_priv_data = sas_device->starget->hostdata; |
| 2466 | sas_target_priv_data->tm_busy = 1; | ||
| 2467 | dewtprintk(ioc, starget_printk(KERN_INFO, | ||
| 2468 | sas_device->starget, "tr:handle(0x%04x), (open)\n", | ||
| 2469 | handle)); | ||
| 2470 | } | ||
| 2471 | } else { | ||
| 2472 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | ||
| 2473 | "tr:handle(0x%04x), (open)\n", ioc->name, handle)); | ||
| 2386 | } | 2474 | } |
| 2387 | 2475 | ||
| 2388 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); | 2476 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); |
| @@ -2390,8 +2478,6 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2390 | mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; | 2478 | mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; |
| 2391 | mpi_request->DevHandle = cpu_to_le16(handle); | 2479 | mpi_request->DevHandle = cpu_to_le16(handle); |
| 2392 | mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; | 2480 | mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; |
| 2393 | sas_device->state |= MPTSAS_STATE_TR_SEND; | ||
| 2394 | sas_device->state |= MPT2SAS_REQ_SAS_CNTRL; | ||
| 2395 | mpt2sas_base_put_smid_hi_priority(ioc, smid); | 2481 | mpt2sas_base_put_smid_hi_priority(ioc, smid); |
| 2396 | } | 2482 | } |
| 2397 | 2483 | ||
| @@ -2426,21 +2512,25 @@ _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, | |||
| 2426 | 2512 | ||
| 2427 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 2513 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
| 2428 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | 2514 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); |
| 2429 | if (!sas_device) { | ||
| 2430 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 2431 | printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", | ||
| 2432 | ioc->name, __func__); | ||
| 2433 | return 1; | ||
| 2434 | } | ||
| 2435 | sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE; | ||
| 2436 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 2515 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 2437 | 2516 | ||
| 2438 | if (sas_device->starget) | 2517 | if (sas_device) { |
| 2439 | dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, | 2518 | sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE; |
| 2519 | if (sas_device->starget) | ||
| 2520 | dewtprintk(ioc, starget_printk(KERN_INFO, | ||
| 2521 | sas_device->starget, | ||
| 2522 | "sc_complete:handle(0x%04x), " | ||
| 2523 | "ioc_status(0x%04x), loginfo(0x%08x)\n", | ||
| 2524 | handle, le16_to_cpu(mpi_reply->IOCStatus), | ||
| 2525 | le32_to_cpu(mpi_reply->IOCLogInfo))); | ||
| 2526 | } else { | ||
| 2527 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | ||
| 2440 | "sc_complete:handle(0x%04x), " | 2528 | "sc_complete:handle(0x%04x), " |
| 2441 | "ioc_status(0x%04x), loginfo(0x%08x)\n", | 2529 | "ioc_status(0x%04x), loginfo(0x%08x)\n", |
| 2442 | handle, le16_to_cpu(mpi_reply->IOCStatus), | 2530 | ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus), |
| 2443 | le32_to_cpu(mpi_reply->IOCLogInfo))); | 2531 | le32_to_cpu(mpi_reply->IOCLogInfo))); |
| 2532 | } | ||
| 2533 | |||
| 2444 | return 1; | 2534 | return 1; |
| 2445 | } | 2535 | } |
| 2446 | 2536 | ||
| @@ -2478,28 +2568,33 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 2478 | handle = le16_to_cpu(mpi_reply->DevHandle); | 2568 | handle = le16_to_cpu(mpi_reply->DevHandle); |
| 2479 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 2569 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
| 2480 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | 2570 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); |
| 2481 | if (!sas_device) { | ||
| 2482 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 2483 | printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", | ||
| 2484 | ioc->name, __func__); | ||
| 2485 | return 1; | ||
| 2486 | } | ||
| 2487 | sas_device->state |= MPTSAS_STATE_TR_COMPLETE; | ||
| 2488 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 2571 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 2489 | 2572 | ||
| 2490 | if (sas_device->starget) | 2573 | if (sas_device) { |
| 2491 | dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, | 2574 | sas_device->state |= MPTSAS_STATE_TR_COMPLETE; |
| 2492 | "tr_complete:handle(0x%04x), (%s) ioc_status(0x%04x), " | 2575 | if (sas_device->starget) { |
| 2493 | "loginfo(0x%08x), completed(%d)\n", | 2576 | dewtprintk(ioc, starget_printk(KERN_INFO, |
| 2494 | sas_device->handle, (sas_device->state & | 2577 | sas_device->starget, "tr_complete:handle(0x%04x), " |
| 2495 | MPT2SAS_REQ_SAS_CNTRL) ? "open" : "active", | 2578 | "(%s) ioc_status(0x%04x), loginfo(0x%08x), " |
| 2496 | le16_to_cpu(mpi_reply->IOCStatus), | 2579 | "completed(%d)\n", sas_device->handle, |
| 2580 | (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ? | ||
| 2581 | "open" : "active", | ||
| 2582 | le16_to_cpu(mpi_reply->IOCStatus), | ||
| 2583 | le32_to_cpu(mpi_reply->IOCLogInfo), | ||
| 2584 | le32_to_cpu(mpi_reply->TerminationCount))); | ||
| 2585 | if (sas_device->starget->hostdata) { | ||
| 2586 | sas_target_priv_data = | ||
| 2587 | sas_device->starget->hostdata; | ||
| 2588 | sas_target_priv_data->tm_busy = 0; | ||
| 2589 | } | ||
| 2590 | } | ||
| 2591 | } else { | ||
| 2592 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | ||
| 2593 | "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), " | ||
| 2594 | "loginfo(0x%08x), completed(%d)\n", ioc->name, | ||
| 2595 | handle, le16_to_cpu(mpi_reply->IOCStatus), | ||
| 2497 | le32_to_cpu(mpi_reply->IOCLogInfo), | 2596 | le32_to_cpu(mpi_reply->IOCLogInfo), |
| 2498 | le32_to_cpu(mpi_reply->TerminationCount))); | 2597 | le32_to_cpu(mpi_reply->TerminationCount))); |
| 2499 | |||
| 2500 | if (sas_device->starget && sas_device->starget->hostdata) { | ||
| 2501 | sas_target_priv_data = sas_device->starget->hostdata; | ||
| 2502 | sas_target_priv_data->tm_busy = 0; | ||
| 2503 | } | 2598 | } |
| 2504 | 2599 | ||
| 2505 | if (!list_empty(&ioc->delayed_tr_list)) { | 2600 | if (!list_empty(&ioc->delayed_tr_list)) { |
| @@ -2514,8 +2609,7 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 2514 | } else | 2609 | } else |
| 2515 | rc = 1; | 2610 | rc = 1; |
| 2516 | 2611 | ||
| 2517 | 2612 | if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL)) | |
| 2518 | if (!(sas_device->state & MPT2SAS_REQ_SAS_CNTRL)) | ||
| 2519 | return rc; | 2613 | return rc; |
| 2520 | 2614 | ||
| 2521 | if (ioc->shost_recovery) { | 2615 | if (ioc->shost_recovery) { |
| @@ -2531,12 +2625,14 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 2531 | return rc; | 2625 | return rc; |
| 2532 | } | 2626 | } |
| 2533 | 2627 | ||
| 2628 | if (sas_device) | ||
| 2629 | sas_device->state |= MPTSAS_STATE_CNTRL_SEND; | ||
| 2630 | |||
| 2534 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl); | 2631 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl); |
| 2535 | memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); | 2632 | memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); |
| 2536 | mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; | 2633 | mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; |
| 2537 | mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; | 2634 | mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; |
| 2538 | mpi_request->DevHandle = mpi_reply->DevHandle; | 2635 | mpi_request->DevHandle = mpi_reply->DevHandle; |
| 2539 | sas_device->state |= MPTSAS_STATE_CNTRL_SEND; | ||
| 2540 | mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl); | 2636 | mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl); |
| 2541 | return rc; | 2637 | return rc; |
| 2542 | } | 2638 | } |
| @@ -2678,8 +2774,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) | |||
| 2678 | else | 2774 | else |
| 2679 | return; | 2775 | return; |
| 2680 | 2776 | ||
| 2681 | mpi_request->EEDPBlockSize = scmd->device->sector_size; | ||
| 2682 | |||
| 2683 | switch (prot_type) { | 2777 | switch (prot_type) { |
| 2684 | case SCSI_PROT_DIF_TYPE1: | 2778 | case SCSI_PROT_DIF_TYPE1: |
| 2685 | 2779 | ||
| @@ -2687,8 +2781,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) | |||
| 2687 | * enable ref/guard checking | 2781 | * enable ref/guard checking |
| 2688 | * auto increment ref tag | 2782 | * auto increment ref tag |
| 2689 | */ | 2783 | */ |
| 2690 | mpi_request->EEDPFlags = eedp_flags | | 2784 | eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | |
| 2691 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | | ||
| 2692 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | | 2785 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | |
| 2693 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; | 2786 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; |
| 2694 | mpi_request->CDB.EEDP32.PrimaryReferenceTag = | 2787 | mpi_request->CDB.EEDP32.PrimaryReferenceTag = |
| @@ -2701,11 +2794,11 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) | |||
| 2701 | /* | 2794 | /* |
| 2702 | * enable guard checking | 2795 | * enable guard checking |
| 2703 | */ | 2796 | */ |
| 2704 | mpi_request->EEDPFlags = eedp_flags | | 2797 | eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; |
| 2705 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; | ||
| 2706 | |||
| 2707 | break; | 2798 | break; |
| 2708 | } | 2799 | } |
| 2800 | mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size); | ||
| 2801 | mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); | ||
| 2709 | } | 2802 | } |
| 2710 | 2803 | ||
| 2711 | /** | 2804 | /** |
| @@ -2788,7 +2881,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 2788 | } | 2881 | } |
| 2789 | 2882 | ||
| 2790 | /* see if we are busy with task managment stuff */ | 2883 | /* see if we are busy with task managment stuff */ |
| 2791 | if (sas_target_priv_data->tm_busy) | 2884 | if (sas_device_priv_data->block || sas_target_priv_data->tm_busy) |
| 2792 | return SCSI_MLQUEUE_DEVICE_BUSY; | 2885 | return SCSI_MLQUEUE_DEVICE_BUSY; |
| 2793 | else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) | 2886 | else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) |
| 2794 | return SCSI_MLQUEUE_HOST_BUSY; | 2887 | return SCSI_MLQUEUE_HOST_BUSY; |
| @@ -2842,7 +2935,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 2842 | mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; | 2935 | mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; |
| 2843 | mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; | 2936 | mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; |
| 2844 | mpi_request->SenseBufferLowAddress = | 2937 | mpi_request->SenseBufferLowAddress = |
| 2845 | (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); | 2938 | mpt2sas_base_get_sense_buffer_dma(ioc, smid); |
| 2846 | mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; | 2939 | mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; |
| 2847 | mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI + | 2940 | mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI + |
| 2848 | MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR); | 2941 | MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR); |
| @@ -2894,7 +2987,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) | |||
| 2894 | 2987 | ||
| 2895 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 2988 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 2896 | /** | 2989 | /** |
| 2897 | * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request | 2990 | * _scsih_scsi_ioc_info - translated non-successfull SCSI_IO request |
| 2898 | * @ioc: per adapter object | 2991 | * @ioc: per adapter object |
| 2899 | * @scmd: pointer to scsi command object | 2992 | * @scmd: pointer to scsi command object |
| 2900 | * @mpi_reply: reply mf payload returned from firmware | 2993 | * @mpi_reply: reply mf payload returned from firmware |
| @@ -3059,7 +3152,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
| 3059 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { | 3152 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { |
| 3060 | response_info = le32_to_cpu(mpi_reply->ResponseInfo); | 3153 | response_info = le32_to_cpu(mpi_reply->ResponseInfo); |
| 3061 | response_bytes = (u8 *)&response_info; | 3154 | response_bytes = (u8 *)&response_info; |
| 3062 | _scsih_response_code(ioc, response_bytes[3]); | 3155 | _scsih_response_code(ioc, response_bytes[0]); |
| 3063 | } | 3156 | } |
| 3064 | } | 3157 | } |
| 3065 | #endif | 3158 | #endif |
| @@ -3177,7 +3270,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3177 | u8 scsi_status; | 3270 | u8 scsi_status; |
| 3178 | u32 log_info; | 3271 | u32 log_info; |
| 3179 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 3272 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
| 3180 | u32 response_code; | 3273 | u32 response_code = 0; |
| 3181 | 3274 | ||
| 3182 | mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); | 3275 | mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); |
| 3183 | scmd = _scsih_scsi_lookup_get(ioc, smid); | 3276 | scmd = _scsih_scsi_lookup_get(ioc, smid); |
| @@ -3199,16 +3292,16 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3199 | } | 3292 | } |
| 3200 | 3293 | ||
| 3201 | /* turning off TLR */ | 3294 | /* turning off TLR */ |
| 3295 | scsi_state = mpi_reply->SCSIState; | ||
| 3296 | if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) | ||
| 3297 | response_code = | ||
| 3298 | le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; | ||
| 3202 | if (!sas_device_priv_data->tlr_snoop_check) { | 3299 | if (!sas_device_priv_data->tlr_snoop_check) { |
| 3203 | sas_device_priv_data->tlr_snoop_check++; | 3300 | sas_device_priv_data->tlr_snoop_check++; |
| 3204 | if (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) { | 3301 | if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && |
| 3205 | response_code = (le32_to_cpu(mpi_reply->ResponseInfo) | 3302 | response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) |
| 3206 | >> 24); | 3303 | sas_device_priv_data->flags &= |
| 3207 | if (response_code == | 3304 | ~MPT_DEVICE_TLR_ON; |
| 3208 | MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) | ||
| 3209 | sas_device_priv_data->flags &= | ||
| 3210 | ~MPT_DEVICE_TLR_ON; | ||
| 3211 | } | ||
| 3212 | } | 3305 | } |
| 3213 | 3306 | ||
| 3214 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); | 3307 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); |
| @@ -3219,7 +3312,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3219 | else | 3312 | else |
| 3220 | log_info = 0; | 3313 | log_info = 0; |
| 3221 | ioc_status &= MPI2_IOCSTATUS_MASK; | 3314 | ioc_status &= MPI2_IOCSTATUS_MASK; |
| 3222 | scsi_state = mpi_reply->SCSIState; | ||
| 3223 | scsi_status = mpi_reply->SCSIStatus; | 3315 | scsi_status = mpi_reply->SCSIStatus; |
| 3224 | 3316 | ||
| 3225 | if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && | 3317 | if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && |
| @@ -3255,10 +3347,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3255 | 3347 | ||
| 3256 | case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: | 3348 | case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: |
| 3257 | if (sas_device_priv_data->block) { | 3349 | if (sas_device_priv_data->block) { |
| 3258 | scmd->result = (DID_BUS_BUSY << 16); | 3350 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; |
| 3259 | break; | 3351 | goto out; |
| 3260 | } | 3352 | } |
| 3261 | |||
| 3262 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: | 3353 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: |
| 3263 | case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: | 3354 | case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: |
| 3264 | scmd->result = DID_RESET << 16; | 3355 | scmd->result = DID_RESET << 16; |
| @@ -3304,8 +3395,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3304 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: | 3395 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: |
| 3305 | case MPI2_IOCSTATUS_SUCCESS: | 3396 | case MPI2_IOCSTATUS_SUCCESS: |
| 3306 | scmd->result = (DID_OK << 16) | scsi_status; | 3397 | scmd->result = (DID_OK << 16) | scsi_status; |
| 3307 | if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | | 3398 | if (response_code == |
| 3308 | MPI2_SCSI_STATE_NO_SCSI_STATUS)) | 3399 | MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || |
| 3400 | (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | | ||
| 3401 | MPI2_SCSI_STATE_NO_SCSI_STATUS))) | ||
| 3309 | scmd->result = DID_SOFT_ERROR << 16; | 3402 | scmd->result = DID_SOFT_ERROR << 16; |
| 3310 | else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) | 3403 | else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) |
| 3311 | scmd->result = DID_RESET << 16; | 3404 | scmd->result = DID_RESET << 16; |
| @@ -3344,7 +3437,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3344 | /** | 3437 | /** |
| 3345 | * _scsih_sas_host_refresh - refreshing sas host object contents | 3438 | * _scsih_sas_host_refresh - refreshing sas host object contents |
| 3346 | * @ioc: per adapter object | 3439 | * @ioc: per adapter object |
| 3347 | * @update: update link information | ||
| 3348 | * Context: user | 3440 | * Context: user |
| 3349 | * | 3441 | * |
| 3350 | * During port enable, fw will send topology events for every device. Its | 3442 | * During port enable, fw will send topology events for every device. Its |
| @@ -3354,13 +3446,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
| 3354 | * Return nothing. | 3446 | * Return nothing. |
| 3355 | */ | 3447 | */ |
| 3356 | static void | 3448 | static void |
| 3357 | _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) | 3449 | _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc) |
| 3358 | { | 3450 | { |
| 3359 | u16 sz; | 3451 | u16 sz; |
| 3360 | u16 ioc_status; | 3452 | u16 ioc_status; |
| 3361 | int i; | 3453 | int i; |
| 3362 | Mpi2ConfigReply_t mpi_reply; | 3454 | Mpi2ConfigReply_t mpi_reply; |
| 3363 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; | 3455 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
| 3456 | u16 attached_handle; | ||
| 3364 | 3457 | ||
| 3365 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT | 3458 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT |
| 3366 | "updating handles for sas_host(0x%016llx)\n", | 3459 | "updating handles for sas_host(0x%016llx)\n", |
| @@ -3374,27 +3467,24 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) | |||
| 3374 | ioc->name, __FILE__, __LINE__, __func__); | 3467 | ioc->name, __FILE__, __LINE__, __func__); |
| 3375 | return; | 3468 | return; |
| 3376 | } | 3469 | } |
| 3377 | if (!(mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, | ||
| 3378 | sas_iounit_pg0, sz))) { | ||
| 3379 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
| 3380 | MPI2_IOCSTATUS_MASK; | ||
| 3381 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | ||
| 3382 | goto out; | ||
| 3383 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { | ||
| 3384 | ioc->sas_hba.phy[i].handle = | ||
| 3385 | le16_to_cpu(sas_iounit_pg0->PhyData[i]. | ||
| 3386 | ControllerDevHandle); | ||
| 3387 | if (update) | ||
| 3388 | mpt2sas_transport_update_links( | ||
| 3389 | ioc, | ||
| 3390 | ioc->sas_hba.phy[i].handle, | ||
| 3391 | le16_to_cpu(sas_iounit_pg0->PhyData[i]. | ||
| 3392 | AttachedDevHandle), i, | ||
| 3393 | sas_iounit_pg0->PhyData[i]. | ||
| 3394 | NegotiatedLinkRate >> 4); | ||
| 3395 | } | ||
| 3396 | } | ||
| 3397 | 3470 | ||
| 3471 | if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, | ||
| 3472 | sas_iounit_pg0, sz)) != 0) | ||
| 3473 | goto out; | ||
| 3474 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; | ||
| 3475 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | ||
| 3476 | goto out; | ||
| 3477 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { | ||
| 3478 | if (i == 0) | ||
| 3479 | ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> | ||
| 3480 | PhyData[0].ControllerDevHandle); | ||
| 3481 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; | ||
| 3482 | attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. | ||
| 3483 | AttachedDevHandle); | ||
| 3484 | mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address, | ||
| 3485 | attached_handle, i, sas_iounit_pg0->PhyData[i]. | ||
| 3486 | NegotiatedLinkRate >> 4); | ||
| 3487 | } | ||
| 3398 | out: | 3488 | out: |
| 3399 | kfree(sas_iounit_pg0); | 3489 | kfree(sas_iounit_pg0); |
| 3400 | } | 3490 | } |
| @@ -3507,19 +3597,21 @@ _scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc) | |||
| 3507 | ioc->name, __FILE__, __LINE__, __func__); | 3597 | ioc->name, __FILE__, __LINE__, __func__); |
| 3508 | goto out; | 3598 | goto out; |
| 3509 | } | 3599 | } |
| 3510 | ioc->sas_hba.phy[i].handle = | 3600 | |
| 3511 | le16_to_cpu(sas_iounit_pg0->PhyData[i].ControllerDevHandle); | 3601 | if (i == 0) |
| 3602 | ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> | ||
| 3603 | PhyData[0].ControllerDevHandle); | ||
| 3604 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; | ||
| 3512 | ioc->sas_hba.phy[i].phy_id = i; | 3605 | ioc->sas_hba.phy[i].phy_id = i; |
| 3513 | mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], | 3606 | mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], |
| 3514 | phy_pg0, ioc->sas_hba.parent_dev); | 3607 | phy_pg0, ioc->sas_hba.parent_dev); |
| 3515 | } | 3608 | } |
| 3516 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, | 3609 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
| 3517 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.phy[0].handle))) { | 3610 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { |
| 3518 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 3611 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
| 3519 | ioc->name, __FILE__, __LINE__, __func__); | 3612 | ioc->name, __FILE__, __LINE__, __func__); |
| 3520 | goto out; | 3613 | goto out; |
| 3521 | } | 3614 | } |
| 3522 | ioc->sas_hba.handle = le16_to_cpu(sas_device_pg0.DevHandle); | ||
| 3523 | ioc->sas_hba.enclosure_handle = | 3615 | ioc->sas_hba.enclosure_handle = |
| 3524 | le16_to_cpu(sas_device_pg0.EnclosureHandle); | 3616 | le16_to_cpu(sas_device_pg0.EnclosureHandle); |
| 3525 | ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); | 3617 | ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
| @@ -3562,7 +3654,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3562 | Mpi2SasEnclosurePage0_t enclosure_pg0; | 3654 | Mpi2SasEnclosurePage0_t enclosure_pg0; |
| 3563 | u32 ioc_status; | 3655 | u32 ioc_status; |
| 3564 | u16 parent_handle; | 3656 | u16 parent_handle; |
| 3565 | __le64 sas_address; | 3657 | __le64 sas_address, sas_address_parent = 0; |
| 3566 | int i; | 3658 | int i; |
| 3567 | unsigned long flags; | 3659 | unsigned long flags; |
| 3568 | struct _sas_port *mpt2sas_port = NULL; | 3660 | struct _sas_port *mpt2sas_port = NULL; |
| @@ -3591,10 +3683,16 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3591 | 3683 | ||
| 3592 | /* handle out of order topology events */ | 3684 | /* handle out of order topology events */ |
| 3593 | parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); | 3685 | parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); |
| 3594 | if (parent_handle >= ioc->sas_hba.num_phys) { | 3686 | if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) |
| 3687 | != 0) { | ||
| 3688 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 3689 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 3690 | return -1; | ||
| 3691 | } | ||
| 3692 | if (sas_address_parent != ioc->sas_hba.sas_address) { | ||
| 3595 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 3693 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 3596 | sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, | 3694 | sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, |
| 3597 | parent_handle); | 3695 | sas_address_parent); |
| 3598 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 3696 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 3599 | if (!sas_expander) { | 3697 | if (!sas_expander) { |
| 3600 | rc = _scsih_expander_add(ioc, parent_handle); | 3698 | rc = _scsih_expander_add(ioc, parent_handle); |
| @@ -3622,14 +3720,12 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3622 | 3720 | ||
| 3623 | sas_expander->handle = handle; | 3721 | sas_expander->handle = handle; |
| 3624 | sas_expander->num_phys = expander_pg0.NumPhys; | 3722 | sas_expander->num_phys = expander_pg0.NumPhys; |
| 3625 | sas_expander->parent_handle = parent_handle; | 3723 | sas_expander->sas_address_parent = sas_address_parent; |
| 3626 | sas_expander->enclosure_handle = | ||
| 3627 | le16_to_cpu(expander_pg0.EnclosureHandle); | ||
| 3628 | sas_expander->sas_address = sas_address; | 3724 | sas_expander->sas_address = sas_address; |
| 3629 | 3725 | ||
| 3630 | printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x)," | 3726 | printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x)," |
| 3631 | " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, | 3727 | " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, |
| 3632 | handle, sas_expander->parent_handle, (unsigned long long) | 3728 | handle, parent_handle, (unsigned long long) |
| 3633 | sas_expander->sas_address, sas_expander->num_phys); | 3729 | sas_expander->sas_address, sas_expander->num_phys); |
| 3634 | 3730 | ||
| 3635 | if (!sas_expander->num_phys) | 3731 | if (!sas_expander->num_phys) |
| @@ -3645,7 +3741,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3645 | 3741 | ||
| 3646 | INIT_LIST_HEAD(&sas_expander->sas_port_list); | 3742 | INIT_LIST_HEAD(&sas_expander->sas_port_list); |
| 3647 | mpt2sas_port = mpt2sas_transport_port_add(ioc, handle, | 3743 | mpt2sas_port = mpt2sas_transport_port_add(ioc, handle, |
| 3648 | sas_expander->parent_handle); | 3744 | sas_address_parent); |
| 3649 | if (!mpt2sas_port) { | 3745 | if (!mpt2sas_port) { |
| 3650 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 3746 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
| 3651 | ioc->name, __FILE__, __LINE__, __func__); | 3747 | ioc->name, __FILE__, __LINE__, __func__); |
| @@ -3691,20 +3787,54 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3691 | 3787 | ||
| 3692 | if (mpt2sas_port) | 3788 | if (mpt2sas_port) |
| 3693 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, | 3789 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, |
| 3694 | sas_expander->parent_handle); | 3790 | sas_address_parent); |
| 3695 | kfree(sas_expander); | 3791 | kfree(sas_expander); |
| 3696 | return rc; | 3792 | return rc; |
| 3697 | } | 3793 | } |
| 3698 | 3794 | ||
| 3699 | /** | 3795 | /** |
| 3796 | * _scsih_done - scsih callback handler. | ||
| 3797 | * @ioc: per adapter object | ||
| 3798 | * @smid: system request message index | ||
| 3799 | * @msix_index: MSIX table index supplied by the OS | ||
| 3800 | * @reply: reply message frame(lower 32bit addr) | ||
| 3801 | * | ||
| 3802 | * Callback handler when sending internal generated message frames. | ||
| 3803 | * The callback index passed is `ioc->scsih_cb_idx` | ||
| 3804 | * | ||
| 3805 | * Return 1 meaning mf should be freed from _base_interrupt | ||
| 3806 | * 0 means the mf is freed from this function. | ||
| 3807 | */ | ||
| 3808 | static u8 | ||
| 3809 | _scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | ||
| 3810 | { | ||
| 3811 | MPI2DefaultReply_t *mpi_reply; | ||
| 3812 | |||
| 3813 | mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); | ||
| 3814 | if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED) | ||
| 3815 | return 1; | ||
| 3816 | if (ioc->scsih_cmds.smid != smid) | ||
| 3817 | return 1; | ||
| 3818 | ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE; | ||
| 3819 | if (mpi_reply) { | ||
| 3820 | memcpy(ioc->scsih_cmds.reply, mpi_reply, | ||
| 3821 | mpi_reply->MsgLength*4); | ||
| 3822 | ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID; | ||
| 3823 | } | ||
| 3824 | ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING; | ||
| 3825 | complete(&ioc->scsih_cmds.done); | ||
| 3826 | return 1; | ||
| 3827 | } | ||
| 3828 | |||
| 3829 | /** | ||
| 3700 | * _scsih_expander_remove - removing expander object | 3830 | * _scsih_expander_remove - removing expander object |
| 3701 | * @ioc: per adapter object | 3831 | * @ioc: per adapter object |
| 3702 | * @handle: expander handle | 3832 | * @sas_address: expander sas_address |
| 3703 | * | 3833 | * |
| 3704 | * Return nothing. | 3834 | * Return nothing. |
| 3705 | */ | 3835 | */ |
| 3706 | static void | 3836 | static void |
| 3707 | _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) | 3837 | _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) |
| 3708 | { | 3838 | { |
| 3709 | struct _sas_node *sas_expander; | 3839 | struct _sas_node *sas_expander; |
| 3710 | unsigned long flags; | 3840 | unsigned long flags; |
| @@ -3713,7 +3843,8 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3713 | return; | 3843 | return; |
| 3714 | 3844 | ||
| 3715 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 3845 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 3716 | sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle); | 3846 | sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, |
| 3847 | sas_address); | ||
| 3717 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 3848 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 3718 | _scsih_expander_node_remove(ioc, sas_expander); | 3849 | _scsih_expander_node_remove(ioc, sas_expander); |
| 3719 | } | 3850 | } |
| @@ -3805,8 +3936,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) | |||
| 3805 | } | 3936 | } |
| 3806 | 3937 | ||
| 3807 | sas_device->handle = handle; | 3938 | sas_device->handle = handle; |
| 3808 | sas_device->parent_handle = | 3939 | if (_scsih_get_sas_address(ioc, le16_to_cpu |
| 3809 | le16_to_cpu(sas_device_pg0.ParentDevHandle); | 3940 | (sas_device_pg0.ParentDevHandle), |
| 3941 | &sas_device->sas_address_parent) != 0) | ||
| 3942 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 3943 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 3810 | sas_device->enclosure_handle = | 3944 | sas_device->enclosure_handle = |
| 3811 | le16_to_cpu(sas_device_pg0.EnclosureHandle); | 3945 | le16_to_cpu(sas_device_pg0.EnclosureHandle); |
| 3812 | sas_device->slot = | 3946 | sas_device->slot = |
| @@ -3836,43 +3970,39 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) | |||
| 3836 | /** | 3970 | /** |
| 3837 | * _scsih_remove_device - removing sas device object | 3971 | * _scsih_remove_device - removing sas device object |
| 3838 | * @ioc: per adapter object | 3972 | * @ioc: per adapter object |
| 3839 | * @handle: sas device handle | 3973 | * @sas_device: the sas_device object |
| 3840 | * | 3974 | * |
| 3841 | * Return nothing. | 3975 | * Return nothing. |
| 3842 | */ | 3976 | */ |
| 3843 | static void | 3977 | static void |
| 3844 | _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) | 3978 | _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device |
| 3979 | *sas_device) | ||
| 3845 | { | 3980 | { |
| 3846 | struct MPT2SAS_TARGET *sas_target_priv_data; | 3981 | struct MPT2SAS_TARGET *sas_target_priv_data; |
| 3847 | struct _sas_device *sas_device; | ||
| 3848 | unsigned long flags; | ||
| 3849 | Mpi2SasIoUnitControlReply_t mpi_reply; | 3982 | Mpi2SasIoUnitControlReply_t mpi_reply; |
| 3850 | Mpi2SasIoUnitControlRequest_t mpi_request; | 3983 | Mpi2SasIoUnitControlRequest_t mpi_request; |
| 3851 | u16 device_handle; | 3984 | u16 device_handle, handle; |
| 3852 | 3985 | ||
| 3853 | /* lookup sas_device */ | 3986 | if (!sas_device) |
| 3854 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 3855 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | ||
| 3856 | if (!sas_device) { | ||
| 3857 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 3858 | return; | 3987 | return; |
| 3859 | } | ||
| 3860 | 3988 | ||
| 3861 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle" | 3989 | handle = sas_device->handle; |
| 3862 | "(0x%04x)\n", ioc->name, __func__, handle)); | 3990 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x)," |
| 3991 | " sas_addr(0x%016llx)\n", ioc->name, __func__, handle, | ||
| 3992 | (unsigned long long) sas_device->sas_address)); | ||
| 3863 | 3993 | ||
| 3864 | if (sas_device->starget && sas_device->starget->hostdata) { | 3994 | if (sas_device->starget && sas_device->starget->hostdata) { |
| 3865 | sas_target_priv_data = sas_device->starget->hostdata; | 3995 | sas_target_priv_data = sas_device->starget->hostdata; |
| 3866 | sas_target_priv_data->deleted = 1; | 3996 | sas_target_priv_data->deleted = 1; |
| 3867 | } | 3997 | } |
| 3868 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 3869 | 3998 | ||
| 3870 | if (ioc->remove_host) | 3999 | if (ioc->remove_host || ioc->shost_recovery || !handle) |
| 3871 | goto out; | 4000 | goto out; |
| 3872 | 4001 | ||
| 3873 | if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) { | 4002 | if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) { |
| 3874 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip " | 4003 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip " |
| 3875 | "target_reset handle(0x%04x)\n", ioc->name, handle)); | 4004 | "target_reset handle(0x%04x)\n", ioc->name, |
| 4005 | handle)); | ||
| 3876 | goto skip_tr; | 4006 | goto skip_tr; |
| 3877 | } | 4007 | } |
| 3878 | 4008 | ||
| @@ -3925,10 +4055,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 3925 | _scsih_ublock_io_device(ioc, handle); | 4055 | _scsih_ublock_io_device(ioc, handle); |
| 3926 | 4056 | ||
| 3927 | mpt2sas_transport_port_remove(ioc, sas_device->sas_address, | 4057 | mpt2sas_transport_port_remove(ioc, sas_device->sas_address, |
| 3928 | sas_device->parent_handle); | 4058 | sas_device->sas_address_parent); |
| 3929 | 4059 | ||
| 3930 | printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" | 4060 | printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" |
| 3931 | "(0x%016llx)\n", ioc->name, sas_device->handle, | 4061 | "(0x%016llx)\n", ioc->name, handle, |
| 3932 | (unsigned long long) sas_device->sas_address); | 4062 | (unsigned long long) sas_device->sas_address); |
| 3933 | _scsih_sas_device_remove(ioc, sas_device); | 4063 | _scsih_sas_device_remove(ioc, sas_device); |
| 3934 | 4064 | ||
| @@ -3952,7 +4082,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, | |||
| 3952 | u16 reason_code; | 4082 | u16 reason_code; |
| 3953 | u8 phy_number; | 4083 | u8 phy_number; |
| 3954 | char *status_str = NULL; | 4084 | char *status_str = NULL; |
| 3955 | char link_rate[25]; | 4085 | u8 link_rate, prev_link_rate; |
| 3956 | 4086 | ||
| 3957 | switch (event_data->ExpStatus) { | 4087 | switch (event_data->ExpStatus) { |
| 3958 | case MPI2_EVENT_SAS_TOPO_ES_ADDED: | 4088 | case MPI2_EVENT_SAS_TOPO_ES_ADDED: |
| @@ -3962,6 +4092,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, | |||
| 3962 | status_str = "remove"; | 4092 | status_str = "remove"; |
| 3963 | break; | 4093 | break; |
| 3964 | case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: | 4094 | case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: |
| 4095 | case 0: | ||
| 3965 | status_str = "responding"; | 4096 | status_str = "responding"; |
| 3966 | break; | 4097 | break; |
| 3967 | case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: | 4098 | case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: |
| @@ -3987,30 +4118,30 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, | |||
| 3987 | MPI2_EVENT_SAS_TOPO_RC_MASK; | 4118 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
| 3988 | switch (reason_code) { | 4119 | switch (reason_code) { |
| 3989 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: | 4120 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
| 3990 | snprintf(link_rate, 25, ": add, link(0x%02x)", | 4121 | status_str = "target add"; |
| 3991 | (event_data->PHY[i].LinkRate >> 4)); | ||
| 3992 | status_str = link_rate; | ||
| 3993 | break; | 4122 | break; |
| 3994 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: | 4123 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: |
| 3995 | status_str = ": remove"; | 4124 | status_str = "target remove"; |
| 3996 | break; | 4125 | break; |
| 3997 | case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: | 4126 | case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: |
| 3998 | status_str = ": remove_delay"; | 4127 | status_str = "delay target remove"; |
| 3999 | break; | 4128 | break; |
| 4000 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: | 4129 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: |
| 4001 | snprintf(link_rate, 25, ": link(0x%02x)", | 4130 | status_str = "link rate change"; |
| 4002 | (event_data->PHY[i].LinkRate >> 4)); | ||
| 4003 | status_str = link_rate; | ||
| 4004 | break; | 4131 | break; |
| 4005 | case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: | 4132 | case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: |
| 4006 | status_str = ": responding"; | 4133 | status_str = "target responding"; |
| 4007 | break; | 4134 | break; |
| 4008 | default: | 4135 | default: |
| 4009 | status_str = ": unknown"; | 4136 | status_str = "unknown"; |
| 4010 | break; | 4137 | break; |
| 4011 | } | 4138 | } |
| 4012 | printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x)%s\n", | 4139 | link_rate = event_data->PHY[i].LinkRate >> 4; |
| 4013 | phy_number, handle, status_str); | 4140 | prev_link_rate = event_data->PHY[i].LinkRate & 0xF; |
| 4141 | printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x): %s:" | ||
| 4142 | " link rate: new(0x%02x), old(0x%02x)\n", phy_number, | ||
| 4143 | handle, status_str, link_rate, prev_link_rate); | ||
| 4144 | |||
| 4014 | } | 4145 | } |
| 4015 | } | 4146 | } |
| 4016 | #endif | 4147 | #endif |
| @@ -4031,8 +4162,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4031 | u16 reason_code; | 4162 | u16 reason_code; |
| 4032 | u8 phy_number; | 4163 | u8 phy_number; |
| 4033 | struct _sas_node *sas_expander; | 4164 | struct _sas_node *sas_expander; |
| 4165 | struct _sas_device *sas_device; | ||
| 4166 | u64 sas_address; | ||
| 4034 | unsigned long flags; | 4167 | unsigned long flags; |
| 4035 | u8 link_rate_; | 4168 | u8 link_rate, prev_link_rate; |
| 4036 | Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; | 4169 | Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; |
| 4037 | 4170 | ||
| 4038 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4171 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| @@ -4040,10 +4173,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4040 | _scsih_sas_topology_change_event_debug(ioc, event_data); | 4173 | _scsih_sas_topology_change_event_debug(ioc, event_data); |
| 4041 | #endif | 4174 | #endif |
| 4042 | 4175 | ||
| 4176 | if (ioc->shost_recovery) | ||
| 4177 | return; | ||
| 4178 | |||
| 4043 | if (!ioc->sas_hba.num_phys) | 4179 | if (!ioc->sas_hba.num_phys) |
| 4044 | _scsih_sas_host_add(ioc); | 4180 | _scsih_sas_host_add(ioc); |
| 4045 | else | 4181 | else |
| 4046 | _scsih_sas_host_refresh(ioc, 0); | 4182 | _scsih_sas_host_refresh(ioc); |
| 4047 | 4183 | ||
| 4048 | if (fw_event->ignore) { | 4184 | if (fw_event->ignore) { |
| 4049 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander " | 4185 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander " |
| @@ -4058,6 +4194,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4058 | if (_scsih_expander_add(ioc, parent_handle) != 0) | 4194 | if (_scsih_expander_add(ioc, parent_handle) != 0) |
| 4059 | return; | 4195 | return; |
| 4060 | 4196 | ||
| 4197 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | ||
| 4198 | sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, | ||
| 4199 | parent_handle); | ||
| 4200 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | ||
| 4201 | if (sas_expander) | ||
| 4202 | sas_address = sas_expander->sas_address; | ||
| 4203 | else if (parent_handle < ioc->sas_hba.num_phys) | ||
| 4204 | sas_address = ioc->sas_hba.sas_address; | ||
| 4205 | else | ||
| 4206 | return; | ||
| 4207 | |||
| 4061 | /* handle siblings events */ | 4208 | /* handle siblings events */ |
| 4062 | for (i = 0; i < event_data->NumEntries; i++) { | 4209 | for (i = 0; i < event_data->NumEntries; i++) { |
| 4063 | if (fw_event->ignore) { | 4210 | if (fw_event->ignore) { |
| @@ -4077,48 +4224,47 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4077 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); | 4224 | handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); |
| 4078 | if (!handle) | 4225 | if (!handle) |
| 4079 | continue; | 4226 | continue; |
| 4080 | link_rate_ = event_data->PHY[i].LinkRate >> 4; | 4227 | link_rate = event_data->PHY[i].LinkRate >> 4; |
| 4228 | prev_link_rate = event_data->PHY[i].LinkRate & 0xF; | ||
| 4081 | switch (reason_code) { | 4229 | switch (reason_code) { |
| 4082 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: | 4230 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: |
| 4231 | |||
| 4232 | if (link_rate == prev_link_rate) | ||
| 4233 | break; | ||
| 4234 | |||
| 4235 | mpt2sas_transport_update_links(ioc, sas_address, | ||
| 4236 | handle, phy_number, link_rate); | ||
| 4237 | |||
| 4238 | if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5) | ||
| 4239 | _scsih_ublock_io_device(ioc, handle); | ||
| 4240 | break; | ||
| 4083 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: | 4241 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
| 4084 | if (!parent_handle) { | 4242 | |
| 4085 | if (phy_number < ioc->sas_hba.num_phys) | 4243 | mpt2sas_transport_update_links(ioc, sas_address, |
| 4086 | mpt2sas_transport_update_links( | 4244 | handle, phy_number, link_rate); |
| 4087 | ioc, | 4245 | |
| 4088 | ioc->sas_hba.phy[phy_number].handle, | 4246 | _scsih_add_device(ioc, handle, phy_number, 0); |
| 4089 | handle, phy_number, link_rate_); | ||
| 4090 | } else { | ||
| 4091 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | ||
| 4092 | sas_expander = | ||
| 4093 | mpt2sas_scsih_expander_find_by_handle(ioc, | ||
| 4094 | parent_handle); | ||
| 4095 | spin_unlock_irqrestore(&ioc->sas_node_lock, | ||
| 4096 | flags); | ||
| 4097 | if (sas_expander) { | ||
| 4098 | if (phy_number < sas_expander->num_phys) | ||
| 4099 | mpt2sas_transport_update_links( | ||
| 4100 | ioc, | ||
| 4101 | sas_expander-> | ||
| 4102 | phy[phy_number].handle, | ||
| 4103 | handle, phy_number, | ||
| 4104 | link_rate_); | ||
| 4105 | } | ||
| 4106 | } | ||
| 4107 | if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) { | ||
| 4108 | if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5) | ||
| 4109 | break; | ||
| 4110 | _scsih_add_device(ioc, handle, phy_number, 0); | ||
| 4111 | } | ||
| 4112 | break; | 4247 | break; |
| 4113 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: | 4248 | case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: |
| 4114 | _scsih_remove_device(ioc, handle); | 4249 | |
| 4250 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 4251 | sas_device = _scsih_sas_device_find_by_handle(ioc, | ||
| 4252 | handle); | ||
| 4253 | if (!sas_device) { | ||
| 4254 | spin_unlock_irqrestore(&ioc->sas_device_lock, | ||
| 4255 | flags); | ||
| 4256 | break; | ||
| 4257 | } | ||
| 4258 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 4259 | _scsih_remove_device(ioc, sas_device); | ||
| 4115 | break; | 4260 | break; |
| 4116 | } | 4261 | } |
| 4117 | } | 4262 | } |
| 4118 | 4263 | ||
| 4119 | /* handle expander removal */ | 4264 | /* handle expander removal */ |
| 4120 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) | 4265 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && |
| 4121 | _scsih_expander_remove(ioc, parent_handle); | 4266 | sas_expander) |
| 4267 | _scsih_expander_remove(ioc, sas_address); | ||
| 4122 | 4268 | ||
| 4123 | } | 4269 | } |
| 4124 | 4270 | ||
| @@ -4170,6 +4316,12 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc, | |||
| 4170 | case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: | 4316 | case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: |
| 4171 | reason_str = "internal async notification"; | 4317 | reason_str = "internal async notification"; |
| 4172 | break; | 4318 | break; |
| 4319 | case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: | ||
| 4320 | reason_str = "expander reduced functionality"; | ||
| 4321 | break; | ||
| 4322 | case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: | ||
| 4323 | reason_str = "expander reduced functionality complete"; | ||
| 4324 | break; | ||
| 4173 | default: | 4325 | default: |
| 4174 | reason_str = "unknown reason"; | 4326 | reason_str = "unknown reason"; |
| 4175 | break; | 4327 | break; |
| @@ -4197,11 +4349,43 @@ static void | |||
| 4197 | _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, | 4349 | _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, |
| 4198 | struct fw_event_work *fw_event) | 4350 | struct fw_event_work *fw_event) |
| 4199 | { | 4351 | { |
| 4352 | struct MPT2SAS_TARGET *target_priv_data; | ||
| 4353 | struct _sas_device *sas_device; | ||
| 4354 | __le64 sas_address; | ||
| 4355 | unsigned long flags; | ||
| 4356 | Mpi2EventDataSasDeviceStatusChange_t *event_data = | ||
| 4357 | fw_event->event_data; | ||
| 4358 | |||
| 4200 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4359 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 4201 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 4360 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| 4202 | _scsih_sas_device_status_change_event_debug(ioc, | 4361 | _scsih_sas_device_status_change_event_debug(ioc, |
| 4203 | fw_event->event_data); | 4362 | event_data); |
| 4204 | #endif | 4363 | #endif |
| 4364 | |||
| 4365 | if (!(event_data->ReasonCode == | ||
| 4366 | MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && | ||
| 4367 | event_data->ReasonCode == | ||
| 4368 | MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)) | ||
| 4369 | return; | ||
| 4370 | |||
| 4371 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 4372 | sas_address = le64_to_cpu(event_data->SASAddress); | ||
| 4373 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
| 4374 | sas_address); | ||
| 4375 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 4376 | |||
| 4377 | if (!sas_device || !sas_device->starget) | ||
| 4378 | return; | ||
| 4379 | |||
| 4380 | target_priv_data = sas_device->starget->hostdata; | ||
| 4381 | if (!target_priv_data) | ||
| 4382 | return; | ||
| 4383 | |||
| 4384 | if (event_data->ReasonCode == | ||
| 4385 | MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) | ||
| 4386 | target_priv_data->tm_busy = 1; | ||
| 4387 | else | ||
| 4388 | target_priv_data->tm_busy = 0; | ||
| 4205 | } | 4389 | } |
| 4206 | 4390 | ||
| 4207 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4391 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| @@ -4281,6 +4465,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4281 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4465 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 4282 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; | 4466 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; |
| 4283 | #endif | 4467 | #endif |
| 4468 | u16 ioc_status; | ||
| 4284 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: " | 4469 | dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: " |
| 4285 | "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, | 4470 | "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, |
| 4286 | event_data->PortWidth)); | 4471 | event_data->PortWidth)); |
| @@ -4314,8 +4499,9 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4314 | mpt2sas_scsih_issue_tm(ioc, handle, lun, | 4499 | mpt2sas_scsih_issue_tm(ioc, handle, lun, |
| 4315 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30); | 4500 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30); |
| 4316 | ioc->tm_cmds.status = MPT2_CMD_NOT_USED; | 4501 | ioc->tm_cmds.status = MPT2_CMD_NOT_USED; |
| 4317 | 4502 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) | |
| 4318 | if ((mpi_reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) && | 4503 | & MPI2_IOCSTATUS_MASK; |
| 4504 | if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && | ||
| 4319 | (mpi_reply->ResponseCode == | 4505 | (mpi_reply->ResponseCode == |
| 4320 | MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || | 4506 | MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || |
| 4321 | mpi_reply->ResponseCode == | 4507 | mpi_reply->ResponseCode == |
| @@ -4570,7 +4756,7 @@ _scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc, | |||
| 4570 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 4756 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 4571 | if (!sas_device) | 4757 | if (!sas_device) |
| 4572 | return; | 4758 | return; |
| 4573 | _scsih_remove_device(ioc, handle); | 4759 | _scsih_remove_device(ioc, sas_device); |
| 4574 | } | 4760 | } |
| 4575 | 4761 | ||
| 4576 | /** | 4762 | /** |
| @@ -4591,6 +4777,8 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 4591 | Mpi2ConfigReply_t mpi_reply; | 4777 | Mpi2ConfigReply_t mpi_reply; |
| 4592 | Mpi2SasDevicePage0_t sas_device_pg0; | 4778 | Mpi2SasDevicePage0_t sas_device_pg0; |
| 4593 | u32 ioc_status; | 4779 | u32 ioc_status; |
| 4780 | u64 sas_address; | ||
| 4781 | u16 parent_handle; | ||
| 4594 | 4782 | ||
| 4595 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 4783 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
| 4596 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | 4784 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); |
| @@ -4615,9 +4803,10 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 4615 | return; | 4803 | return; |
| 4616 | } | 4804 | } |
| 4617 | 4805 | ||
| 4618 | mpt2sas_transport_update_links(ioc, | 4806 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
| 4619 | le16_to_cpu(sas_device_pg0.ParentDevHandle), | 4807 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) |
| 4620 | handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | 4808 | mpt2sas_transport_update_links(ioc, sas_address, handle, |
| 4809 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | ||
| 4621 | 4810 | ||
| 4622 | _scsih_add_device(ioc, handle, 0, 1); | 4811 | _scsih_add_device(ioc, handle, 0, 1); |
| 4623 | } | 4812 | } |
| @@ -4857,7 +5046,7 @@ static void | |||
| 4857 | _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, | 5046 | _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, |
| 4858 | struct fw_event_work *fw_event) | 5047 | struct fw_event_work *fw_event) |
| 4859 | { | 5048 | { |
| 4860 | u16 handle; | 5049 | u16 handle, parent_handle; |
| 4861 | u32 state; | 5050 | u32 state; |
| 4862 | struct _sas_device *sas_device; | 5051 | struct _sas_device *sas_device; |
| 4863 | unsigned long flags; | 5052 | unsigned long flags; |
| @@ -4865,6 +5054,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4865 | Mpi2SasDevicePage0_t sas_device_pg0; | 5054 | Mpi2SasDevicePage0_t sas_device_pg0; |
| 4866 | u32 ioc_status; | 5055 | u32 ioc_status; |
| 4867 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; | 5056 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; |
| 5057 | u64 sas_address; | ||
| 4868 | 5058 | ||
| 4869 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) | 5059 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) |
| 4870 | return; | 5060 | return; |
| @@ -4906,9 +5096,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 4906 | return; | 5096 | return; |
| 4907 | } | 5097 | } |
| 4908 | 5098 | ||
| 4909 | mpt2sas_transport_update_links(ioc, | 5099 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); |
| 4910 | le16_to_cpu(sas_device_pg0.ParentDevHandle), | 5100 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) |
| 4911 | handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | 5101 | mpt2sas_transport_update_links(ioc, sas_address, handle, |
| 5102 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | ||
| 4912 | 5103 | ||
| 4913 | _scsih_add_device(ioc, handle, 0, 1); | 5104 | _scsih_add_device(ioc, handle, 0, 1); |
| 4914 | 5105 | ||
| @@ -4948,11 +5139,17 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc, | |||
| 4948 | case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: | 5139 | case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: |
| 4949 | reason_str = "consistency check"; | 5140 | reason_str = "consistency check"; |
| 4950 | break; | 5141 | break; |
| 4951 | default: | 5142 | case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: |
| 4952 | reason_str = "unknown reason"; | 5143 | reason_str = "background init"; |
| 5144 | break; | ||
| 5145 | case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: | ||
| 5146 | reason_str = "make data consistent"; | ||
| 4953 | break; | 5147 | break; |
| 4954 | } | 5148 | } |
| 4955 | 5149 | ||
| 5150 | if (!reason_str) | ||
| 5151 | return; | ||
| 5152 | |||
| 4956 | printk(MPT2SAS_INFO_FMT "raid operational status: (%s)" | 5153 | printk(MPT2SAS_INFO_FMT "raid operational status: (%s)" |
| 4957 | "\thandle(0x%04x), percent complete(%d)\n", | 5154 | "\thandle(0x%04x), percent complete(%d)\n", |
| 4958 | ioc->name, reason_str, | 5155 | ioc->name, reason_str, |
| @@ -5252,18 +5449,23 @@ _scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, | |||
| 5252 | { | 5449 | { |
| 5253 | struct _sas_node *sas_expander; | 5450 | struct _sas_node *sas_expander; |
| 5254 | unsigned long flags; | 5451 | unsigned long flags; |
| 5452 | int i; | ||
| 5255 | 5453 | ||
| 5256 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 5454 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 5257 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { | 5455 | list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { |
| 5258 | if (sas_expander->sas_address == sas_address) { | 5456 | if (sas_expander->sas_address != sas_address) |
| 5259 | sas_expander->responding = 1; | 5457 | continue; |
| 5260 | if (sas_expander->handle != handle) { | 5458 | sas_expander->responding = 1; |
| 5261 | printk(KERN_INFO "old handle(0x%04x)\n", | 5459 | if (sas_expander->handle == handle) |
| 5262 | sas_expander->handle); | ||
| 5263 | sas_expander->handle = handle; | ||
| 5264 | } | ||
| 5265 | goto out; | 5460 | goto out; |
| 5266 | } | 5461 | printk(KERN_INFO "\texpander(0x%016llx): handle changed" |
| 5462 | " from(0x%04x) to (0x%04x)!!!\n", | ||
| 5463 | (unsigned long long)sas_expander->sas_address, | ||
| 5464 | sas_expander->handle, handle); | ||
| 5465 | sas_expander->handle = handle; | ||
| 5466 | for (i = 0 ; i < sas_expander->num_phys ; i++) | ||
| 5467 | sas_expander->phy[i].handle = handle; | ||
| 5468 | goto out; | ||
| 5267 | } | 5469 | } |
| 5268 | out: | 5470 | out: |
| 5269 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 5471 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| @@ -5340,7 +5542,9 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 5340 | (unsigned long long) | 5542 | (unsigned long long) |
| 5341 | sas_device->enclosure_logical_id, | 5543 | sas_device->enclosure_logical_id, |
| 5342 | sas_device->slot); | 5544 | sas_device->slot); |
| 5343 | _scsih_remove_device(ioc, sas_device->handle); | 5545 | /* invalidate the device handle */ |
| 5546 | sas_device->handle = 0; | ||
| 5547 | _scsih_remove_device(ioc, sas_device); | ||
| 5344 | } | 5548 | } |
| 5345 | 5549 | ||
| 5346 | list_for_each_entry_safe(raid_device, raid_device_next, | 5550 | list_for_each_entry_safe(raid_device, raid_device_next, |
| @@ -5366,7 +5570,7 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 5366 | sas_expander->responding = 0; | 5570 | sas_expander->responding = 0; |
| 5367 | continue; | 5571 | continue; |
| 5368 | } | 5572 | } |
| 5369 | _scsih_expander_remove(ioc, sas_expander->handle); | 5573 | _scsih_expander_remove(ioc, sas_expander->sas_address); |
| 5370 | goto retry_expander_search; | 5574 | goto retry_expander_search; |
| 5371 | } | 5575 | } |
| 5372 | } | 5576 | } |
| @@ -5406,7 +5610,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) | |||
| 5406 | case MPT2_IOC_DONE_RESET: | 5610 | case MPT2_IOC_DONE_RESET: |
| 5407 | dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " | 5611 | dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " |
| 5408 | "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); | 5612 | "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); |
| 5409 | _scsih_sas_host_refresh(ioc, 0); | 5613 | _scsih_sas_host_refresh(ioc); |
| 5410 | _scsih_search_responding_sas_devices(ioc); | 5614 | _scsih_search_responding_sas_devices(ioc); |
| 5411 | _scsih_search_responding_raid_devices(ioc); | 5615 | _scsih_search_responding_raid_devices(ioc); |
| 5412 | _scsih_search_responding_expanders(ioc); | 5616 | _scsih_search_responding_expanders(ioc); |
| @@ -5646,7 +5850,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | |||
| 5646 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 5850 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 5647 | if (!sas_device) | 5851 | if (!sas_device) |
| 5648 | continue; | 5852 | continue; |
| 5649 | _scsih_remove_device(ioc, sas_device->handle); | 5853 | _scsih_remove_device(ioc, sas_device); |
| 5650 | if (ioc->shost_recovery) | 5854 | if (ioc->shost_recovery) |
| 5651 | return; | 5855 | return; |
| 5652 | goto retry_device_search; | 5856 | goto retry_device_search; |
| @@ -5669,7 +5873,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | |||
| 5669 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 5873 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 5670 | if (!expander_sibling) | 5874 | if (!expander_sibling) |
| 5671 | continue; | 5875 | continue; |
| 5672 | _scsih_expander_remove(ioc, expander_sibling->handle); | 5876 | _scsih_expander_remove(ioc, |
| 5877 | expander_sibling->sas_address); | ||
| 5673 | if (ioc->shost_recovery) | 5878 | if (ioc->shost_recovery) |
| 5674 | return; | 5879 | return; |
| 5675 | goto retry_expander_search; | 5880 | goto retry_expander_search; |
| @@ -5677,7 +5882,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | |||
| 5677 | } | 5882 | } |
| 5678 | 5883 | ||
| 5679 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, | 5884 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, |
| 5680 | sas_expander->parent_handle); | 5885 | sas_expander->sas_address_parent); |
| 5681 | 5886 | ||
| 5682 | printk(MPT2SAS_INFO_FMT "expander_remove: handle" | 5887 | printk(MPT2SAS_INFO_FMT "expander_remove: handle" |
| 5683 | "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, | 5888 | "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, |
| @@ -5690,9 +5895,99 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | |||
| 5690 | } | 5895 | } |
| 5691 | 5896 | ||
| 5692 | /** | 5897 | /** |
| 5898 | * _scsih_ir_shutdown - IR shutdown notification | ||
| 5899 | * @ioc: per adapter object | ||
| 5900 | * | ||
| 5901 | * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that | ||
| 5902 | * the host system is shutting down. | ||
| 5903 | * | ||
| 5904 | * Return nothing. | ||
| 5905 | */ | ||
| 5906 | static void | ||
| 5907 | _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) | ||
| 5908 | { | ||
| 5909 | Mpi2RaidActionRequest_t *mpi_request; | ||
| 5910 | Mpi2RaidActionReply_t *mpi_reply; | ||
| 5911 | u16 smid; | ||
| 5912 | |||
| 5913 | /* is IR firmware build loaded ? */ | ||
| 5914 | if (!ioc->ir_firmware) | ||
| 5915 | return; | ||
| 5916 | |||
| 5917 | /* are there any volumes ? */ | ||
| 5918 | if (list_empty(&ioc->raid_device_list)) | ||
| 5919 | return; | ||
| 5920 | |||
| 5921 | mutex_lock(&ioc->scsih_cmds.mutex); | ||
| 5922 | |||
| 5923 | if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { | ||
| 5924 | printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n", | ||
| 5925 | ioc->name, __func__); | ||
| 5926 | goto out; | ||
| 5927 | } | ||
| 5928 | ioc->scsih_cmds.status = MPT2_CMD_PENDING; | ||
| 5929 | |||
| 5930 | smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx); | ||
| 5931 | if (!smid) { | ||
| 5932 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", | ||
| 5933 | ioc->name, __func__); | ||
| 5934 | ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; | ||
| 5935 | goto out; | ||
| 5936 | } | ||
| 5937 | |||
| 5938 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); | ||
| 5939 | ioc->scsih_cmds.smid = smid; | ||
| 5940 | memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); | ||
| 5941 | |||
| 5942 | mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; | ||
| 5943 | mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; | ||
| 5944 | |||
| 5945 | printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); | ||
| 5946 | init_completion(&ioc->scsih_cmds.done); | ||
| 5947 | mpt2sas_base_put_smid_default(ioc, smid); | ||
| 5948 | wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); | ||
| 5949 | |||
| 5950 | if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) { | ||
| 5951 | printk(MPT2SAS_ERR_FMT "%s: timeout\n", | ||
| 5952 | ioc->name, __func__); | ||
| 5953 | goto out; | ||
| 5954 | } | ||
| 5955 | |||
| 5956 | if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { | ||
| 5957 | mpi_reply = ioc->scsih_cmds.reply; | ||
| 5958 | |||
| 5959 | printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " | ||
| 5960 | "ioc_status(0x%04x), loginfo(0x%08x)\n", | ||
| 5961 | ioc->name, le16_to_cpu(mpi_reply->IOCStatus), | ||
| 5962 | le32_to_cpu(mpi_reply->IOCLogInfo)); | ||
| 5963 | } | ||
| 5964 | |||
| 5965 | out: | ||
| 5966 | ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; | ||
| 5967 | mutex_unlock(&ioc->scsih_cmds.mutex); | ||
| 5968 | } | ||
| 5969 | |||
| 5970 | /** | ||
| 5971 | * _scsih_shutdown - routine call during system shutdown | ||
| 5972 | * @pdev: PCI device struct | ||
| 5973 | * | ||
| 5974 | * Return nothing. | ||
| 5975 | */ | ||
| 5976 | static void | ||
| 5977 | _scsih_shutdown(struct pci_dev *pdev) | ||
| 5978 | { | ||
| 5979 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
| 5980 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 5981 | |||
| 5982 | _scsih_ir_shutdown(ioc); | ||
| 5983 | mpt2sas_base_detach(ioc); | ||
| 5984 | } | ||
| 5985 | |||
| 5986 | /** | ||
| 5693 | * _scsih_remove - detach and remove add host | 5987 | * _scsih_remove - detach and remove add host |
| 5694 | * @pdev: PCI device struct | 5988 | * @pdev: PCI device struct |
| 5695 | * | 5989 | * |
| 5990 | * Routine called when unloading the driver. | ||
| 5696 | * Return nothing. | 5991 | * Return nothing. |
| 5697 | */ | 5992 | */ |
| 5698 | static void __devexit | 5993 | static void __devexit |
| @@ -5726,7 +6021,7 @@ _scsih_remove(struct pci_dev *pdev) | |||
| 5726 | mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | 6021 | mpt2sas_scsih_sas_device_find_by_sas_address(ioc, |
| 5727 | mpt2sas_port->remote_identify.sas_address); | 6022 | mpt2sas_port->remote_identify.sas_address); |
| 5728 | if (sas_device) { | 6023 | if (sas_device) { |
| 5729 | _scsih_remove_device(ioc, sas_device->handle); | 6024 | _scsih_remove_device(ioc, sas_device); |
| 5730 | goto retry_again; | 6025 | goto retry_again; |
| 5731 | } | 6026 | } |
| 5732 | } else { | 6027 | } else { |
| @@ -5735,7 +6030,7 @@ _scsih_remove(struct pci_dev *pdev) | |||
| 5735 | mpt2sas_port->remote_identify.sas_address); | 6030 | mpt2sas_port->remote_identify.sas_address); |
| 5736 | if (expander_sibling) { | 6031 | if (expander_sibling) { |
| 5737 | _scsih_expander_remove(ioc, | 6032 | _scsih_expander_remove(ioc, |
| 5738 | expander_sibling->handle); | 6033 | expander_sibling->sas_address); |
| 5739 | goto retry_again; | 6034 | goto retry_again; |
| 5740 | } | 6035 | } |
| 5741 | } | 6036 | } |
| @@ -5749,7 +6044,7 @@ _scsih_remove(struct pci_dev *pdev) | |||
| 5749 | } | 6044 | } |
| 5750 | 6045 | ||
| 5751 | sas_remove_host(shost); | 6046 | sas_remove_host(shost); |
| 5752 | mpt2sas_base_detach(ioc); | 6047 | _scsih_shutdown(pdev); |
| 5753 | list_del(&ioc->list); | 6048 | list_del(&ioc->list); |
| 5754 | scsi_remove_host(shost); | 6049 | scsi_remove_host(shost); |
| 5755 | scsi_host_put(shost); | 6050 | scsi_host_put(shost); |
| @@ -5770,7 +6065,8 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 5770 | void *device; | 6065 | void *device; |
| 5771 | struct _sas_device *sas_device; | 6066 | struct _sas_device *sas_device; |
| 5772 | struct _raid_device *raid_device; | 6067 | struct _raid_device *raid_device; |
| 5773 | u16 handle, parent_handle; | 6068 | u16 handle; |
| 6069 | u64 sas_address_parent; | ||
| 5774 | u64 sas_address; | 6070 | u64 sas_address; |
| 5775 | unsigned long flags; | 6071 | unsigned long flags; |
| 5776 | int rc; | 6072 | int rc; |
| @@ -5799,17 +6095,17 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 5799 | } else { | 6095 | } else { |
| 5800 | sas_device = device; | 6096 | sas_device = device; |
| 5801 | handle = sas_device->handle; | 6097 | handle = sas_device->handle; |
| 5802 | parent_handle = sas_device->parent_handle; | 6098 | sas_address_parent = sas_device->sas_address_parent; |
| 5803 | sas_address = sas_device->sas_address; | 6099 | sas_address = sas_device->sas_address; |
| 5804 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 6100 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
| 5805 | list_move_tail(&sas_device->list, &ioc->sas_device_list); | 6101 | list_move_tail(&sas_device->list, &ioc->sas_device_list); |
| 5806 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 6102 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 5807 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, | 6103 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, |
| 5808 | sas_device->parent_handle)) { | 6104 | sas_device->sas_address_parent)) { |
| 5809 | _scsih_sas_device_remove(ioc, sas_device); | 6105 | _scsih_sas_device_remove(ioc, sas_device); |
| 5810 | } else if (!sas_device->starget) { | 6106 | } else if (!sas_device->starget) { |
| 5811 | mpt2sas_transport_port_remove(ioc, sas_address, | 6107 | mpt2sas_transport_port_remove(ioc, sas_address, |
| 5812 | parent_handle); | 6108 | sas_address_parent); |
| 5813 | _scsih_sas_device_remove(ioc, sas_device); | 6109 | _scsih_sas_device_remove(ioc, sas_device); |
| 5814 | } | 6110 | } |
| 5815 | } | 6111 | } |
| @@ -5849,8 +6145,6 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) | |||
| 5849 | { | 6145 | { |
| 5850 | struct _sas_device *sas_device, *next; | 6146 | struct _sas_device *sas_device, *next; |
| 5851 | unsigned long flags; | 6147 | unsigned long flags; |
| 5852 | u16 handle, parent_handle; | ||
| 5853 | u64 sas_address; | ||
| 5854 | 6148 | ||
| 5855 | /* SAS Device List */ | 6149 | /* SAS Device List */ |
| 5856 | list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, | 6150 | list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, |
| @@ -5859,14 +6153,13 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) | |||
| 5859 | list_move_tail(&sas_device->list, &ioc->sas_device_list); | 6153 | list_move_tail(&sas_device->list, &ioc->sas_device_list); |
| 5860 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 6154 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 5861 | 6155 | ||
| 5862 | handle = sas_device->handle; | 6156 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, |
| 5863 | parent_handle = sas_device->parent_handle; | 6157 | sas_device->sas_address_parent)) { |
| 5864 | sas_address = sas_device->sas_address; | ||
| 5865 | if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) { | ||
| 5866 | _scsih_sas_device_remove(ioc, sas_device); | 6158 | _scsih_sas_device_remove(ioc, sas_device); |
| 5867 | } else if (!sas_device->starget) { | 6159 | } else if (!sas_device->starget) { |
| 5868 | mpt2sas_transport_port_remove(ioc, sas_address, | 6160 | mpt2sas_transport_port_remove(ioc, |
| 5869 | parent_handle); | 6161 | sas_device->sas_address, |
| 6162 | sas_device->sas_address_parent); | ||
| 5870 | _scsih_sas_device_remove(ioc, sas_device); | 6163 | _scsih_sas_device_remove(ioc, sas_device); |
| 5871 | } | 6164 | } |
| 5872 | } | 6165 | } |
| @@ -5935,6 +6228,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 5935 | ioc->ctl_cb_idx = ctl_cb_idx; | 6228 | ioc->ctl_cb_idx = ctl_cb_idx; |
| 5936 | ioc->base_cb_idx = base_cb_idx; | 6229 | ioc->base_cb_idx = base_cb_idx; |
| 5937 | ioc->transport_cb_idx = transport_cb_idx; | 6230 | ioc->transport_cb_idx = transport_cb_idx; |
| 6231 | ioc->scsih_cb_idx = scsih_cb_idx; | ||
| 5938 | ioc->config_cb_idx = config_cb_idx; | 6232 | ioc->config_cb_idx = config_cb_idx; |
| 5939 | ioc->tm_tr_cb_idx = tm_tr_cb_idx; | 6233 | ioc->tm_tr_cb_idx = tm_tr_cb_idx; |
| 5940 | ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; | 6234 | ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; |
| @@ -6072,6 +6366,7 @@ static struct pci_driver scsih_driver = { | |||
| 6072 | .id_table = scsih_pci_table, | 6366 | .id_table = scsih_pci_table, |
| 6073 | .probe = _scsih_probe, | 6367 | .probe = _scsih_probe, |
| 6074 | .remove = __devexit_p(_scsih_remove), | 6368 | .remove = __devexit_p(_scsih_remove), |
| 6369 | .shutdown = _scsih_shutdown, | ||
| 6075 | #ifdef CONFIG_PM | 6370 | #ifdef CONFIG_PM |
| 6076 | .suspend = _scsih_suspend, | 6371 | .suspend = _scsih_suspend, |
| 6077 | .resume = _scsih_resume, | 6372 | .resume = _scsih_resume, |
| @@ -6113,6 +6408,9 @@ _scsih_init(void) | |||
| 6113 | transport_cb_idx = mpt2sas_base_register_callback_handler( | 6408 | transport_cb_idx = mpt2sas_base_register_callback_handler( |
| 6114 | mpt2sas_transport_done); | 6409 | mpt2sas_transport_done); |
| 6115 | 6410 | ||
| 6411 | /* scsih internal commands callback handler */ | ||
| 6412 | scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done); | ||
| 6413 | |||
| 6116 | /* configuration page API internal commands callback handler */ | 6414 | /* configuration page API internal commands callback handler */ |
| 6117 | config_cb_idx = mpt2sas_base_register_callback_handler( | 6415 | config_cb_idx = mpt2sas_base_register_callback_handler( |
| 6118 | mpt2sas_config_done); | 6416 | mpt2sas_config_done); |
| @@ -6152,6 +6450,7 @@ _scsih_exit(void) | |||
| 6152 | mpt2sas_base_release_callback_handler(tm_cb_idx); | 6450 | mpt2sas_base_release_callback_handler(tm_cb_idx); |
| 6153 | mpt2sas_base_release_callback_handler(base_cb_idx); | 6451 | mpt2sas_base_release_callback_handler(base_cb_idx); |
| 6154 | mpt2sas_base_release_callback_handler(transport_cb_idx); | 6452 | mpt2sas_base_release_callback_handler(transport_cb_idx); |
| 6453 | mpt2sas_base_release_callback_handler(scsih_cb_idx); | ||
| 6155 | mpt2sas_base_release_callback_handler(config_cb_idx); | 6454 | mpt2sas_base_release_callback_handler(config_cb_idx); |
| 6156 | mpt2sas_base_release_callback_handler(ctl_cb_idx); | 6455 | mpt2sas_base_release_callback_handler(ctl_cb_idx); |
| 6157 | 6456 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index eb98188c7f3f..3a82872bad44 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
| @@ -59,24 +59,23 @@ | |||
| 59 | 59 | ||
| 60 | #include "mpt2sas_base.h" | 60 | #include "mpt2sas_base.h" |
| 61 | /** | 61 | /** |
| 62 | * _transport_sas_node_find_by_handle - sas node search | 62 | * _transport_sas_node_find_by_sas_address - sas node search |
| 63 | * @ioc: per adapter object | 63 | * @ioc: per adapter object |
| 64 | * @handle: expander or hba handle (assigned by firmware) | 64 | * @sas_address: sas address of expander or sas host |
| 65 | * Context: Calling function should acquire ioc->sas_node_lock. | 65 | * Context: Calling function should acquire ioc->sas_node_lock. |
| 66 | * | 66 | * |
| 67 | * Search for either hba phys or expander device based on handle, then returns | 67 | * Search for either hba phys or expander device based on handle, then returns |
| 68 | * the sas_node object. | 68 | * the sas_node object. |
| 69 | */ | 69 | */ |
| 70 | static struct _sas_node * | 70 | static struct _sas_node * |
| 71 | _transport_sas_node_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) | 71 | _transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc, |
| 72 | u64 sas_address) | ||
| 72 | { | 73 | { |
| 73 | int i; | 74 | if (ioc->sas_hba.sas_address == sas_address) |
| 74 | 75 | return &ioc->sas_hba; | |
| 75 | for (i = 0; i < ioc->sas_hba.num_phys; i++) | 76 | else |
| 76 | if (ioc->sas_hba.phy[i].handle == handle) | 77 | return mpt2sas_scsih_expander_find_by_sas_address(ioc, |
| 77 | return &ioc->sas_hba; | 78 | sas_address); |
| 78 | |||
| 79 | return mpt2sas_scsih_expander_find_by_handle(ioc, handle); | ||
| 80 | } | 79 | } |
| 81 | 80 | ||
| 82 | /** | 81 | /** |
| @@ -259,8 +258,7 @@ struct rep_manu_reply{ | |||
| 259 | u8 response_length; | 258 | u8 response_length; |
| 260 | u16 expander_change_count; | 259 | u16 expander_change_count; |
| 261 | u8 reserved0[2]; | 260 | u8 reserved0[2]; |
| 262 | u8 sas_format:1; | 261 | u8 sas_format; |
| 263 | u8 reserved1:7; | ||
| 264 | u8 reserved2[3]; | 262 | u8 reserved2[3]; |
| 265 | u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; | 263 | u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; |
| 266 | u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; | 264 | u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; |
| @@ -375,7 +373,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, | |||
| 375 | mpi_request->VP_ID = 0; | 373 | mpi_request->VP_ID = 0; |
| 376 | sas_address_le = (u64 *)&mpi_request->SASAddress; | 374 | sas_address_le = (u64 *)&mpi_request->SASAddress; |
| 377 | *sas_address_le = cpu_to_le64(sas_address); | 375 | *sas_address_le = cpu_to_le64(sas_address); |
| 378 | mpi_request->RequestDataLength = sizeof(struct rep_manu_request); | 376 | mpi_request->RequestDataLength = |
| 377 | cpu_to_le16(sizeof(struct rep_manu_request)); | ||
| 379 | psge = &mpi_request->SGL; | 378 | psge = &mpi_request->SGL; |
| 380 | 379 | ||
| 381 | /* WRITE sgel first */ | 380 | /* WRITE sgel first */ |
| @@ -438,8 +437,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, | |||
| 438 | SAS_EXPANDER_PRODUCT_ID_LEN); | 437 | SAS_EXPANDER_PRODUCT_ID_LEN); |
| 439 | strncpy(edev->product_rev, manufacture_reply->product_rev, | 438 | strncpy(edev->product_rev, manufacture_reply->product_rev, |
| 440 | SAS_EXPANDER_PRODUCT_REV_LEN); | 439 | SAS_EXPANDER_PRODUCT_REV_LEN); |
| 441 | edev->level = manufacture_reply->sas_format; | 440 | edev->level = manufacture_reply->sas_format & 1; |
| 442 | if (manufacture_reply->sas_format) { | 441 | if (edev->level) { |
| 443 | strncpy(edev->component_vendor_id, | 442 | strncpy(edev->component_vendor_id, |
| 444 | manufacture_reply->component_vendor_id, | 443 | manufacture_reply->component_vendor_id, |
| 445 | SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); | 444 | SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); |
| @@ -469,7 +468,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, | |||
| 469 | * mpt2sas_transport_port_add - insert port to the list | 468 | * mpt2sas_transport_port_add - insert port to the list |
| 470 | * @ioc: per adapter object | 469 | * @ioc: per adapter object |
| 471 | * @handle: handle of attached device | 470 | * @handle: handle of attached device |
| 472 | * @parent_handle: parent handle(either hba or expander) | 471 | * @sas_address: sas address of parent expander or sas host |
| 473 | * Context: This function will acquire ioc->sas_node_lock. | 472 | * Context: This function will acquire ioc->sas_node_lock. |
| 474 | * | 473 | * |
| 475 | * Adding new port object to the sas_node->sas_port_list. | 474 | * Adding new port object to the sas_node->sas_port_list. |
| @@ -478,7 +477,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, | |||
| 478 | */ | 477 | */ |
| 479 | struct _sas_port * | 478 | struct _sas_port * |
| 480 | mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, | 479 | mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, |
| 481 | u16 parent_handle) | 480 | u64 sas_address) |
| 482 | { | 481 | { |
| 483 | struct _sas_phy *mpt2sas_phy, *next; | 482 | struct _sas_phy *mpt2sas_phy, *next; |
| 484 | struct _sas_port *mpt2sas_port; | 483 | struct _sas_port *mpt2sas_port; |
| @@ -488,9 +487,6 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
| 488 | int i; | 487 | int i; |
| 489 | struct sas_port *port; | 488 | struct sas_port *port; |
| 490 | 489 | ||
| 491 | if (!parent_handle) | ||
| 492 | return NULL; | ||
| 493 | |||
| 494 | mpt2sas_port = kzalloc(sizeof(struct _sas_port), | 490 | mpt2sas_port = kzalloc(sizeof(struct _sas_port), |
| 495 | GFP_KERNEL); | 491 | GFP_KERNEL); |
| 496 | if (!mpt2sas_port) { | 492 | if (!mpt2sas_port) { |
| @@ -502,17 +498,16 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
| 502 | INIT_LIST_HEAD(&mpt2sas_port->port_list); | 498 | INIT_LIST_HEAD(&mpt2sas_port->port_list); |
| 503 | INIT_LIST_HEAD(&mpt2sas_port->phy_list); | 499 | INIT_LIST_HEAD(&mpt2sas_port->phy_list); |
| 504 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 500 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 505 | sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); | 501 | sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); |
| 506 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 502 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 507 | 503 | ||
| 508 | if (!sas_node) { | 504 | if (!sas_node) { |
| 509 | printk(MPT2SAS_ERR_FMT "%s: Could not find parent(0x%04x)!\n", | 505 | printk(MPT2SAS_ERR_FMT "%s: Could not find " |
| 510 | ioc->name, __func__, parent_handle); | 506 | "parent sas_address(0x%016llx)!\n", ioc->name, |
| 507 | __func__, (unsigned long long)sas_address); | ||
| 511 | goto out_fail; | 508 | goto out_fail; |
| 512 | } | 509 | } |
| 513 | 510 | ||
| 514 | mpt2sas_port->handle = parent_handle; | ||
| 515 | mpt2sas_port->sas_address = sas_node->sas_address; | ||
| 516 | if ((_transport_set_identify(ioc, handle, | 511 | if ((_transport_set_identify(ioc, handle, |
| 517 | &mpt2sas_port->remote_identify))) { | 512 | &mpt2sas_port->remote_identify))) { |
| 518 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 513 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
| @@ -604,7 +599,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
| 604 | * mpt2sas_transport_port_remove - remove port from the list | 599 | * mpt2sas_transport_port_remove - remove port from the list |
| 605 | * @ioc: per adapter object | 600 | * @ioc: per adapter object |
| 606 | * @sas_address: sas address of attached device | 601 | * @sas_address: sas address of attached device |
| 607 | * @parent_handle: handle to the upstream parent(either hba or expander) | 602 | * @sas_address_parent: sas address of parent expander or sas host |
| 608 | * Context: This function will acquire ioc->sas_node_lock. | 603 | * Context: This function will acquire ioc->sas_node_lock. |
| 609 | * | 604 | * |
| 610 | * Removing object and freeing associated memory from the | 605 | * Removing object and freeing associated memory from the |
| @@ -614,7 +609,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
| 614 | */ | 609 | */ |
| 615 | void | 610 | void |
| 616 | mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, | 611 | mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, |
| 617 | u16 parent_handle) | 612 | u64 sas_address_parent) |
| 618 | { | 613 | { |
| 619 | int i; | 614 | int i; |
| 620 | unsigned long flags; | 615 | unsigned long flags; |
| @@ -624,7 +619,8 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, | |||
| 624 | struct _sas_phy *mpt2sas_phy, *next_phy; | 619 | struct _sas_phy *mpt2sas_phy, *next_phy; |
| 625 | 620 | ||
| 626 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 621 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 627 | sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); | 622 | sas_node = _transport_sas_node_find_by_sas_address(ioc, |
| 623 | sas_address_parent); | ||
| 628 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 624 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 629 | if (!sas_node) | 625 | if (!sas_node) |
| 630 | return; | 626 | return; |
| @@ -650,8 +646,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, | |||
| 650 | &mpt2sas_port->phy_list, port_siblings) { | 646 | &mpt2sas_port->phy_list, port_siblings) { |
| 651 | if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) | 647 | if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) |
| 652 | dev_printk(KERN_INFO, &mpt2sas_port->port->dev, | 648 | dev_printk(KERN_INFO, &mpt2sas_port->port->dev, |
| 653 | "remove: parent_handle(0x%04x), " | 649 | "remove: sas_addr(0x%016llx), phy(%d)\n", |
| 654 | "sas_addr(0x%016llx), phy(%d)\n", parent_handle, | ||
| 655 | (unsigned long long) | 650 | (unsigned long long) |
| 656 | mpt2sas_port->remote_identify.sas_address, | 651 | mpt2sas_port->remote_identify.sas_address, |
| 657 | mpt2sas_phy->phy_id); | 652 | mpt2sas_phy->phy_id); |
| @@ -799,8 +794,8 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy | |||
| 799 | /** | 794 | /** |
| 800 | * mpt2sas_transport_update_links - refreshing phy link changes | 795 | * mpt2sas_transport_update_links - refreshing phy link changes |
| 801 | * @ioc: per adapter object | 796 | * @ioc: per adapter object |
| 802 | * @handle: handle to sas_host or expander | 797 | * @sas_address: sas address of parent expander or sas host |
| 803 | * @attached_handle: attached device handle | 798 | * @handle: attached device handle |
| 804 | * @phy_numberv: phy number | 799 | * @phy_numberv: phy number |
| 805 | * @link_rate: new link rate | 800 | * @link_rate: new link rate |
| 806 | * | 801 | * |
| @@ -808,28 +803,25 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy | |||
| 808 | */ | 803 | */ |
| 809 | void | 804 | void |
| 810 | mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, | 805 | mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, |
| 811 | u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate) | 806 | u64 sas_address, u16 handle, u8 phy_number, u8 link_rate) |
| 812 | { | 807 | { |
| 813 | unsigned long flags; | 808 | unsigned long flags; |
| 814 | struct _sas_node *sas_node; | 809 | struct _sas_node *sas_node; |
| 815 | struct _sas_phy *mpt2sas_phy; | 810 | struct _sas_phy *mpt2sas_phy; |
| 816 | 811 | ||
| 817 | if (ioc->shost_recovery) { | 812 | if (ioc->shost_recovery) |
| 818 | printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", | ||
| 819 | __func__, ioc->name); | ||
| 820 | return; | 813 | return; |
| 821 | } | ||
| 822 | 814 | ||
| 823 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 815 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
| 824 | sas_node = _transport_sas_node_find_by_handle(ioc, handle); | 816 | sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); |
| 825 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 817 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
| 826 | if (!sas_node) | 818 | if (!sas_node) |
| 827 | return; | 819 | return; |
| 828 | 820 | ||
| 829 | mpt2sas_phy = &sas_node->phy[phy_number]; | 821 | mpt2sas_phy = &sas_node->phy[phy_number]; |
| 830 | mpt2sas_phy->attached_handle = attached_handle; | 822 | mpt2sas_phy->attached_handle = handle; |
| 831 | if (attached_handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) | 823 | if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) |
| 832 | _transport_set_identify(ioc, mpt2sas_phy->attached_handle, | 824 | _transport_set_identify(ioc, handle, |
| 833 | &mpt2sas_phy->remote_identify); | 825 | &mpt2sas_phy->remote_identify); |
| 834 | else | 826 | else |
| 835 | memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct | 827 | memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct |
| @@ -841,13 +833,11 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, | |||
| 841 | 833 | ||
| 842 | if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) | 834 | if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) |
| 843 | dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, | 835 | dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, |
| 844 | "refresh: handle(0x%04x), sas_addr(0x%016llx),\n" | 836 | "refresh: parent sas_addr(0x%016llx),\n" |
| 845 | "\tlink_rate(0x%02x), phy(%d)\n" | 837 | "\tlink_rate(0x%02x), phy(%d)\n" |
| 846 | "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", | 838 | "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", |
| 847 | handle, (unsigned long long) | 839 | (unsigned long long)sas_address, |
| 848 | mpt2sas_phy->identify.sas_address, link_rate, | 840 | link_rate, phy_number, handle, (unsigned long long) |
| 849 | phy_number, attached_handle, | ||
| 850 | (unsigned long long) | ||
| 851 | mpt2sas_phy->remote_identify.sas_address); | 841 | mpt2sas_phy->remote_identify.sas_address); |
| 852 | } | 842 | } |
| 853 | 843 | ||
| @@ -1126,7 +1116,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
| 1126 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), | 1116 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), |
| 1127 | blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); | 1117 | blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); |
| 1128 | if (!dma_addr_out) { | 1118 | if (!dma_addr_out) { |
| 1129 | mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); | 1119 | mpt2sas_base_free_smid(ioc, smid); |
| 1130 | goto unmap; | 1120 | goto unmap; |
| 1131 | } | 1121 | } |
| 1132 | 1122 | ||
| @@ -1144,7 +1134,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
| 1144 | dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), | 1134 | dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), |
| 1145 | blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); | 1135 | blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); |
| 1146 | if (!dma_addr_in) { | 1136 | if (!dma_addr_in) { |
| 1147 | mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); | 1137 | mpt2sas_base_free_smid(ioc, smid); |
| 1148 | goto unmap; | 1138 | goto unmap; |
| 1149 | } | 1139 | } |
| 1150 | 1140 | ||
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index e3c482aa87b5..a2d569828308 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
| @@ -6495,7 +6495,7 @@ static void ncr_int_ma (struct ncb *np) | |||
| 6495 | ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids | 6495 | ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids |
| 6496 | ** bloat for such a should_not_happen situation). | 6496 | ** bloat for such a should_not_happen situation). |
| 6497 | ** In all other situation, we reset the BUS. | 6497 | ** In all other situation, we reset the BUS. |
| 6498 | ** Are these assumptions reasonnable ? (Wait and see ...) | 6498 | ** Are these assumptions reasonable ? (Wait and see ...) |
| 6499 | */ | 6499 | */ |
| 6500 | unexpected_phase: | 6500 | unexpected_phase: |
| 6501 | dsp -= 8; | 6501 | dsp -= 8; |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 2be7d5b018d2..2c98a6ee973b 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
| @@ -1419,7 +1419,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id) | |||
| 1419 | nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! "); | 1419 | nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! "); |
| 1420 | /* | 1420 | /* |
| 1421 | * TODO: To be implemented improving bus master | 1421 | * TODO: To be implemented improving bus master |
| 1422 | * transfer reliablity when BMCNTERR is occurred in | 1422 | * transfer reliability when BMCNTERR is occurred in |
| 1423 | * AutoSCSI phase described in specification. | 1423 | * AutoSCSI phase described in specification. |
| 1424 | */ | 1424 | */ |
| 1425 | } | 1425 | } |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 7a117c18114c..950202a70bcf 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
| @@ -73,7 +73,8 @@ static const char *_osd_ver_desc(struct osd_request *or) | |||
| 73 | 73 | ||
| 74 | #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len) | 74 | #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len) |
| 75 | 75 | ||
| 76 | static int _osd_print_system_info(struct osd_dev *od, void *caps) | 76 | static int _osd_get_print_system_info(struct osd_dev *od, |
| 77 | void *caps, struct osd_dev_info *odi) | ||
| 77 | { | 78 | { |
| 78 | struct osd_request *or; | 79 | struct osd_request *or; |
| 79 | struct osd_attr get_attrs[] = { | 80 | struct osd_attr get_attrs[] = { |
| @@ -137,8 +138,12 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) | |||
| 137 | OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", | 138 | OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", |
| 138 | (char *)pFirst); | 139 | (char *)pFirst); |
| 139 | 140 | ||
| 140 | pFirst = get_attrs[a].val_ptr; | 141 | odi->osdname_len = get_attrs[a].len; |
| 141 | OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); | 142 | /* Avoid NULL for memcmp optimization 0-length is good enough */ |
| 143 | odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL); | ||
| 144 | if (odi->osdname_len) | ||
| 145 | memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len); | ||
| 146 | OSD_INFO("OSD_NAME [%s]\n", odi->osdname); | ||
| 142 | a++; | 147 | a++; |
| 143 | 148 | ||
| 144 | pFirst = get_attrs[a++].val_ptr; | 149 | pFirst = get_attrs[a++].val_ptr; |
| @@ -171,6 +176,14 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) | |||
| 171 | sid_dump, sizeof(sid_dump), true); | 176 | sid_dump, sizeof(sid_dump), true); |
| 172 | OSD_INFO("OSD_SYSTEM_ID(%d)\n" | 177 | OSD_INFO("OSD_SYSTEM_ID(%d)\n" |
| 173 | " [%s]\n", len, sid_dump); | 178 | " [%s]\n", len, sid_dump); |
| 179 | |||
| 180 | if (unlikely(len > sizeof(odi->systemid))) { | ||
| 181 | OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). " | ||
| 182 | "device idetification might not work\n", len); | ||
| 183 | len = sizeof(odi->systemid); | ||
| 184 | } | ||
| 185 | odi->systemid_len = len; | ||
| 186 | memcpy(odi->systemid, get_attrs[a].val_ptr, len); | ||
| 174 | a++; | 187 | a++; |
| 175 | } | 188 | } |
| 176 | out: | 189 | out: |
| @@ -178,16 +191,17 @@ out: | |||
| 178 | return ret; | 191 | return ret; |
| 179 | } | 192 | } |
| 180 | 193 | ||
| 181 | int osd_auto_detect_ver(struct osd_dev *od, void *caps) | 194 | int osd_auto_detect_ver(struct osd_dev *od, |
| 195 | void *caps, struct osd_dev_info *odi) | ||
| 182 | { | 196 | { |
| 183 | int ret; | 197 | int ret; |
| 184 | 198 | ||
| 185 | /* Auto-detect the osd version */ | 199 | /* Auto-detect the osd version */ |
| 186 | ret = _osd_print_system_info(od, caps); | 200 | ret = _osd_get_print_system_info(od, caps, odi); |
| 187 | if (ret) { | 201 | if (ret) { |
| 188 | osd_dev_set_ver(od, OSD_VER1); | 202 | osd_dev_set_ver(od, OSD_VER1); |
| 189 | OSD_DEBUG("converting to OSD1\n"); | 203 | OSD_DEBUG("converting to OSD1\n"); |
| 190 | ret = _osd_print_system_info(od, caps); | 204 | ret = _osd_get_print_system_info(od, caps, odi); |
| 191 | } | 205 | } |
| 192 | 206 | ||
| 193 | return ret; | 207 | return ret; |
| @@ -461,7 +475,8 @@ EXPORT_SYMBOL(osd_end_request); | |||
| 461 | 475 | ||
| 462 | int osd_execute_request(struct osd_request *or) | 476 | int osd_execute_request(struct osd_request *or) |
| 463 | { | 477 | { |
| 464 | return blk_execute_rq(or->request->q, NULL, or->request, 0); | 478 | return or->async_error = |
| 479 | blk_execute_rq(or->request->q, NULL, or->request, 0); | ||
| 465 | } | 480 | } |
| 466 | EXPORT_SYMBOL(osd_execute_request); | 481 | EXPORT_SYMBOL(osd_execute_request); |
| 467 | 482 | ||
| @@ -471,8 +486,12 @@ static void osd_request_async_done(struct request *req, int error) | |||
| 471 | 486 | ||
| 472 | or->async_error = error; | 487 | or->async_error = error; |
| 473 | 488 | ||
| 474 | if (error) | 489 | if (unlikely(error)) { |
| 475 | OSD_DEBUG("osd_request_async_done error recieved %d\n", error); | 490 | OSD_DEBUG("osd_request_async_done error recieved %d " |
| 491 | "errors 0x%x\n", error, req->errors); | ||
| 492 | if (!req->errors) /* don't miss out on this one */ | ||
| 493 | req->errors = error; | ||
| 494 | } | ||
| 476 | 495 | ||
| 477 | if (or->async_done) | 496 | if (or->async_done) |
| 478 | or->async_done(or, or->async_private); | 497 | or->async_done(or, or->async_private); |
| @@ -1153,6 +1172,7 @@ int osd_req_decode_get_attr_list(struct osd_request *or, | |||
| 1153 | "c=%d r=%d n=%d\n", | 1172 | "c=%d r=%d n=%d\n", |
| 1154 | cur_bytes, returned_bytes, n); | 1173 | cur_bytes, returned_bytes, n); |
| 1155 | oa->val_ptr = NULL; | 1174 | oa->val_ptr = NULL; |
| 1175 | cur_bytes = returned_bytes; /* break the caller loop */ | ||
| 1156 | break; | 1176 | break; |
| 1157 | } | 1177 | } |
| 1158 | 1178 | ||
| @@ -1436,6 +1456,15 @@ int osd_finalize_request(struct osd_request *or, | |||
| 1436 | } | 1456 | } |
| 1437 | EXPORT_SYMBOL(osd_finalize_request); | 1457 | EXPORT_SYMBOL(osd_finalize_request); |
| 1438 | 1458 | ||
| 1459 | static bool _is_osd_security_code(int code) | ||
| 1460 | { | ||
| 1461 | return (code == osd_security_audit_value_frozen) || | ||
| 1462 | (code == osd_security_working_key_frozen) || | ||
| 1463 | (code == osd_nonce_not_unique) || | ||
| 1464 | (code == osd_nonce_timestamp_out_of_range) || | ||
| 1465 | (code == osd_invalid_dataout_buffer_integrity_check_value); | ||
| 1466 | } | ||
| 1467 | |||
| 1439 | #define OSD_SENSE_PRINT1(fmt, a...) \ | 1468 | #define OSD_SENSE_PRINT1(fmt, a...) \ |
| 1440 | do { \ | 1469 | do { \ |
| 1441 | if (__cur_sense_need_output) \ | 1470 | if (__cur_sense_need_output) \ |
| @@ -1458,9 +1487,16 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1458 | #else | 1487 | #else |
| 1459 | bool __cur_sense_need_output = !silent; | 1488 | bool __cur_sense_need_output = !silent; |
| 1460 | #endif | 1489 | #endif |
| 1490 | int ret; | ||
| 1461 | 1491 | ||
| 1462 | if (!or->request->errors) | 1492 | if (likely(!or->request->errors)) { |
| 1493 | osi->out_resid = 0; | ||
| 1494 | osi->in_resid = 0; | ||
| 1463 | return 0; | 1495 | return 0; |
| 1496 | } | ||
| 1497 | |||
| 1498 | osi = osi ? : &local_osi; | ||
| 1499 | memset(osi, 0, sizeof(*osi)); | ||
| 1464 | 1500 | ||
| 1465 | ssdb = or->request->sense; | 1501 | ssdb = or->request->sense; |
| 1466 | sense_len = or->request->sense_len; | 1502 | sense_len = or->request->sense_len; |
| @@ -1468,17 +1504,15 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1468 | OSD_ERR("Block-layer returned error(0x%x) but " | 1504 | OSD_ERR("Block-layer returned error(0x%x) but " |
| 1469 | "sense_len(%u) || key(%d) is empty\n", | 1505 | "sense_len(%u) || key(%d) is empty\n", |
| 1470 | or->request->errors, sense_len, ssdb->sense_key); | 1506 | or->request->errors, sense_len, ssdb->sense_key); |
| 1471 | return -EIO; | 1507 | goto analyze; |
| 1472 | } | 1508 | } |
| 1473 | 1509 | ||
| 1474 | if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) { | 1510 | if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) { |
| 1475 | OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n", | 1511 | OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n", |
| 1476 | ssdb->response_code, sense_len); | 1512 | ssdb->response_code, sense_len); |
| 1477 | return -EIO; | 1513 | goto analyze; |
| 1478 | } | 1514 | } |
| 1479 | 1515 | ||
| 1480 | osi = osi ? : &local_osi; | ||
| 1481 | memset(osi, 0, sizeof(*osi)); | ||
| 1482 | osi->key = ssdb->sense_key; | 1516 | osi->key = ssdb->sense_key; |
| 1483 | osi->additional_code = be16_to_cpu(ssdb->additional_sense_code); | 1517 | osi->additional_code = be16_to_cpu(ssdb->additional_sense_code); |
| 1484 | original_sense_len = ssdb->additional_sense_length + 8; | 1518 | original_sense_len = ssdb->additional_sense_length + 8; |
| @@ -1488,9 +1522,10 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1488 | __cur_sense_need_output = (osi->key > scsi_sk_recovered_error); | 1522 | __cur_sense_need_output = (osi->key > scsi_sk_recovered_error); |
| 1489 | #endif | 1523 | #endif |
| 1490 | OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) " | 1524 | OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) " |
| 1491 | "additional_code=0x%x\n", | 1525 | "additional_code=0x%x async_error=%d errors=0x%x\n", |
| 1492 | osi->key, original_sense_len, sense_len, | 1526 | osi->key, original_sense_len, sense_len, |
| 1493 | osi->additional_code); | 1527 | osi->additional_code, or->async_error, |
| 1528 | or->request->errors); | ||
| 1494 | 1529 | ||
| 1495 | if (original_sense_len < sense_len) | 1530 | if (original_sense_len < sense_len) |
| 1496 | sense_len = original_sense_len; | 1531 | sense_len = original_sense_len; |
| @@ -1569,15 +1604,14 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1569 | { | 1604 | { |
| 1570 | struct osd_sense_attributes_data_descriptor | 1605 | struct osd_sense_attributes_data_descriptor |
| 1571 | *osadd = cur_descriptor; | 1606 | *osadd = cur_descriptor; |
| 1572 | int len = min(cur_len, sense_len); | 1607 | unsigned len = min(cur_len, sense_len); |
| 1573 | int i = 0; | ||
| 1574 | struct osd_sense_attr *pattr = osadd->sense_attrs; | 1608 | struct osd_sense_attr *pattr = osadd->sense_attrs; |
| 1575 | 1609 | ||
| 1576 | while (len < 0) { | 1610 | while (len >= sizeof(*pattr)) { |
| 1577 | u32 attr_page = be32_to_cpu(pattr->attr_page); | 1611 | u32 attr_page = be32_to_cpu(pattr->attr_page); |
| 1578 | u32 attr_id = be32_to_cpu(pattr->attr_id); | 1612 | u32 attr_id = be32_to_cpu(pattr->attr_id); |
| 1579 | 1613 | ||
| 1580 | if (i++ == 0) { | 1614 | if (!osi->attr.attr_page) { |
| 1581 | osi->attr.attr_page = attr_page; | 1615 | osi->attr.attr_page = attr_page; |
| 1582 | osi->attr.attr_id = attr_id; | 1616 | osi->attr.attr_id = attr_id; |
| 1583 | } | 1617 | } |
| @@ -1588,6 +1622,8 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1588 | bad_attr_list++; | 1622 | bad_attr_list++; |
| 1589 | max_attr--; | 1623 | max_attr--; |
| 1590 | } | 1624 | } |
| 1625 | |||
| 1626 | len -= sizeof(*pattr); | ||
| 1591 | OSD_SENSE_PRINT2( | 1627 | OSD_SENSE_PRINT2( |
| 1592 | "osd_sense_attribute_identification" | 1628 | "osd_sense_attribute_identification" |
| 1593 | "attr_page=0x%x attr_id=0x%x\n", | 1629 | "attr_page=0x%x attr_id=0x%x\n", |
| @@ -1621,7 +1657,50 @@ int osd_req_decode_sense_full(struct osd_request *or, | |||
| 1621 | cur_descriptor += cur_len; | 1657 | cur_descriptor += cur_len; |
| 1622 | } | 1658 | } |
| 1623 | 1659 | ||
| 1624 | return (osi->key > scsi_sk_recovered_error) ? -EIO : 0; | 1660 | analyze: |
| 1661 | if (!osi->key) { | ||
| 1662 | /* scsi sense is Empty, the request was never issued to target | ||
| 1663 | * linux return code might tell us what happened. | ||
| 1664 | */ | ||
| 1665 | if (or->async_error == -ENOMEM) | ||
| 1666 | osi->osd_err_pri = OSD_ERR_PRI_RESOURCE; | ||
| 1667 | else | ||
| 1668 | osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE; | ||
| 1669 | ret = or->async_error; | ||
| 1670 | } else if (osi->key <= scsi_sk_recovered_error) { | ||
| 1671 | osi->osd_err_pri = 0; | ||
| 1672 | ret = 0; | ||
| 1673 | } else if (osi->additional_code == scsi_invalid_field_in_cdb) { | ||
| 1674 | if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) { | ||
| 1675 | osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES; | ||
| 1676 | ret = -EFAULT; /* caller should recover from this */ | ||
| 1677 | } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) { | ||
| 1678 | osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND; | ||
| 1679 | ret = -ENOENT; | ||
| 1680 | } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) { | ||
| 1681 | osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS; | ||
| 1682 | ret = -EACCES; | ||
| 1683 | } else { | ||
| 1684 | osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; | ||
| 1685 | ret = -EINVAL; | ||
| 1686 | } | ||
| 1687 | } else if (osi->additional_code == osd_quota_error) { | ||
| 1688 | osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE; | ||
| 1689 | ret = -ENOSPC; | ||
| 1690 | } else if (_is_osd_security_code(osi->additional_code)) { | ||
| 1691 | osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; | ||
| 1692 | ret = -EINVAL; | ||
| 1693 | } else { | ||
| 1694 | osi->osd_err_pri = OSD_ERR_PRI_EIO; | ||
| 1695 | ret = -EIO; | ||
| 1696 | } | ||
| 1697 | |||
| 1698 | if (or->out.req) | ||
| 1699 | osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; | ||
| 1700 | if (or->in.req) | ||
| 1701 | osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; | ||
| 1702 | |||
| 1703 | return ret; | ||
| 1625 | } | 1704 | } |
| 1626 | EXPORT_SYMBOL(osd_req_decode_sense_full); | 1705 | EXPORT_SYMBOL(osd_req_decode_sense_full); |
| 1627 | 1706 | ||
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 0bdef3390902..0a90702b3d71 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c | |||
| @@ -71,8 +71,7 @@ | |||
| 71 | #define SCSI_OSD_MAX_MINOR 64 | 71 | #define SCSI_OSD_MAX_MINOR 64 |
| 72 | 72 | ||
| 73 | static const char osd_name[] = "osd"; | 73 | static const char osd_name[] = "osd"; |
| 74 | static const char *osd_version_string = "open-osd 0.1.0"; | 74 | static const char *osd_version_string = "open-osd 0.2.0"; |
| 75 | const char osd_symlink[] = "scsi_osd"; | ||
| 76 | 75 | ||
| 77 | MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); | 76 | MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); |
| 78 | MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); | 77 | MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); |
| @@ -82,15 +81,25 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD); | |||
| 82 | 81 | ||
| 83 | struct osd_uld_device { | 82 | struct osd_uld_device { |
| 84 | int minor; | 83 | int minor; |
| 85 | struct kref kref; | 84 | struct device class_dev; |
| 86 | struct cdev cdev; | 85 | struct cdev cdev; |
| 87 | struct osd_dev od; | 86 | struct osd_dev od; |
| 87 | struct osd_dev_info odi; | ||
| 88 | struct gendisk *disk; | 88 | struct gendisk *disk; |
| 89 | struct device *class_member; | ||
| 90 | }; | 89 | }; |
| 91 | 90 | ||
| 92 | static void __uld_get(struct osd_uld_device *oud); | 91 | struct osd_dev_handle { |
| 93 | static void __uld_put(struct osd_uld_device *oud); | 92 | struct osd_dev od; |
| 93 | struct file *file; | ||
| 94 | struct osd_uld_device *oud; | ||
| 95 | } ; | ||
| 96 | |||
| 97 | static DEFINE_IDA(osd_minor_ida); | ||
| 98 | |||
| 99 | static struct class osd_uld_class = { | ||
| 100 | .owner = THIS_MODULE, | ||
| 101 | .name = "scsi_osd", | ||
| 102 | }; | ||
| 94 | 103 | ||
| 95 | /* | 104 | /* |
| 96 | * Char Device operations | 105 | * Char Device operations |
| @@ -101,7 +110,7 @@ static int osd_uld_open(struct inode *inode, struct file *file) | |||
| 101 | struct osd_uld_device *oud = container_of(inode->i_cdev, | 110 | struct osd_uld_device *oud = container_of(inode->i_cdev, |
| 102 | struct osd_uld_device, cdev); | 111 | struct osd_uld_device, cdev); |
| 103 | 112 | ||
| 104 | __uld_get(oud); | 113 | get_device(&oud->class_dev); |
| 105 | /* cache osd_uld_device on file handle */ | 114 | /* cache osd_uld_device on file handle */ |
| 106 | file->private_data = oud; | 115 | file->private_data = oud; |
| 107 | OSD_DEBUG("osd_uld_open %p\n", oud); | 116 | OSD_DEBUG("osd_uld_open %p\n", oud); |
| @@ -114,7 +123,7 @@ static int osd_uld_release(struct inode *inode, struct file *file) | |||
| 114 | 123 | ||
| 115 | OSD_DEBUG("osd_uld_release %p\n", file->private_data); | 124 | OSD_DEBUG("osd_uld_release %p\n", file->private_data); |
| 116 | file->private_data = NULL; | 125 | file->private_data = NULL; |
| 117 | __uld_put(oud); | 126 | put_device(&oud->class_dev); |
| 118 | return 0; | 127 | return 0; |
| 119 | } | 128 | } |
| 120 | 129 | ||
| @@ -177,7 +186,7 @@ static const struct file_operations osd_fops = { | |||
| 177 | struct osd_dev *osduld_path_lookup(const char *name) | 186 | struct osd_dev *osduld_path_lookup(const char *name) |
| 178 | { | 187 | { |
| 179 | struct osd_uld_device *oud; | 188 | struct osd_uld_device *oud; |
| 180 | struct osd_dev *od; | 189 | struct osd_dev_handle *odh; |
| 181 | struct file *file; | 190 | struct file *file; |
| 182 | int error; | 191 | int error; |
| 183 | 192 | ||
| @@ -186,8 +195,8 @@ struct osd_dev *osduld_path_lookup(const char *name) | |||
| 186 | return ERR_PTR(-EINVAL); | 195 | return ERR_PTR(-EINVAL); |
| 187 | } | 196 | } |
| 188 | 197 | ||
| 189 | od = kzalloc(sizeof(*od), GFP_KERNEL); | 198 | odh = kzalloc(sizeof(*odh), GFP_KERNEL); |
| 190 | if (!od) | 199 | if (unlikely(!odh)) |
| 191 | return ERR_PTR(-ENOMEM); | 200 | return ERR_PTR(-ENOMEM); |
| 192 | 201 | ||
| 193 | file = filp_open(name, O_RDWR, 0); | 202 | file = filp_open(name, O_RDWR, 0); |
| @@ -203,33 +212,134 @@ struct osd_dev *osduld_path_lookup(const char *name) | |||
| 203 | 212 | ||
| 204 | oud = file->private_data; | 213 | oud = file->private_data; |
| 205 | 214 | ||
| 206 | *od = oud->od; | 215 | odh->od = oud->od; |
| 207 | od->file = file; | 216 | odh->file = file; |
| 217 | odh->oud = oud; | ||
| 208 | 218 | ||
| 209 | return od; | 219 | return &odh->od; |
| 210 | 220 | ||
| 211 | close_file: | 221 | close_file: |
| 212 | fput(file); | 222 | fput(file); |
| 213 | free_od: | 223 | free_od: |
| 214 | kfree(od); | 224 | kfree(odh); |
| 215 | return ERR_PTR(error); | 225 | return ERR_PTR(error); |
| 216 | } | 226 | } |
| 217 | EXPORT_SYMBOL(osduld_path_lookup); | 227 | EXPORT_SYMBOL(osduld_path_lookup); |
| 218 | 228 | ||
| 219 | void osduld_put_device(struct osd_dev *od) | 229 | static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len, |
| 230 | const u8 *a2, unsigned a2_len) | ||
| 220 | { | 231 | { |
| 232 | if (!a2_len) /* User string is Empty means don't care */ | ||
| 233 | return true; | ||
| 234 | |||
| 235 | if (a1_len != a2_len) | ||
| 236 | return false; | ||
| 237 | |||
| 238 | return 0 == memcmp(a1, a2, a1_len); | ||
| 239 | } | ||
| 240 | |||
| 241 | struct find_oud_t { | ||
| 242 | const struct osd_dev_info *odi; | ||
| 243 | struct device *dev; | ||
| 244 | struct osd_uld_device *oud; | ||
| 245 | } ; | ||
| 246 | |||
| 247 | int _mach_odi(struct device *dev, void *find_data) | ||
| 248 | { | ||
| 249 | struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, | ||
| 250 | class_dev); | ||
| 251 | struct find_oud_t *fot = find_data; | ||
| 252 | const struct osd_dev_info *odi = fot->odi; | ||
| 253 | |||
| 254 | if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, | ||
| 255 | odi->systemid, odi->systemid_len) && | ||
| 256 | _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len, | ||
| 257 | odi->osdname, odi->osdname_len)) { | ||
| 258 | OSD_DEBUG("found device sysid_len=%d osdname=%d\n", | ||
| 259 | odi->systemid_len, odi->osdname_len); | ||
| 260 | fot->oud = oud; | ||
| 261 | return 1; | ||
| 262 | } else { | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | } | ||
| 266 | |||
| 267 | /* osduld_info_lookup - Loop through all devices, return the requested osd_dev. | ||
| 268 | * | ||
| 269 | * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't | ||
| 270 | * care. .e.g if they're both zero /dev/osd0 is returned. | ||
| 271 | */ | ||
| 272 | struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi) | ||
| 273 | { | ||
| 274 | struct find_oud_t find = {.odi = odi}; | ||
| 275 | |||
| 276 | find.dev = class_find_device(&osd_uld_class, NULL, &find, _mach_odi); | ||
| 277 | if (likely(find.dev)) { | ||
| 278 | struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL); | ||
| 279 | |||
| 280 | if (unlikely(!odh)) { | ||
| 281 | put_device(find.dev); | ||
| 282 | return ERR_PTR(-ENOMEM); | ||
| 283 | } | ||
| 221 | 284 | ||
| 285 | odh->od = find.oud->od; | ||
| 286 | odh->oud = find.oud; | ||
| 287 | |||
| 288 | return &odh->od; | ||
| 289 | } | ||
| 290 | |||
| 291 | return ERR_PTR(-ENODEV); | ||
| 292 | } | ||
| 293 | EXPORT_SYMBOL(osduld_info_lookup); | ||
| 294 | |||
| 295 | void osduld_put_device(struct osd_dev *od) | ||
| 296 | { | ||
| 222 | if (od && !IS_ERR(od)) { | 297 | if (od && !IS_ERR(od)) { |
| 223 | struct osd_uld_device *oud = od->file->private_data; | 298 | struct osd_dev_handle *odh = |
| 299 | container_of(od, struct osd_dev_handle, od); | ||
| 300 | struct osd_uld_device *oud = odh->oud; | ||
| 224 | 301 | ||
| 225 | BUG_ON(od->scsi_device != oud->od.scsi_device); | 302 | BUG_ON(od->scsi_device != oud->od.scsi_device); |
| 226 | 303 | ||
| 227 | fput(od->file); | 304 | /* If scsi has released the device (logout), and exofs has last |
| 228 | kfree(od); | 305 | * reference on oud it will be freed by above osd_uld_release |
| 306 | * within fput below. But this will oops in cdev_release which | ||
| 307 | * is called after the fops->release. A get_/put_ pair makes | ||
| 308 | * sure we have a cdev for the duration of fput | ||
| 309 | */ | ||
| 310 | if (odh->file) { | ||
| 311 | get_device(&oud->class_dev); | ||
| 312 | fput(odh->file); | ||
| 313 | } | ||
| 314 | put_device(&oud->class_dev); | ||
| 315 | kfree(odh); | ||
| 229 | } | 316 | } |
| 230 | } | 317 | } |
| 231 | EXPORT_SYMBOL(osduld_put_device); | 318 | EXPORT_SYMBOL(osduld_put_device); |
| 232 | 319 | ||
| 320 | const struct osd_dev_info *osduld_device_info(struct osd_dev *od) | ||
| 321 | { | ||
| 322 | struct osd_dev_handle *odh = | ||
| 323 | container_of(od, struct osd_dev_handle, od); | ||
| 324 | return &odh->oud->odi; | ||
| 325 | } | ||
| 326 | EXPORT_SYMBOL(osduld_device_info); | ||
| 327 | |||
| 328 | bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi) | ||
| 329 | { | ||
| 330 | struct osd_dev_handle *odh = | ||
| 331 | container_of(od, struct osd_dev_handle, od); | ||
| 332 | struct osd_uld_device *oud = odh->oud; | ||
| 333 | |||
| 334 | return (oud->odi.systemid_len == odi->systemid_len) && | ||
| 335 | _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, | ||
| 336 | odi->systemid, odi->systemid_len) && | ||
| 337 | (oud->odi.osdname_len == odi->osdname_len) && | ||
| 338 | _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len, | ||
| 339 | odi->osdname, odi->osdname_len); | ||
| 340 | } | ||
| 341 | EXPORT_SYMBOL(osduld_device_same); | ||
| 342 | |||
| 233 | /* | 343 | /* |
| 234 | * Scsi Device operations | 344 | * Scsi Device operations |
| 235 | */ | 345 | */ |
| @@ -250,14 +360,35 @@ static int __detect_osd(struct osd_uld_device *oud) | |||
| 250 | OSD_ERR("warning: scsi_test_unit_ready failed\n"); | 360 | OSD_ERR("warning: scsi_test_unit_ready failed\n"); |
| 251 | 361 | ||
| 252 | osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true); | 362 | osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true); |
| 253 | if (osd_auto_detect_ver(&oud->od, caps)) | 363 | if (osd_auto_detect_ver(&oud->od, caps, &oud->odi)) |
| 254 | return -ENODEV; | 364 | return -ENODEV; |
| 255 | 365 | ||
| 256 | return 0; | 366 | return 0; |
| 257 | } | 367 | } |
| 258 | 368 | ||
| 259 | static struct class *osd_sysfs_class; | 369 | static void __remove(struct device *dev) |
| 260 | static DEFINE_IDA(osd_minor_ida); | 370 | { |
| 371 | struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, | ||
| 372 | class_dev); | ||
| 373 | struct scsi_device *scsi_device = oud->od.scsi_device; | ||
| 374 | |||
| 375 | kfree(oud->odi.osdname); | ||
| 376 | |||
| 377 | if (oud->cdev.owner) | ||
| 378 | cdev_del(&oud->cdev); | ||
| 379 | |||
| 380 | osd_dev_fini(&oud->od); | ||
| 381 | scsi_device_put(scsi_device); | ||
| 382 | |||
| 383 | OSD_INFO("osd_remove %s\n", | ||
| 384 | oud->disk ? oud->disk->disk_name : NULL); | ||
| 385 | |||
| 386 | if (oud->disk) | ||
| 387 | put_disk(oud->disk); | ||
| 388 | ida_remove(&osd_minor_ida, oud->minor); | ||
| 389 | |||
| 390 | kfree(oud); | ||
| 391 | } | ||
| 261 | 392 | ||
| 262 | static int osd_probe(struct device *dev) | 393 | static int osd_probe(struct device *dev) |
| 263 | { | 394 | { |
| @@ -289,7 +420,6 @@ static int osd_probe(struct device *dev) | |||
| 289 | if (NULL == oud) | 420 | if (NULL == oud) |
| 290 | goto err_retract_minor; | 421 | goto err_retract_minor; |
| 291 | 422 | ||
| 292 | kref_init(&oud->kref); | ||
| 293 | dev_set_drvdata(dev, oud); | 423 | dev_set_drvdata(dev, oud); |
| 294 | oud->minor = minor; | 424 | oud->minor = minor; |
| 295 | 425 | ||
| @@ -327,18 +457,25 @@ static int osd_probe(struct device *dev) | |||
| 327 | OSD_ERR("cdev_add failed\n"); | 457 | OSD_ERR("cdev_add failed\n"); |
| 328 | goto err_put_disk; | 458 | goto err_put_disk; |
| 329 | } | 459 | } |
| 330 | kobject_get(&oud->cdev.kobj); /* 2nd ref see osd_remove() */ | 460 | |
| 331 | 461 | /* class device member */ | |
| 332 | /* class_member */ | 462 | oud->class_dev.devt = oud->cdev.dev; |
| 333 | oud->class_member = device_create(osd_sysfs_class, dev, | 463 | oud->class_dev.class = &osd_uld_class; |
| 334 | MKDEV(SCSI_OSD_MAJOR, oud->minor), "%s", disk->disk_name); | 464 | oud->class_dev.parent = dev; |
| 335 | if (IS_ERR(oud->class_member)) { | 465 | oud->class_dev.release = __remove; |
| 336 | OSD_ERR("class_device_create failed\n"); | 466 | error = dev_set_name(&oud->class_dev, disk->disk_name); |
| 337 | error = PTR_ERR(oud->class_member); | 467 | if (error) { |
| 468 | OSD_ERR("dev_set_name failed => %d\n", error); | ||
| 469 | goto err_put_cdev; | ||
| 470 | } | ||
| 471 | |||
| 472 | error = device_register(&oud->class_dev); | ||
| 473 | if (error) { | ||
| 474 | OSD_ERR("device_register failed => %d\n", error); | ||
| 338 | goto err_put_cdev; | 475 | goto err_put_cdev; |
| 339 | } | 476 | } |
| 340 | 477 | ||
| 341 | dev_set_drvdata(oud->class_member, oud); | 478 | get_device(&oud->class_dev); |
| 342 | 479 | ||
| 343 | OSD_INFO("osd_probe %s\n", disk->disk_name); | 480 | OSD_INFO("osd_probe %s\n", disk->disk_name); |
| 344 | return 0; | 481 | return 0; |
| @@ -367,54 +504,12 @@ static int osd_remove(struct device *dev) | |||
| 367 | scsi_device); | 504 | scsi_device); |
| 368 | } | 505 | } |
| 369 | 506 | ||
| 370 | if (oud->class_member) | 507 | device_unregister(&oud->class_dev); |
| 371 | device_destroy(osd_sysfs_class, | ||
| 372 | MKDEV(SCSI_OSD_MAJOR, oud->minor)); | ||
| 373 | 508 | ||
| 374 | /* We have 2 references to the cdev. One is released here | 509 | put_device(&oud->class_dev); |
| 375 | * and also takes down the /dev/osdX mapping. The second | ||
| 376 | * Will be released in __remove() after all users have released | ||
| 377 | * the osd_uld_device. | ||
| 378 | */ | ||
| 379 | if (oud->cdev.owner) | ||
| 380 | cdev_del(&oud->cdev); | ||
| 381 | |||
| 382 | __uld_put(oud); | ||
| 383 | return 0; | 510 | return 0; |
| 384 | } | 511 | } |
| 385 | 512 | ||
| 386 | static void __remove(struct kref *kref) | ||
| 387 | { | ||
| 388 | struct osd_uld_device *oud = container_of(kref, | ||
| 389 | struct osd_uld_device, kref); | ||
| 390 | struct scsi_device *scsi_device = oud->od.scsi_device; | ||
| 391 | |||
| 392 | /* now let delete the char_dev */ | ||
| 393 | kobject_put(&oud->cdev.kobj); | ||
| 394 | |||
| 395 | osd_dev_fini(&oud->od); | ||
| 396 | scsi_device_put(scsi_device); | ||
| 397 | |||
| 398 | OSD_INFO("osd_remove %s\n", | ||
| 399 | oud->disk ? oud->disk->disk_name : NULL); | ||
| 400 | |||
| 401 | if (oud->disk) | ||
| 402 | put_disk(oud->disk); | ||
| 403 | |||
| 404 | ida_remove(&osd_minor_ida, oud->minor); | ||
| 405 | kfree(oud); | ||
| 406 | } | ||
| 407 | |||
| 408 | static void __uld_get(struct osd_uld_device *oud) | ||
| 409 | { | ||
| 410 | kref_get(&oud->kref); | ||
| 411 | } | ||
| 412 | |||
| 413 | static void __uld_put(struct osd_uld_device *oud) | ||
| 414 | { | ||
| 415 | kref_put(&oud->kref, __remove); | ||
| 416 | } | ||
| 417 | |||
| 418 | /* | 513 | /* |
| 419 | * Global driver and scsi registration | 514 | * Global driver and scsi registration |
| 420 | */ | 515 | */ |
| @@ -432,11 +527,10 @@ static int __init osd_uld_init(void) | |||
| 432 | { | 527 | { |
| 433 | int err; | 528 | int err; |
| 434 | 529 | ||
| 435 | osd_sysfs_class = class_create(THIS_MODULE, osd_symlink); | 530 | err = class_register(&osd_uld_class); |
| 436 | if (IS_ERR(osd_sysfs_class)) { | 531 | if (err) { |
| 437 | OSD_ERR("Unable to register sysfs class => %ld\n", | 532 | OSD_ERR("Unable to register sysfs class => %d\n", err); |
| 438 | PTR_ERR(osd_sysfs_class)); | 533 | return err; |
| 439 | return PTR_ERR(osd_sysfs_class); | ||
| 440 | } | 534 | } |
| 441 | 535 | ||
| 442 | err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), | 536 | err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), |
| @@ -459,7 +553,7 @@ static int __init osd_uld_init(void) | |||
| 459 | err_out_chrdev: | 553 | err_out_chrdev: |
| 460 | unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); | 554 | unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); |
| 461 | err_out: | 555 | err_out: |
| 462 | class_destroy(osd_sysfs_class); | 556 | class_unregister(&osd_uld_class); |
| 463 | return err; | 557 | return err; |
| 464 | } | 558 | } |
| 465 | 559 | ||
| @@ -467,7 +561,7 @@ static void __exit osd_uld_exit(void) | |||
| 467 | { | 561 | { |
| 468 | scsi_unregister_driver(&osd_driver.gendrv); | 562 | scsi_unregister_driver(&osd_driver.gendrv); |
| 469 | unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); | 563 | unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); |
| 470 | class_destroy(osd_sysfs_class); | 564 | class_unregister(&osd_uld_class); |
| 471 | OSD_INFO("UNLOADED %s\n", osd_version_string); | 565 | OSD_INFO("UNLOADED %s\n", osd_version_string); |
| 472 | } | 566 | } |
| 473 | 567 | ||
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile new file mode 100644 index 000000000000..52f04296171c --- /dev/null +++ b/drivers/scsi/pm8001/Makefile | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | # | ||
| 2 | # Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver | ||
| 3 | # | ||
| 4 | # Copyright (C) 2008-2009 USI Co., Ltd. | ||
| 5 | |||
| 6 | |||
| 7 | obj-$(CONFIG_SCSI_PM8001) += pm8001.o | ||
| 8 | pm8001-y += pm8001_init.o \ | ||
| 9 | pm8001_sas.o \ | ||
| 10 | pm8001_ctl.o \ | ||
| 11 | pm8001_hwi.o | ||
| 12 | |||
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h new file mode 100644 index 000000000000..4efa4d0950e5 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_chips.h | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #ifndef _PM8001_CHIPS_H_ | ||
| 42 | #define _PM8001_CHIPS_H_ | ||
| 43 | |||
| 44 | static inline u32 pm8001_read_32(void *virt_addr) | ||
| 45 | { | ||
| 46 | return *((u32 *)virt_addr); | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void pm8001_write_32(void *addr, u32 offset, u32 val) | ||
| 50 | { | ||
| 51 | *((u32 *)(addr + offset)) = val; | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar, | ||
| 55 | u32 offset) | ||
| 56 | { | ||
| 57 | return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset); | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar, | ||
| 61 | u32 addr, u32 val) | ||
| 62 | { | ||
| 63 | writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr); | ||
| 64 | } | ||
| 65 | static inline u32 pm8001_mr32(void __iomem *addr, u32 offset) | ||
| 66 | { | ||
| 67 | return readl(addr + offset); | ||
| 68 | } | ||
| 69 | static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val) | ||
| 70 | { | ||
| 71 | writel(val, addr + offset); | ||
| 72 | } | ||
| 73 | static inline u32 get_pci_bar_index(u32 pcibar) | ||
| 74 | { | ||
| 75 | switch (pcibar) { | ||
| 76 | case 0x18: | ||
| 77 | case 0x1C: | ||
| 78 | return 1; | ||
| 79 | case 0x20: | ||
| 80 | return 2; | ||
| 81 | case 0x24: | ||
| 82 | return 3; | ||
| 83 | default: | ||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | #endif /* _PM8001_CHIPS_H_ */ | ||
| 89 | |||
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c new file mode 100644 index 000000000000..14b13acae6dd --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.c | |||
| @@ -0,0 +1,573 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | #include <linux/firmware.h> | ||
| 41 | #include "pm8001_sas.h" | ||
| 42 | #include "pm8001_ctl.h" | ||
| 43 | |||
| 44 | /* scsi host attributes */ | ||
| 45 | |||
| 46 | /** | ||
| 47 | * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number | ||
| 48 | * @cdev: pointer to embedded class device | ||
| 49 | * @buf: the buffer returned | ||
| 50 | * | ||
| 51 | * A sysfs 'read-only' shost attribute. | ||
| 52 | */ | ||
| 53 | static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, | ||
| 54 | struct device_attribute *attr, char *buf) | ||
| 55 | { | ||
| 56 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 57 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 58 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 59 | |||
| 60 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
| 61 | pm8001_ha->main_cfg_tbl.interface_rev); | ||
| 62 | } | ||
| 63 | static | ||
| 64 | DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); | ||
| 65 | |||
| 66 | /** | ||
| 67 | * pm8001_ctl_fw_version_show - firmware version | ||
| 68 | * @cdev: pointer to embedded class device | ||
| 69 | * @buf: the buffer returned | ||
| 70 | * | ||
| 71 | * A sysfs 'read-only' shost attribute. | ||
| 72 | */ | ||
| 73 | static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, | ||
| 74 | struct device_attribute *attr, char *buf) | ||
| 75 | { | ||
| 76 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 77 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 78 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 79 | |||
| 80 | return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", | ||
| 81 | (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), | ||
| 82 | (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), | ||
| 83 | (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), | ||
| 84 | (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); | ||
| 85 | } | ||
| 86 | static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); | ||
| 87 | /** | ||
| 88 | * pm8001_ctl_max_out_io_show - max outstanding io supported | ||
| 89 | * @cdev: pointer to embedded class device | ||
| 90 | * @buf: the buffer returned | ||
| 91 | * | ||
| 92 | * A sysfs 'read-only' shost attribute. | ||
| 93 | */ | ||
| 94 | static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, | ||
| 95 | struct device_attribute *attr, char *buf) | ||
| 96 | { | ||
| 97 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 98 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 99 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 100 | |||
| 101 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
| 102 | pm8001_ha->main_cfg_tbl.max_out_io); | ||
| 103 | } | ||
| 104 | static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); | ||
| 105 | /** | ||
| 106 | * pm8001_ctl_max_devices_show - max devices support | ||
| 107 | * @cdev: pointer to embedded class device | ||
| 108 | * @buf: the buffer returned | ||
| 109 | * | ||
| 110 | * A sysfs 'read-only' shost attribute. | ||
| 111 | */ | ||
| 112 | static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, | ||
| 113 | struct device_attribute *attr, char *buf) | ||
| 114 | { | ||
| 115 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 116 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 117 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 118 | |||
| 119 | return snprintf(buf, PAGE_SIZE, "%04d\n", | ||
| 120 | (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); | ||
| 121 | } | ||
| 122 | static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); | ||
| 123 | /** | ||
| 124 | * pm8001_ctl_max_sg_list_show - max sg list supported; 0 means no | ||
| 125 | * hardware limitation | ||
| 126 | * @cdev: pointer to embedded class device | ||
| 127 | * @buf: the buffer returned | ||
| 128 | * | ||
| 129 | * A sysfs 'read-only' shost attribute. | ||
| 130 | */ | ||
| 131 | static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, | ||
| 132 | struct device_attribute *attr, char *buf) | ||
| 133 | { | ||
| 134 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 135 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 136 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 137 | |||
| 138 | return snprintf(buf, PAGE_SIZE, "%04d\n", | ||
| 139 | pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); | ||
| 140 | } | ||
| 141 | static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); | ||
| 142 | |||
| 143 | #define SAS_1_0 0x1 | ||
| 144 | #define SAS_1_1 0x2 | ||
| 145 | #define SAS_2_0 0x4 | ||
| 146 | |||
| 147 | static ssize_t | ||
| 148 | show_sas_spec_support_status(unsigned int mode, char *buf) | ||
| 149 | { | ||
| 150 | ssize_t len = 0; | ||
| 151 | |||
| 152 | if (mode & SAS_1_1) | ||
| 153 | len = sprintf(buf, "%s", "SAS1.1"); | ||
| 154 | if (mode & SAS_2_0) | ||
| 155 | len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0"); | ||
| 156 | len += sprintf(buf + len, "\n"); | ||
| 157 | |||
| 158 | return len; | ||
| 159 | } | ||
| 160 | |||
| 161 | /** | ||
| 162 | * pm8001_ctl_sas_spec_support_show - sas spec supported | ||
| 163 | * @cdev: pointer to embedded class device | ||
| 164 | * @buf: the buffer returned | ||
| 165 | * | ||
| 166 | * A sysfs 'read-only' shost attribute. | ||
| 167 | */ | ||
| 168 | static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, | ||
| 169 | struct device_attribute *attr, char *buf) | ||
| 170 | { | ||
| 171 | unsigned int mode; | ||
| 172 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 173 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 174 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 175 | mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; | ||
| 176 | return show_sas_spec_support_status(mode, buf); | ||
| 177 | } | ||
| 178 | static DEVICE_ATTR(sas_spec_support, S_IRUGO, | ||
| 179 | pm8001_ctl_sas_spec_support_show, NULL); | ||
| 180 | |||
| 181 | /** | ||
| 182 | * pm8001_ctl_host_sas_address_show - sas address | ||
| 183 | * @cdev: pointer to embedded class device | ||
| 184 | * @buf: the buffer returned | ||
| 185 | * | ||
| 186 | * This is the controller sas address | ||
| 187 | * | ||
| 188 | * A sysfs 'read-only' shost attribute. | ||
| 189 | */ | ||
| 190 | static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev, | ||
| 191 | struct device_attribute *attr, char *buf) | ||
| 192 | { | ||
| 193 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 194 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 195 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 196 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | ||
| 197 | be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); | ||
| 198 | } | ||
| 199 | static DEVICE_ATTR(host_sas_address, S_IRUGO, | ||
| 200 | pm8001_ctl_host_sas_address_show, NULL); | ||
| 201 | |||
| 202 | /** | ||
| 203 | * pm8001_ctl_logging_level_show - logging level | ||
| 204 | * @cdev: pointer to embedded class device | ||
| 205 | * @buf: the buffer returned | ||
| 206 | * | ||
| 207 | * A sysfs 'read/write' shost attribute. | ||
| 208 | */ | ||
| 209 | static ssize_t pm8001_ctl_logging_level_show(struct device *cdev, | ||
| 210 | struct device_attribute *attr, char *buf) | ||
| 211 | { | ||
| 212 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 213 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 214 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 215 | |||
| 216 | return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level); | ||
| 217 | } | ||
| 218 | static ssize_t pm8001_ctl_logging_level_store(struct device *cdev, | ||
| 219 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 220 | { | ||
| 221 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 222 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 223 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 224 | int val = 0; | ||
| 225 | |||
| 226 | if (sscanf(buf, "%x", &val) != 1) | ||
| 227 | return -EINVAL; | ||
| 228 | |||
| 229 | pm8001_ha->logging_level = val; | ||
| 230 | return strlen(buf); | ||
| 231 | } | ||
| 232 | |||
| 233 | static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, | ||
| 234 | pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store); | ||
| 235 | /** | ||
| 236 | * pm8001_ctl_aap_log_show - aap1 event log | ||
| 237 | * @cdev: pointer to embedded class device | ||
| 238 | * @buf: the buffer returned | ||
| 239 | * | ||
| 240 | * A sysfs 'read-only' shost attribute. | ||
| 241 | */ | ||
| 242 | static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, | ||
| 243 | struct device_attribute *attr, char *buf) | ||
| 244 | { | ||
| 245 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 246 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 247 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 248 | int i; | ||
| 249 | #define AAP1_MEMMAP(r, c) \ | ||
| 250 | (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \ | ||
| 251 | + (c))) | ||
| 252 | |||
| 253 | char *str = buf; | ||
| 254 | int max = 2; | ||
| 255 | for (i = 0; i < max; i++) { | ||
| 256 | str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" | ||
| 257 | "0x%08x 0x%08x\n", | ||
| 258 | AAP1_MEMMAP(i, 0), | ||
| 259 | AAP1_MEMMAP(i, 4), | ||
| 260 | AAP1_MEMMAP(i, 8), | ||
| 261 | AAP1_MEMMAP(i, 12), | ||
| 262 | AAP1_MEMMAP(i, 16), | ||
| 263 | AAP1_MEMMAP(i, 20), | ||
| 264 | AAP1_MEMMAP(i, 24), | ||
| 265 | AAP1_MEMMAP(i, 28)); | ||
| 266 | } | ||
| 267 | |||
| 268 | return str - buf; | ||
| 269 | } | ||
| 270 | static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); | ||
| 271 | /** | ||
| 272 | * pm8001_ctl_iop_log_show - IOP event log | ||
| 273 | * @cdev: pointer to embedded class device | ||
| 274 | * @buf: the buffer returned | ||
| 275 | * | ||
| 276 | * A sysfs 'read-only' shost attribute. | ||
| 277 | */ | ||
| 278 | static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, | ||
| 279 | struct device_attribute *attr, char *buf) | ||
| 280 | { | ||
| 281 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 282 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 283 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 284 | #define IOP_MEMMAP(r, c) \ | ||
| 285 | (*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \ | ||
| 286 | + (c))) | ||
| 287 | int i; | ||
| 288 | char *str = buf; | ||
| 289 | int max = 2; | ||
| 290 | for (i = 0; i < max; i++) { | ||
| 291 | str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" | ||
| 292 | "0x%08x 0x%08x\n", | ||
| 293 | IOP_MEMMAP(i, 0), | ||
| 294 | IOP_MEMMAP(i, 4), | ||
| 295 | IOP_MEMMAP(i, 8), | ||
| 296 | IOP_MEMMAP(i, 12), | ||
| 297 | IOP_MEMMAP(i, 16), | ||
| 298 | IOP_MEMMAP(i, 20), | ||
| 299 | IOP_MEMMAP(i, 24), | ||
| 300 | IOP_MEMMAP(i, 28)); | ||
| 301 | } | ||
| 302 | |||
| 303 | return str - buf; | ||
| 304 | } | ||
| 305 | static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); | ||
| 306 | |||
| 307 | #define FLASH_CMD_NONE 0x00 | ||
| 308 | #define FLASH_CMD_UPDATE 0x01 | ||
| 309 | #define FLASH_CMD_SET_NVMD 0x02 | ||
| 310 | |||
| 311 | struct flash_command { | ||
| 312 | u8 command[8]; | ||
| 313 | int code; | ||
| 314 | }; | ||
| 315 | |||
| 316 | static struct flash_command flash_command_table[] = | ||
| 317 | { | ||
| 318 | {"set_nvmd", FLASH_CMD_SET_NVMD}, | ||
| 319 | {"update", FLASH_CMD_UPDATE}, | ||
| 320 | {"", FLASH_CMD_NONE} /* Last entry: empty command, FLASH_CMD_NONE. */ | ||
| 321 | }; | ||
| 322 | |||
| 323 | struct error_fw { | ||
| 324 | char *reason; | ||
| 325 | int err_code; | ||
| 326 | }; | ||
| 327 | |||
| 328 | static struct error_fw flash_error_table[] = | ||
| 329 | { | ||
| 330 | {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE}, | ||
| 331 | {"image header mismatch", FLASH_UPDATE_HDR_ERR}, | ||
| 332 | {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR}, | ||
| 333 | {"image CRC Error", FLASH_UPDATE_CRC_ERR}, | ||
| 334 | {"image length Error.", FLASH_UPDATE_LENGTH_ERR}, | ||
| 335 | {"Failed to program flash chip", FLASH_UPDATE_HW_ERR}, | ||
| 336 | {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED}, | ||
| 337 | {"Flash update disabled.", FLASH_UPDATE_DISABLED}, | ||
| 338 | {"Flash in progress", FLASH_IN_PROGRESS}, | ||
| 339 | {"Image file size Error", FAIL_FILE_SIZE}, | ||
| 340 | {"Input parameter error", FAIL_PARAMETERS}, | ||
| 341 | {"Out of memory", FAIL_OUT_MEMORY}, | ||
| 342 | {"OK", 0} /* Last entry err_code = 0. */ | ||
| 343 | }; | ||
| 344 | |||
| 345 | static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) | ||
| 346 | { | ||
| 347 | struct pm8001_ioctl_payload *payload; | ||
| 348 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 349 | u8 *ioctlbuffer = NULL; | ||
| 350 | u32 length = 0; | ||
| 351 | u32 ret = 0; | ||
| 352 | |||
| 353 | length = 1024 * 5 + sizeof(*payload) - 1; | ||
| 354 | ioctlbuffer = kzalloc(length, GFP_KERNEL); | ||
| 355 | if (!ioctlbuffer) | ||
| 356 | return -ENOMEM; | ||
| 357 | if ((pm8001_ha->fw_image->size <= 0) || | ||
| 358 | (pm8001_ha->fw_image->size > 4096)) { | ||
| 359 | ret = FAIL_FILE_SIZE; | ||
| 360 | goto out; | ||
| 361 | } | ||
| 362 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; | ||
| 363 | memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, | ||
| 364 | pm8001_ha->fw_image->size); | ||
| 365 | payload->length = pm8001_ha->fw_image->size; | ||
| 366 | payload->id = 0; | ||
| 367 | pm8001_ha->nvmd_completion = &completion; | ||
| 368 | ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); | ||
| 369 | wait_for_completion(&completion); | ||
| 370 | out: | ||
| 371 | kfree(ioctlbuffer); | ||
| 372 | return ret; | ||
| 373 | } | ||
| 374 | |||
| 375 | static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) | ||
| 376 | { | ||
| 377 | struct pm8001_ioctl_payload *payload; | ||
| 378 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 379 | u8 *ioctlbuffer = NULL; | ||
| 380 | u32 length = 0; | ||
| 381 | struct fw_control_info *fwControl; | ||
| 382 | u32 loopNumber, loopcount = 0; | ||
| 383 | u32 sizeRead = 0; | ||
| 384 | u32 partitionSize, partitionSizeTmp; | ||
| 385 | u32 ret = 0; | ||
| 386 | u32 partitionNumber = 0; | ||
| 387 | struct pm8001_fw_image_header *image_hdr; | ||
| 388 | |||
| 389 | length = 1024 * 16 + sizeof(*payload) - 1; | ||
| 390 | ioctlbuffer = kzalloc(length, GFP_KERNEL); | ||
| 391 | image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; | ||
| 392 | if (!ioctlbuffer) | ||
| 393 | return -ENOMEM; | ||
| 394 | if (pm8001_ha->fw_image->size < 28) { | ||
| 395 | ret = FAIL_FILE_SIZE; | ||
| 396 | goto out; | ||
| 397 | } | ||
| 398 | |||
| 399 | while (sizeRead < pm8001_ha->fw_image->size) { | ||
| 400 | partitionSizeTmp = | ||
| 401 | *(u32 *)((u8 *)&image_hdr->image_length + sizeRead); | ||
| 402 | partitionSize = be32_to_cpu(partitionSizeTmp); | ||
| 403 | loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE; | ||
| 404 | if (loopcount % IOCTL_BUF_SIZE) | ||
| 405 | loopcount++; | ||
| 406 | if (loopcount == 0) | ||
| 407 | loopcount++; | ||
| 408 | for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { | ||
| 409 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; | ||
| 410 | payload->length = 1024*16; | ||
| 411 | payload->id = 0; | ||
| 412 | fwControl = | ||
| 413 | (struct fw_control_info *)payload->func_specific; | ||
| 414 | fwControl->len = IOCTL_BUF_SIZE; /* IN */ | ||
| 415 | fwControl->size = partitionSize + HEADER_LEN;/* IN */ | ||
| 416 | fwControl->retcode = 0;/* OUT */ | ||
| 417 | fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */ | ||
| 418 | |||
| 419 | /* for the last chunk, when the partition size is not a multiple | ||
| 420 | of the 4k IOCTL buffer, copy only the remainder */ | ||
| 421 | if (((loopcount-loopNumber) == 1) && | ||
| 422 | ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) { | ||
| 423 | fwControl->len = | ||
| 424 | (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; | ||
| 425 | memcpy((u8 *)fwControl->buffer, | ||
| 426 | (u8 *)pm8001_ha->fw_image->data + sizeRead, | ||
| 427 | (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE); | ||
| 428 | sizeRead += | ||
| 429 | (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; | ||
| 430 | } else { | ||
| 431 | memcpy((u8 *)fwControl->buffer, | ||
| 432 | (u8 *)pm8001_ha->fw_image->data + sizeRead, | ||
| 433 | IOCTL_BUF_SIZE); | ||
| 434 | sizeRead += IOCTL_BUF_SIZE; | ||
| 435 | } | ||
| 436 | |||
| 437 | pm8001_ha->nvmd_completion = &completion; | ||
| 438 | ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); | ||
| 439 | wait_for_completion(&completion); | ||
| 440 | if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) { | ||
| 441 | ret = fwControl->retcode; | ||
| 442 | kfree(ioctlbuffer); | ||
| 443 | ioctlbuffer = NULL; | ||
| 444 | break; | ||
| 445 | } | ||
| 446 | } | ||
| 447 | if (ret) | ||
| 448 | break; | ||
| 449 | partitionNumber++; | ||
| 450 | } | ||
| 451 | out: | ||
| 452 | kfree(ioctlbuffer); | ||
| 453 | return ret; | ||
| 454 | } | ||
| 455 | static ssize_t pm8001_store_update_fw(struct device *cdev, | ||
| 456 | struct device_attribute *attr, | ||
| 457 | const char *buf, size_t count) | ||
| 458 | { | ||
| 459 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 460 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 461 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 462 | char *cmd_ptr, *filename_ptr; | ||
| 463 | int res, i; | ||
| 464 | int flash_command = FLASH_CMD_NONE; | ||
| 465 | int err = 0; | ||
| 466 | if (!capable(CAP_SYS_ADMIN)) | ||
| 467 | return -EACCES; | ||
| 468 | |||
| 469 | cmd_ptr = kzalloc(count*2, GFP_KERNEL); | ||
| 470 | |||
| 471 | if (!cmd_ptr) { | ||
| 472 | err = FAIL_OUT_MEMORY; | ||
| 473 | goto out; | ||
| 474 | } | ||
| 475 | |||
| 476 | filename_ptr = cmd_ptr + count; | ||
| 477 | res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); | ||
| 478 | if (res != 2) { | ||
| 479 | err = FAIL_PARAMETERS; | ||
| 480 | goto out1; | ||
| 481 | } | ||
| 482 | |||
| 483 | for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { | ||
| 484 | if (!memcmp(flash_command_table[i].command, | ||
| 485 | cmd_ptr, strlen(cmd_ptr))) { | ||
| 486 | flash_command = flash_command_table[i].code; | ||
| 487 | break; | ||
| 488 | } | ||
| 489 | } | ||
| 490 | if (flash_command == FLASH_CMD_NONE) { | ||
| 491 | err = FAIL_PARAMETERS; | ||
| 492 | goto out1; | ||
| 493 | } | ||
| 494 | |||
| 495 | if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) { | ||
| 496 | err = FLASH_IN_PROGRESS; | ||
| 497 | goto out1; | ||
| 498 | } | ||
| 499 | err = request_firmware(&pm8001_ha->fw_image, | ||
| 500 | filename_ptr, | ||
| 501 | pm8001_ha->dev); | ||
| 502 | |||
| 503 | if (err) { | ||
| 504 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 505 | pm8001_printk("Failed to load firmware image file %s," | ||
| 506 | " error %d\n", filename_ptr, err)); | ||
| 507 | err = FAIL_OPEN_BIOS_FILE; | ||
| 508 | goto out1; | ||
| 509 | } | ||
| 510 | |||
| 511 | switch (flash_command) { | ||
| 512 | case FLASH_CMD_UPDATE: | ||
| 513 | pm8001_ha->fw_status = FLASH_IN_PROGRESS; | ||
| 514 | err = pm8001_update_flash(pm8001_ha); | ||
| 515 | break; | ||
| 516 | case FLASH_CMD_SET_NVMD: | ||
| 517 | pm8001_ha->fw_status = FLASH_IN_PROGRESS; | ||
| 518 | err = pm8001_set_nvmd(pm8001_ha); | ||
| 519 | break; | ||
| 520 | default: | ||
| 521 | pm8001_ha->fw_status = FAIL_PARAMETERS; | ||
| 522 | err = FAIL_PARAMETERS; | ||
| 523 | break; | ||
| 524 | } | ||
| 525 | release_firmware(pm8001_ha->fw_image); | ||
| 526 | out1: | ||
| 527 | kfree(cmd_ptr); | ||
| 528 | out: | ||
| 529 | pm8001_ha->fw_status = err; | ||
| 530 | |||
| 531 | if (!err) | ||
| 532 | return count; | ||
| 533 | else | ||
| 534 | return -err; | ||
| 535 | } | ||
| 536 | |||
| 537 | static ssize_t pm8001_show_update_fw(struct device *cdev, | ||
| 538 | struct device_attribute *attr, char *buf) | ||
| 539 | { | ||
| 540 | int i; | ||
| 541 | struct Scsi_Host *shost = class_to_shost(cdev); | ||
| 542 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 543 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 544 | |||
| 545 | for (i = 0; flash_error_table[i].err_code != 0; i++) { | ||
| 546 | if (flash_error_table[i].err_code == pm8001_ha->fw_status) | ||
| 547 | break; | ||
| 548 | } | ||
| 549 | if (pm8001_ha->fw_status != FLASH_IN_PROGRESS) | ||
| 550 | pm8001_ha->fw_status = FLASH_OK; | ||
| 551 | |||
| 552 | return snprintf(buf, PAGE_SIZE, "status=%x %s\n", | ||
| 553 | flash_error_table[i].err_code, | ||
| 554 | flash_error_table[i].reason); | ||
| 555 | } | ||
| 556 | |||
| 557 | static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO, | ||
| 558 | pm8001_show_update_fw, pm8001_store_update_fw); | ||
| 559 | struct device_attribute *pm8001_host_attrs[] = { | ||
| 560 | &dev_attr_interface_rev, | ||
| 561 | &dev_attr_fw_version, | ||
| 562 | &dev_attr_update_fw, | ||
| 563 | &dev_attr_aap_log, | ||
| 564 | &dev_attr_iop_log, | ||
| 565 | &dev_attr_max_out_io, | ||
| 566 | &dev_attr_max_devices, | ||
| 567 | &dev_attr_max_sg_list, | ||
| 568 | &dev_attr_sas_spec_support, | ||
| 569 | &dev_attr_logging_level, | ||
| 570 | &dev_attr_host_sas_address, | ||
| 571 | NULL, | ||
| 572 | }; | ||
| 573 | |||
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h new file mode 100644 index 000000000000..22644de26399 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #ifndef PM8001_CTL_H_INCLUDED | ||
| 42 | #define PM8001_CTL_H_INCLUDED | ||
| 43 | |||
| 44 | #define IOCTL_BUF_SIZE 4096 | ||
| 45 | #define HEADER_LEN 28 | ||
| 46 | #define SIZE_OFFSET 16 | ||
| 47 | |||
| 48 | struct pm8001_ioctl_payload { | ||
| 49 | u32 signature; | ||
| 50 | u16 major_function; | ||
| 51 | u16 minor_function; | ||
| 52 | u16 length; | ||
| 53 | u16 status; | ||
| 54 | u16 offset; | ||
| 55 | u16 id; | ||
| 56 | u8 func_specific[1]; | ||
| 57 | }; | ||
| 58 | |||
| 59 | #define FLASH_OK 0x000000 | ||
| 60 | #define FAIL_OPEN_BIOS_FILE 0x000100 | ||
| 61 | #define FAIL_FILE_SIZE 0x000a00 | ||
| 62 | #define FAIL_PARAMETERS 0x000b00 | ||
| 63 | #define FAIL_OUT_MEMORY 0x000c00 | ||
| 64 | #define FLASH_IN_PROGRESS 0x001000 | ||
| 65 | |||
| 66 | #endif /* PM8001_CTL_H_INCLUDED */ | ||
| 67 | |||
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h new file mode 100644 index 000000000000..944afada61ee --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_defs.h | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #ifndef _PM8001_DEFS_H_ | ||
| 42 | #define _PM8001_DEFS_H_ | ||
| 43 | |||
| 44 | enum chip_flavors { | ||
| 45 | chip_8001, | ||
| 46 | }; | ||
| 47 | #define USI_MAX_MEMCNT 9 | ||
| 48 | #define PM8001_MAX_DMA_SG SG_ALL | ||
| 49 | enum phy_speed { | ||
| 50 | PHY_SPEED_15 = 0x01, | ||
| 51 | PHY_SPEED_30 = 0x02, | ||
| 52 | PHY_SPEED_60 = 0x04, | ||
| 53 | }; | ||
| 54 | |||
| 55 | enum data_direction { | ||
| 56 | DATA_DIR_NONE = 0x0, /* NO TRANSFER */ | ||
| 57 | DATA_DIR_IN = 0x01, /* INBOUND */ | ||
| 58 | DATA_DIR_OUT = 0x02, /* OUTBOUND */ | ||
| 59 | DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | enum port_type { | ||
| 63 | PORT_TYPE_SAS = (1L << 1), | ||
| 64 | PORT_TYPE_SATA = (1L << 0), | ||
| 65 | }; | ||
| 66 | |||
| 67 | /* driver compile-time configuration */ | ||
| 68 | #define PM8001_MAX_CCB 512 /* max ccbs supported */ | ||
| 69 | #define PM8001_MAX_INB_NUM 1 | ||
| 70 | #define PM8001_MAX_OUTB_NUM 1 | ||
| 71 | #define PM8001_CAN_QUEUE 128 /* SCSI Queue depth */ | ||
| 72 | |||
| 73 | /* unchangeable hardware details */ | ||
| 74 | #define PM8001_MAX_PHYS 8 /* max. possible phys */ | ||
| 75 | #define PM8001_MAX_PORTS 8 /* max. possible ports */ | ||
| 76 | #define PM8001_MAX_DEVICES 1024 /* max supported device */ | ||
| 77 | |||
| 78 | enum memory_region_num { | ||
| 79 | AAP1 = 0x0, /* application acceleration processor */ | ||
| 80 | IOP, /* IO processor */ | ||
| 81 | CI, /* consumer index */ | ||
| 82 | PI, /* producer index */ | ||
| 83 | IB, /* inbound queue */ | ||
| 84 | OB, /* outbound queue */ | ||
| 85 | NVMD, /* NVM device */ | ||
| 86 | DEV_MEM, /* memory for devices */ | ||
| 87 | CCB_MEM, /* memory for command control block */ | ||
| 88 | }; | ||
| 89 | #define PM8001_EVENT_LOG_SIZE (128 * 1024) | ||
| 90 | |||
| 91 | /*error code*/ | ||
| 92 | enum mpi_err { | ||
| 93 | MPI_IO_STATUS_SUCCESS = 0x0, | ||
| 94 | MPI_IO_STATUS_BUSY = 0x01, | ||
| 95 | MPI_IO_STATUS_FAIL = 0x02, | ||
| 96 | }; | ||
| 97 | |||
| 98 | /** | ||
| 99 | * Phy Control constants | ||
| 100 | */ | ||
| 101 | enum phy_control_type { | ||
| 102 | PHY_LINK_RESET = 0x01, | ||
| 103 | PHY_HARD_RESET = 0x02, | ||
| 104 | PHY_NOTIFY_ENABLE_SPINUP = 0x10, | ||
| 105 | }; | ||
| 106 | |||
| 107 | enum pm8001_hba_info_flags { | ||
| 108 | PM8001F_INIT_TIME = (1U << 0), | ||
| 109 | PM8001F_RUN_TIME = (1U << 1), | ||
| 110 | }; | ||
| 111 | |||
| 112 | #endif | ||
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c new file mode 100644 index 000000000000..a3de306b9045 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
| @@ -0,0 +1,4458 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | #include "pm8001_sas.h" | ||
| 41 | #include "pm8001_hwi.h" | ||
| 42 | #include "pm8001_chips.h" | ||
| 43 | #include "pm8001_ctl.h" | ||
| 44 | |||
| 45 | /** | ||
| 46 | * read_main_config_table - read the configure table and save it. | ||
| 47 | * @pm8001_ha: our hba card information | ||
| 48 | */ | ||
| 49 | static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha) | ||
| 50 | { | ||
| 51 | void __iomem *address = pm8001_ha->main_cfg_tbl_addr; | ||
| 52 | pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); | ||
| 53 | pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); | ||
| 54 | pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); | ||
| 55 | pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); | ||
| 56 | pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); | ||
| 57 | pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); | ||
| 58 | pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); | ||
| 59 | pm8001_ha->main_cfg_tbl.inbound_queue_offset = | ||
| 60 | pm8001_mr32(address, MAIN_IBQ_OFFSET); | ||
| 61 | pm8001_ha->main_cfg_tbl.outbound_queue_offset = | ||
| 62 | pm8001_mr32(address, MAIN_OBQ_OFFSET); | ||
| 63 | pm8001_ha->main_cfg_tbl.hda_mode_flag = | ||
| 64 | pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); | ||
| 65 | |||
| 66 | /* read analog Setting offset from the configuration table */ | ||
| 67 | pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = | ||
| 68 | pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); | ||
| 69 | |||
| 70 | /* read Error Dump Offset and Length */ | ||
| 71 | pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = | ||
| 72 | pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); | ||
| 73 | pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = | ||
| 74 | pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); | ||
| 75 | pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = | ||
| 76 | pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); | ||
| 77 | pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = | ||
| 78 | pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); | ||
| 79 | } | ||
| 80 | |||
| 81 | /** | ||
| 82 | * read_general_status_table - read the general status table and save it. | ||
| 83 | * @pm8001_ha: our hba card information | ||
| 84 | */ | ||
| 85 | static void __devinit | ||
| 86 | read_general_status_table(struct pm8001_hba_info *pm8001_ha) | ||
| 87 | { | ||
| 88 | void __iomem *address = pm8001_ha->general_stat_tbl_addr; | ||
| 89 | pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); | ||
| 90 | pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); | ||
| 91 | pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); | ||
| 92 | pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); | ||
| 93 | pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); | ||
| 94 | pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); | ||
| 95 | pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); | ||
| 96 | pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); | ||
| 97 | pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); | ||
| 98 | pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); | ||
| 99 | pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); | ||
| 100 | pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); | ||
| 101 | pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); | ||
| 102 | pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); | ||
| 103 | pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); | ||
| 104 | pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); | ||
| 105 | pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); | ||
| 106 | pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); | ||
| 107 | pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); | ||
| 108 | pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); | ||
| 109 | pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); | ||
| 110 | pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); | ||
| 111 | pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); | ||
| 112 | pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); | ||
| 113 | pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); | ||
| 114 | } | ||
| 115 | |||
| 116 | /** | ||
| 117 | * read_inbnd_queue_table - read the inbound queue table and save it. | ||
| 118 | * @pm8001_ha: our hba card information | ||
| 119 | */ | ||
| 120 | static void __devinit | ||
| 121 | read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) | ||
| 122 | { | ||
| 123 | int inbQ_num = 1; | ||
| 124 | int i; | ||
| 125 | void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; | ||
| 126 | for (i = 0; i < inbQ_num; i++) { | ||
| 127 | u32 offset = i * 0x20; | ||
| 128 | pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = | ||
| 129 | get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); | ||
| 130 | pm8001_ha->inbnd_q_tbl[i].pi_offset = | ||
| 131 | pm8001_mr32(address, (offset + 0x18)); | ||
| 132 | } | ||
| 133 | } | ||
| 134 | |||
| 135 | /** | ||
| 136 | * read_outbnd_queue_table - read the outbound queue table and save it. | ||
| 137 | * @pm8001_ha: our hba card information | ||
| 138 | */ | ||
| 139 | static void __devinit | ||
| 140 | read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) | ||
| 141 | { | ||
| 142 | int outbQ_num = 1; | ||
| 143 | int i; | ||
| 144 | void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; | ||
| 145 | for (i = 0; i < outbQ_num; i++) { | ||
| 146 | u32 offset = i * 0x24; | ||
| 147 | pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = | ||
| 148 | get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); | ||
| 149 | pm8001_ha->outbnd_q_tbl[i].ci_offset = | ||
| 150 | pm8001_mr32(address, (offset + 0x18)); | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 154 | /** | ||
| 155 | * init_default_table_values - init the default table. | ||
| 156 | * @pm8001_ha: our hba card information | ||
| 157 | */ | ||
| 158 | static void __devinit | ||
| 159 | init_default_table_values(struct pm8001_hba_info *pm8001_ha) | ||
| 160 | { | ||
| 161 | int qn = 1; | ||
| 162 | int i; | ||
| 163 | u32 offsetib, offsetob; | ||
| 164 | void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; | ||
| 165 | void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; | ||
| 166 | |||
| 167 | pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; | ||
| 168 | pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; | ||
| 169 | pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; | ||
| 170 | pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; | ||
| 171 | pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; | ||
| 172 | pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; | ||
| 173 | pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; | ||
| 174 | pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; | ||
| 175 | pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; | ||
| 176 | pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; | ||
| 177 | pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; | ||
| 178 | |||
| 179 | pm8001_ha->main_cfg_tbl.upper_event_log_addr = | ||
| 180 | pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; | ||
| 181 | pm8001_ha->main_cfg_tbl.lower_event_log_addr = | ||
| 182 | pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; | ||
| 183 | pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; | ||
| 184 | pm8001_ha->main_cfg_tbl.event_log_option = 0x01; | ||
| 185 | pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = | ||
| 186 | pm8001_ha->memoryMap.region[IOP].phys_addr_hi; | ||
| 187 | pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = | ||
| 188 | pm8001_ha->memoryMap.region[IOP].phys_addr_lo; | ||
| 189 | pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; | ||
| 190 | pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; | ||
| 191 | pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; | ||
| 192 | for (i = 0; i < qn; i++) { | ||
| 193 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | ||
| 194 | 0x00000100 | (0x00000040 << 16) | (0x00<<30); | ||
| 195 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | ||
| 196 | pm8001_ha->memoryMap.region[IB].phys_addr_hi; | ||
| 197 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | ||
| 198 | pm8001_ha->memoryMap.region[IB].phys_addr_lo; | ||
| 199 | pm8001_ha->inbnd_q_tbl[i].base_virt = | ||
| 200 | (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; | ||
| 201 | pm8001_ha->inbnd_q_tbl[i].total_length = | ||
| 202 | pm8001_ha->memoryMap.region[IB].total_len; | ||
| 203 | pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = | ||
| 204 | pm8001_ha->memoryMap.region[CI].phys_addr_hi; | ||
| 205 | pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = | ||
| 206 | pm8001_ha->memoryMap.region[CI].phys_addr_lo; | ||
| 207 | pm8001_ha->inbnd_q_tbl[i].ci_virt = | ||
| 208 | pm8001_ha->memoryMap.region[CI].virt_ptr; | ||
| 209 | offsetib = i * 0x20; | ||
| 210 | pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = | ||
| 211 | get_pci_bar_index(pm8001_mr32(addressib, | ||
| 212 | (offsetib + 0x14))); | ||
| 213 | pm8001_ha->inbnd_q_tbl[i].pi_offset = | ||
| 214 | pm8001_mr32(addressib, (offsetib + 0x18)); | ||
| 215 | pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; | ||
| 216 | pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; | ||
| 217 | } | ||
| 218 | for (i = 0; i < qn; i++) { | ||
| 219 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | ||
| 220 | 256 | (64 << 16) | (1<<30); | ||
| 221 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | ||
| 222 | pm8001_ha->memoryMap.region[OB].phys_addr_hi; | ||
| 223 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | ||
| 224 | pm8001_ha->memoryMap.region[OB].phys_addr_lo; | ||
| 225 | pm8001_ha->outbnd_q_tbl[i].base_virt = | ||
| 226 | (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; | ||
| 227 | pm8001_ha->outbnd_q_tbl[i].total_length = | ||
| 228 | pm8001_ha->memoryMap.region[OB].total_len; | ||
| 229 | pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = | ||
| 230 | pm8001_ha->memoryMap.region[PI].phys_addr_hi; | ||
| 231 | pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = | ||
| 232 | pm8001_ha->memoryMap.region[PI].phys_addr_lo; | ||
| 233 | pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = | ||
| 234 | 0 | (10 << 16) | (0 << 24); | ||
| 235 | pm8001_ha->outbnd_q_tbl[i].pi_virt = | ||
| 236 | pm8001_ha->memoryMap.region[PI].virt_ptr; | ||
| 237 | offsetob = i * 0x24; | ||
| 238 | pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = | ||
| 239 | get_pci_bar_index(pm8001_mr32(addressob, | ||
| 240 | offsetob + 0x14)); | ||
| 241 | pm8001_ha->outbnd_q_tbl[i].ci_offset = | ||
| 242 | pm8001_mr32(addressob, (offsetob + 0x18)); | ||
| 243 | pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; | ||
| 244 | pm8001_ha->outbnd_q_tbl[i].producer_index = 0; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 248 | /** | ||
| 249 | * update_main_config_table - update the main default table to the HBA. | ||
| 250 | * @pm8001_ha: our hba card information | ||
| 251 | */ | ||
| 252 | static void __devinit | ||
| 253 | update_main_config_table(struct pm8001_hba_info *pm8001_ha) | ||
| 254 | { | ||
| 255 | void __iomem *address = pm8001_ha->main_cfg_tbl_addr; | ||
| 256 | pm8001_mw32(address, 0x24, | ||
| 257 | pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); | ||
| 258 | pm8001_mw32(address, 0x28, | ||
| 259 | pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); | ||
| 260 | pm8001_mw32(address, 0x2C, | ||
| 261 | pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); | ||
| 262 | pm8001_mw32(address, 0x30, | ||
| 263 | pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); | ||
| 264 | pm8001_mw32(address, 0x34, | ||
| 265 | pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); | ||
| 266 | pm8001_mw32(address, 0x38, | ||
| 267 | pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); | ||
| 268 | pm8001_mw32(address, 0x3C, | ||
| 269 | pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); | ||
| 270 | pm8001_mw32(address, 0x40, | ||
| 271 | pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); | ||
| 272 | pm8001_mw32(address, 0x44, | ||
| 273 | pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); | ||
| 274 | pm8001_mw32(address, 0x48, | ||
| 275 | pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); | ||
| 276 | pm8001_mw32(address, 0x4C, | ||
| 277 | pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); | ||
| 278 | pm8001_mw32(address, 0x50, | ||
| 279 | pm8001_ha->main_cfg_tbl.upper_event_log_addr); | ||
| 280 | pm8001_mw32(address, 0x54, | ||
| 281 | pm8001_ha->main_cfg_tbl.lower_event_log_addr); | ||
| 282 | pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); | ||
| 283 | pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); | ||
| 284 | pm8001_mw32(address, 0x60, | ||
| 285 | pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); | ||
| 286 | pm8001_mw32(address, 0x64, | ||
| 287 | pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); | ||
| 288 | pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); | ||
| 289 | pm8001_mw32(address, 0x6C, | ||
| 290 | pm8001_ha->main_cfg_tbl.iop_event_log_option); | ||
| 291 | pm8001_mw32(address, 0x70, | ||
| 292 | pm8001_ha->main_cfg_tbl.fatal_err_interrupt); | ||
| 293 | } | ||
| 294 | |||
| 295 | /** | ||
| 296 | * update_inbnd_queue_table - update the inbound queue table to the HBA. | ||
| 297 | * @pm8001_ha: our hba card information | ||
| 298 | */ | ||
| 299 | static void __devinit | ||
| 300 | update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number) | ||
| 301 | { | ||
| 302 | void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; | ||
| 303 | u16 offset = number * 0x20; | ||
| 304 | pm8001_mw32(address, offset + 0x00, | ||
| 305 | pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); | ||
| 306 | pm8001_mw32(address, offset + 0x04, | ||
| 307 | pm8001_ha->inbnd_q_tbl[number].upper_base_addr); | ||
| 308 | pm8001_mw32(address, offset + 0x08, | ||
| 309 | pm8001_ha->inbnd_q_tbl[number].lower_base_addr); | ||
| 310 | pm8001_mw32(address, offset + 0x0C, | ||
| 311 | pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); | ||
| 312 | pm8001_mw32(address, offset + 0x10, | ||
| 313 | pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); | ||
| 314 | } | ||
| 315 | |||
| 316 | /** | ||
| 317 | * update_outbnd_queue_table - update the outbound queue table to the HBA. | ||
| 318 | * @pm8001_ha: our hba card information | ||
| 319 | */ | ||
| 320 | static void __devinit | ||
| 321 | update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number) | ||
| 322 | { | ||
| 323 | void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; | ||
| 324 | u16 offset = number * 0x24; | ||
| 325 | pm8001_mw32(address, offset + 0x00, | ||
| 326 | pm8001_ha->outbnd_q_tbl[number].element_size_cnt); | ||
| 327 | pm8001_mw32(address, offset + 0x04, | ||
| 328 | pm8001_ha->outbnd_q_tbl[number].upper_base_addr); | ||
| 329 | pm8001_mw32(address, offset + 0x08, | ||
| 330 | pm8001_ha->outbnd_q_tbl[number].lower_base_addr); | ||
| 331 | pm8001_mw32(address, offset + 0x0C, | ||
| 332 | pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); | ||
| 333 | pm8001_mw32(address, offset + 0x10, | ||
| 334 | pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); | ||
| 335 | pm8001_mw32(address, offset + 0x1C, | ||
| 336 | pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); | ||
| 337 | } | ||
| 338 | |||
| 339 | /** | ||
| 340 | * bar4_shift - function is called to shift BAR base address | ||
| 341 | * @pm8001_ha : our hba card infomation | ||
| 342 | * @shiftValue : shifting value in memory bar. | ||
| 343 | */ | ||
| 344 | static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) | ||
| 345 | { | ||
| 346 | u32 regVal; | ||
| 347 | u32 max_wait_count; | ||
| 348 | |||
| 349 | /* program the inbound AXI translation Lower Address */ | ||
| 350 | pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue); | ||
| 351 | |||
| 352 | /* confirm the setting is written */ | ||
| 353 | max_wait_count = 1 * 1000 * 1000; /* 1 sec */ | ||
| 354 | do { | ||
| 355 | udelay(1); | ||
| 356 | regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW); | ||
| 357 | } while ((regVal != shiftValue) && (--max_wait_count)); | ||
| 358 | |||
| 359 | if (!max_wait_count) { | ||
| 360 | PM8001_INIT_DBG(pm8001_ha, | ||
| 361 | pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW" | ||
| 362 | " = 0x%x\n", regVal)); | ||
| 363 | return -1; | ||
| 364 | } | ||
| 365 | return 0; | ||
| 366 | } | ||
| 367 | |||
| 368 | /** | ||
| 369 | * mpi_set_phys_g3_with_ssc | ||
| 370 | * @pm8001_ha: our hba card information | ||
| 371 | * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc. | ||
| 372 | */ | ||
| 373 | static void __devinit | ||
| 374 | mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) | ||
| 375 | { | ||
| 376 | u32 offset; | ||
| 377 | u32 value; | ||
| 378 | u32 i, j; | ||
| 379 | u32 bit_cnt; | ||
| 380 | |||
| 381 | #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 | ||
| 382 | #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 | ||
| 383 | #define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074 | ||
| 384 | #define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074 | ||
| 385 | #define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12 | ||
| 386 | #define PHY_G3_WITH_SSC_BIT_SHIFT 13 | ||
| 387 | #define SNW3_PHY_CAPABILITIES_PARITY 31 | ||
| 388 | |||
| 389 | /* | ||
| 390 | * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3) | ||
| 391 | * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7) | ||
| 392 | */ | ||
| 393 | if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) | ||
| 394 | return; | ||
| 395 | /* set SSC bit of PHY 0 - 3 */ | ||
| 396 | for (i = 0; i < 4; i++) { | ||
| 397 | offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; | ||
| 398 | value = pm8001_cr32(pm8001_ha, 2, offset); | ||
| 399 | if (SSCbit) { | ||
| 400 | value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT; | ||
| 401 | value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT); | ||
| 402 | } else { | ||
| 403 | value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT; | ||
| 404 | value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT); | ||
| 405 | } | ||
| 406 | bit_cnt = 0; | ||
| 407 | for (j = 0; j < 31; j++) | ||
| 408 | if ((value >> j) & 0x00000001) | ||
| 409 | bit_cnt++; | ||
| 410 | if (bit_cnt % 2) | ||
| 411 | value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY); | ||
| 412 | else | ||
| 413 | value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY; | ||
| 414 | |||
| 415 | pm8001_cw32(pm8001_ha, 2, offset, value); | ||
| 416 | } | ||
| 417 | |||
| 418 | /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ | ||
| 419 | if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) | ||
| 420 | return; | ||
| 421 | |||
| 422 | /* set SSC bit of PHY 4 - 7 */ | ||
| 423 | for (i = 4; i < 8; i++) { | ||
| 424 | offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); | ||
| 425 | value = pm8001_cr32(pm8001_ha, 2, offset); | ||
| 426 | if (SSCbit) { | ||
| 427 | value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT; | ||
| 428 | value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT); | ||
| 429 | } else { | ||
| 430 | value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT; | ||
| 431 | value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT); | ||
| 432 | } | ||
| 433 | bit_cnt = 0; | ||
| 434 | for (j = 0; j < 31; j++) | ||
| 435 | if ((value >> j) & 0x00000001) | ||
| 436 | bit_cnt++; | ||
| 437 | if (bit_cnt % 2) | ||
| 438 | value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY); | ||
| 439 | else | ||
| 440 | value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY; | ||
| 441 | |||
| 442 | pm8001_cw32(pm8001_ha, 2, offset, value); | ||
| 443 | } | ||
| 444 | |||
| 445 | /*set the shifted destination address to 0x0 to avoid error operation */ | ||
| 446 | bar4_shift(pm8001_ha, 0x0); | ||
| 447 | return; | ||
| 448 | } | ||
| 449 | |||
| 450 | /** | ||
| 451 | * mpi_set_open_retry_interval_reg | ||
| 452 | * @pm8001_ha: our hba card information | ||
| 453 | * @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us. | ||
| 454 | */ | ||
| 455 | static void __devinit | ||
| 456 | mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha, | ||
| 457 | u32 interval) | ||
| 458 | { | ||
| 459 | u32 offset; | ||
| 460 | u32 value; | ||
| 461 | u32 i; | ||
| 462 | |||
| 463 | #define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000 | ||
| 464 | #define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000 | ||
| 465 | #define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4 | ||
| 466 | #define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4 | ||
| 467 | #define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF | ||
| 468 | |||
| 469 | value = interval & OPEN_RETRY_INTERVAL_REG_MASK; | ||
| 470 | /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/ | ||
| 471 | if (-1 == bar4_shift(pm8001_ha, | ||
| 472 | OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) | ||
| 473 | return; | ||
| 474 | for (i = 0; i < 4; i++) { | ||
| 475 | offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i; | ||
| 476 | pm8001_cw32(pm8001_ha, 2, offset, value); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (-1 == bar4_shift(pm8001_ha, | ||
| 480 | OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) | ||
| 481 | return; | ||
| 482 | for (i = 4; i < 8; i++) { | ||
| 483 | offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4); | ||
| 484 | pm8001_cw32(pm8001_ha, 2, offset, value); | ||
| 485 | } | ||
| 486 | /*set the shifted destination address to 0x0 to avoid error operation */ | ||
| 487 | bar4_shift(pm8001_ha, 0x0); | ||
| 488 | return; | ||
| 489 | } | ||
| 490 | |||
| 491 | /** | ||
| 492 | * mpi_init_check - check firmware initialization status. | ||
| 493 | * @pm8001_ha: our hba card information | ||
| 494 | */ | ||
| 495 | static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) | ||
| 496 | { | ||
| 497 | u32 max_wait_count; | ||
| 498 | u32 value; | ||
| 499 | u32 gst_len_mpistate; | ||
| 500 | /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the | ||
| 501 | table is updated */ | ||
| 502 | pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); | ||
| 503 | /* wait until Inbound DoorBell Clear Register toggled */ | ||
| 504 | max_wait_count = 1 * 1000 * 1000;/* 1 sec */ | ||
| 505 | do { | ||
| 506 | udelay(1); | ||
| 507 | value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); | ||
| 508 | value &= SPC_MSGU_CFG_TABLE_UPDATE; | ||
| 509 | } while ((value != 0) && (--max_wait_count)); | ||
| 510 | |||
| 511 | if (!max_wait_count) | ||
| 512 | return -1; | ||
| 513 | /* check the MPI-State for initialization */ | ||
| 514 | gst_len_mpistate = | ||
| 515 | pm8001_mr32(pm8001_ha->general_stat_tbl_addr, | ||
| 516 | GST_GSTLEN_MPIS_OFFSET); | ||
| 517 | if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK)) | ||
| 518 | return -1; | ||
| 519 | /* check MPI Initialization error */ | ||
| 520 | gst_len_mpistate = gst_len_mpistate >> 16; | ||
| 521 | if (0x0000 != gst_len_mpistate) | ||
| 522 | return -1; | ||
| 523 | return 0; | ||
| 524 | } | ||
| 525 | |||
| 526 | /** | ||
| 527 | * check_fw_ready - The LLDD check if the FW is ready, if not, return error. | ||
| 528 | * @pm8001_ha: our hba card information | ||
| 529 | */ | ||
| 530 | static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) | ||
| 531 | { | ||
| 532 | u32 value, value1; | ||
| 533 | u32 max_wait_count; | ||
| 534 | /* check error state */ | ||
| 535 | value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); | ||
| 536 | value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); | ||
| 537 | /* check AAP error */ | ||
| 538 | if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { | ||
| 539 | /* error state */ | ||
| 540 | value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); | ||
| 541 | return -1; | ||
| 542 | } | ||
| 543 | |||
| 544 | /* check IOP error */ | ||
| 545 | if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) { | ||
| 546 | /* error state */ | ||
| 547 | value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); | ||
| 548 | return -1; | ||
| 549 | } | ||
| 550 | |||
| 551 | /* bit 4-31 of scratch pad1 should be zeros if it is not | ||
| 552 | in error state*/ | ||
| 553 | if (value & SCRATCH_PAD1_STATE_MASK) { | ||
| 554 | /* error case */ | ||
| 555 | pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); | ||
| 556 | return -1; | ||
| 557 | } | ||
| 558 | |||
| 559 | /* bit 2, 4-31 of scratch pad2 should be zeros if it is not | ||
| 560 | in error state */ | ||
| 561 | if (value1 & SCRATCH_PAD2_STATE_MASK) { | ||
| 562 | /* error case */ | ||
| 563 | return -1; | ||
| 564 | } | ||
| 565 | |||
| 566 | max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */ | ||
| 567 | |||
| 568 | /* wait until scratch pad 1 and 2 registers in ready state */ | ||
| 569 | do { | ||
| 570 | udelay(1); | ||
| 571 | value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) | ||
| 572 | & SCRATCH_PAD1_RDY; | ||
| 573 | value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) | ||
| 574 | & SCRATCH_PAD2_RDY; | ||
| 575 | if ((--max_wait_count) == 0) | ||
| 576 | return -1; | ||
| 577 | } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); | ||
| 578 | return 0; | ||
| 579 | } | ||
| 580 | |||
| 581 | static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) | ||
| 582 | { | ||
| 583 | void __iomem *base_addr; | ||
| 584 | u32 value; | ||
| 585 | u32 offset; | ||
| 586 | u32 pcibar; | ||
| 587 | u32 pcilogic; | ||
| 588 | |||
| 589 | value = pm8001_cr32(pm8001_ha, 0, 0x44); | ||
| 590 | offset = value & 0x03FFFFFF; | ||
| 591 | PM8001_INIT_DBG(pm8001_ha, | ||
| 592 | pm8001_printk("Scratchpad 0 Offset: %x \n", offset)); | ||
| 593 | pcilogic = (value & 0xFC000000) >> 26; | ||
| 594 | pcibar = get_pci_bar_index(pcilogic); | ||
| 595 | PM8001_INIT_DBG(pm8001_ha, | ||
| 596 | pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar)); | ||
| 597 | pm8001_ha->main_cfg_tbl_addr = base_addr = | ||
| 598 | pm8001_ha->io_mem[pcibar].memvirtaddr + offset; | ||
| 599 | pm8001_ha->general_stat_tbl_addr = | ||
| 600 | base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18); | ||
| 601 | pm8001_ha->inbnd_q_tbl_addr = | ||
| 602 | base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C); | ||
| 603 | pm8001_ha->outbnd_q_tbl_addr = | ||
| 604 | base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20); | ||
| 605 | } | ||
| 606 | |||
| 607 | /** | ||
| 608 | * pm8001_chip_init - the main init function that initialize whole PM8001 chip. | ||
| 609 | * @pm8001_ha: our hba card information | ||
| 610 | */ | ||
| 611 | static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) | ||
| 612 | { | ||
| 613 | /* check the firmware status */ | ||
| 614 | if (-1 == check_fw_ready(pm8001_ha)) { | ||
| 615 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 616 | pm8001_printk("Firmware is not ready!\n")); | ||
| 617 | return -EBUSY; | ||
| 618 | } | ||
| 619 | |||
| 620 | /* Initialize pci space address eg: mpi offset */ | ||
| 621 | init_pci_device_addresses(pm8001_ha); | ||
| 622 | init_default_table_values(pm8001_ha); | ||
| 623 | read_main_config_table(pm8001_ha); | ||
| 624 | read_general_status_table(pm8001_ha); | ||
| 625 | read_inbnd_queue_table(pm8001_ha); | ||
| 626 | read_outbnd_queue_table(pm8001_ha); | ||
| 627 | /* update main config table ,inbound table and outbound table */ | ||
| 628 | update_main_config_table(pm8001_ha); | ||
| 629 | update_inbnd_queue_table(pm8001_ha, 0); | ||
| 630 | update_outbnd_queue_table(pm8001_ha, 0); | ||
| 631 | mpi_set_phys_g3_with_ssc(pm8001_ha, 0); | ||
| 632 | mpi_set_open_retry_interval_reg(pm8001_ha, 7); | ||
| 633 | /* notify firmware update finished and check initialization status */ | ||
| 634 | if (0 == mpi_init_check(pm8001_ha)) { | ||
| 635 | PM8001_INIT_DBG(pm8001_ha, | ||
| 636 | pm8001_printk("MPI initialize successful!\n")); | ||
| 637 | } else | ||
| 638 | return -EBUSY; | ||
| 639 | /*This register is a 16-bit timer with a resolution of 1us. This is the | ||
| 640 | timer used for interrupt delay/coalescing in the PCIe Application Layer. | ||
| 641 | Zero is not a valid value. A value of 1 in the register will cause the | ||
| 642 | interrupts to be normal. A value greater than 1 will cause coalescing | ||
| 643 | delays.*/ | ||
| 644 | pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1); | ||
| 645 | pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0); | ||
| 646 | return 0; | ||
| 647 | } | ||
| 648 | |||
| 649 | static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) | ||
| 650 | { | ||
| 651 | u32 max_wait_count; | ||
| 652 | u32 value; | ||
| 653 | u32 gst_len_mpistate; | ||
| 654 | init_pci_device_addresses(pm8001_ha); | ||
| 655 | /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the | ||
| 656 | table is stop */ | ||
| 657 | pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET); | ||
| 658 | |||
| 659 | /* wait until Inbound DoorBell Clear Register toggled */ | ||
| 660 | max_wait_count = 1 * 1000 * 1000;/* 1 sec */ | ||
| 661 | do { | ||
| 662 | udelay(1); | ||
| 663 | value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); | ||
| 664 | value &= SPC_MSGU_CFG_TABLE_RESET; | ||
| 665 | } while ((value != 0) && (--max_wait_count)); | ||
| 666 | |||
| 667 | if (!max_wait_count) { | ||
| 668 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 669 | pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value)); | ||
| 670 | return -1; | ||
| 671 | } | ||
| 672 | |||
| 673 | /* check the MPI-State for termination in progress */ | ||
| 674 | /* wait until Inbound DoorBell Clear Register toggled */ | ||
| 675 | max_wait_count = 1 * 1000 * 1000; /* 1 sec */ | ||
| 676 | do { | ||
| 677 | udelay(1); | ||
| 678 | gst_len_mpistate = | ||
| 679 | pm8001_mr32(pm8001_ha->general_stat_tbl_addr, | ||
| 680 | GST_GSTLEN_MPIS_OFFSET); | ||
| 681 | if (GST_MPI_STATE_UNINIT == | ||
| 682 | (gst_len_mpistate & GST_MPI_STATE_MASK)) | ||
| 683 | break; | ||
| 684 | } while (--max_wait_count); | ||
| 685 | if (!max_wait_count) { | ||
| 686 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 687 | pm8001_printk(" TIME OUT MPI State = 0x%x\n", | ||
| 688 | gst_len_mpistate & GST_MPI_STATE_MASK)); | ||
| 689 | return -1; | ||
| 690 | } | ||
| 691 | return 0; | ||
| 692 | } | ||
| 693 | |||
/**
 * soft_reset_ready_check - Function to check FW is ready for soft reset.
 * @pm8001_ha: our hba card information
 *
 * Shuts down the MPI layer, then checks the firmware-ready-for-reset
 * bit in scratch pad 2.  If it is not set, an NMI is triggered twice
 * through the RB6 window and the bit is re-checked after 100 ms.
 * Returns 0 when the firmware is ready, -1 (as u32) otherwise.
 *
 * NOTE(review): the return type is u32 but -1 is returned on failure;
 * callers appear to only test for non-zero, so this works, but a
 * signed return type would be cleaner - confirm call sites.
 */
static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 regVal, regVal1, regVal2;
	/* the MPI layer must be uninitialized before a soft reset */
	if (mpi_uninit_check(pm8001_ha) != 0) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("MPI state is not ready\n"));
		return -1;
	}
	/* read the scratch pad 2 register bit 2 (firmware-ready-for-reset) */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
		& SCRATCH_PAD2_FWRDY_RST;
	if (regVal == SCRATCH_PAD2_FWRDY_RST) {
		PM8001_INIT_DBG(pm8001_ha,
			pm8001_printk("Firmware is ready for reset .\n"));
	} else {
		/* Trigger NMI twice via RB6 */
		if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Shift Bar4 to 0x%x failed\n",
				RB6_ACCESS_REG));
			return -1;
		}
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
			RB6_MAGIC_NUMBER_RST);
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
		/* wait for 100 ms for the firmware to react to the NMI */
		mdelay(100);
		regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
			SCRATCH_PAD2_FWRDY_RST;
		if (regVal != SCRATCH_PAD2_FWRDY_RST) {
			/* still not ready: dump all scratch pads for debug */
			regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
				"=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
				regVal1, regVal2));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
			return -1;
		}
	}
	return 0;
}
| 745 | |||
/**
 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
 * the FW register status to the originated status.
 * @pm8001_ha: our hba card information
 * @signature: signature in host scratch pad0 register.
 *
 * Walks the SPC soft-reset sequence step by step: quiesce the NMI and
 * PCIe interrupt sources, hold the GSM/IOP/AAP1/BDMA/OSSP blocks in
 * reset, release them again and - when @signature selects normal mode
 * - wait for the firmware to come back up.  The statement order below
 * follows the documented hardware sequence and must not be changed.
 * Returns 0 on success, -1 on any failure.
 */
static int
pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
{
	u32 regVal, toggleVal;
	u32 max_wait_count;
	u32 regVal1, regVal2, regVal3;

	/* step1: Check FW is ready for soft reset */
	if (soft_reset_ready_check(pm8001_ha) != 0) {
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
		return -1;
	}

	/* step 2: clear NMI status register on AAP1 and IOP, write the same
	value to clear */
	/* map 0x60000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			MBIC_AAP1_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
	/* map 0x70000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			MBIC_IOP_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);

	/* disable PCIe event/error interrupt sources and acknowledge any
	pending status (presumably write-1-to-clear - the read value is
	written back; confirm against the SPC register spec) */
	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);

	/* read the scratch pad 1 register bit 2; the firmware toggles this
	bit when the reset completes, so remember the expected value */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
		& SCRATCH_PAD1_RST;
	toggleVal = regVal ^ SCRATCH_PAD1_RST;

	/* set signature in host scratch pad0 register to tell SPC that the
	host performs the soft reset */
	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);

	/* read required registers for confirming */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			GSM_ADDR_BASE));
		return -1;
	}
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
		" Reset = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 3: host read GSM Configuration and Reset register */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to low */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal &= ~(0x00003b00);
	/* host write GSM Configuration and Reset register */
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
		"Configuration and Reset is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 4: */
	/* disable GSM - Read Address Parity Check; the original values are
	saved in regVal1/2/3 and restored in step 12 below */
	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check "
		"Enable = 0x%x\n", regVal1));
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));

	/* disable GSM - Write Address Parity Check */
	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
		" Enable = 0x%x\n", regVal2));
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check "
		"Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));

	/* disable GSM - Write Data Parity Check */
	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x300048 - Write Data Parity Check"
		" Enable = 0x%x\n", regVal3));
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));

	/* step 5: delay 10 usec */
	udelay(10);
	/* step 5-b: set GPIO-0 output control to tristate anyway */
	if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
		/* NOTE(review): this failure path logs via INIT_DBG while the
		other bar4_shift failures use FAIL_DBG - confirm intent */
		PM8001_INIT_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			GPIO_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GPIO Output Control Register:"
		" = 0x%x\n", regVal));
	/* set GPIO-0 output control to tri-state */
	regVal &= 0xFFFFFFFC;
	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);

	/* Step 6: Reset the IOP and AAP1 */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
			SPC_TOP_LEVEL_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before resetting IOP/AAP1"
		":= 0x%x\n", regVal));
	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 7: Reset the BDMA/OSSP */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before resetting BDMA/OSSP"
		": = 0x%x\n", regVal));
	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 8: delay 10 usec */
	udelay(10);

	/* step 9: bring the BDMA and OSSP out of reset */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before bringing up BDMA/OSSP"
		":= 0x%x\n", regVal));
	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 10: delay 10 usec */
	udelay(10);

	/* step 11: reads and sets the GSM Configuration and Reset Register */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
			GSM_ADDR_BASE));
		return -1;
	}
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
		"Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to high */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal |= (GSM_CONFIG_RESET_VALUE);
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
		" Configuration and Reset is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 12: Restore GSM - Read Address Parity Check (saved as
	regVal1 in step 4) */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	/* just for debugging */
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
		" = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity"
		" Check Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
	/* Restore GSM - Write Address Parity Check (saved as regVal2) */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
		" Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
	/* Restore GSM - Write Data Parity Check (saved as regVal3) */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));

	/* step 13: bring the IOP and AAP1 out of reset */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			SPC_TOP_LEVEL_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 14: delay 10 usec - Normal Mode */
	udelay(10);
	/* check Soft Reset Normal mode or Soft Reset HDA mode */
	if (signature == SPC_SOFT_RESET_SIGNATURE) {
		/* step 15 (Normal Mode): wait until scratch pad1 register
		bit 2 toggled (see toggleVal computed before the reset) */
		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
		do {
			udelay(1);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
				SCRATCH_PAD1_RST;
		} while ((regVal != toggleVal) && (--max_wait_count));

		if (!max_wait_count) {
			regVal = pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_1);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
				"MSGU_SCRATCH_PAD1 = 0x%x\n",
				toggleVal, regVal));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_0)));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_2)));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_3)));
			return -1;
		}

		/* step 16 (Normal) - Clear ODMR and ODCR */
		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);

		/* step 17 (Normal Mode): wait for the FW and IOP to get
		ready - 1 sec timeout */
		/* Wait for the SPC Configuration Table to be ready */
		if (check_fw_ready(pm8001_ha) == -1) {
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			/* return error if MPI Configuration Table not ready */
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("FW not ready SCRATCH_PAD1"
				" = 0x%x\n", regVal));
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			/* return error if MPI Configuration Table not ready */
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("FW not ready SCRATCH_PAD2"
				" = 0x%x\n", regVal));
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_0)));
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_3)));
			return -1;
		}
	}

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("SPC soft reset Complete\n"));
	return 0;
}
| 1072 | |||
| 1073 | static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) | ||
| 1074 | { | ||
| 1075 | u32 i; | ||
| 1076 | u32 regVal; | ||
| 1077 | PM8001_INIT_DBG(pm8001_ha, | ||
| 1078 | pm8001_printk("chip reset start\n")); | ||
| 1079 | |||
| 1080 | /* do SPC chip reset. */ | ||
| 1081 | regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); | ||
| 1082 | regVal &= ~(SPC_REG_RESET_DEVICE); | ||
| 1083 | pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); | ||
| 1084 | |||
| 1085 | /* delay 10 usec */ | ||
| 1086 | udelay(10); | ||
| 1087 | |||
| 1088 | /* bring chip reset out of reset */ | ||
| 1089 | regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); | ||
| 1090 | regVal |= SPC_REG_RESET_DEVICE; | ||
| 1091 | pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); | ||
| 1092 | |||
| 1093 | /* delay 10 usec */ | ||
| 1094 | udelay(10); | ||
| 1095 | |||
| 1096 | /* wait for 20 msec until the firmware gets reloaded */ | ||
| 1097 | i = 20; | ||
| 1098 | do { | ||
| 1099 | mdelay(1); | ||
| 1100 | } while ((--i) != 0); | ||
| 1101 | |||
| 1102 | PM8001_INIT_DBG(pm8001_ha, | ||
| 1103 | pm8001_printk("chip reset finished\n")); | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | /** | ||
| 1107 | * pm8001_chip_iounmap - which maped when initilized. | ||
| 1108 | * @pm8001_ha: our hba card information | ||
| 1109 | */ | ||
| 1110 | static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) | ||
| 1111 | { | ||
| 1112 | s8 bar, logical = 0; | ||
| 1113 | for (bar = 0; bar < 6; bar++) { | ||
| 1114 | /* | ||
| 1115 | ** logical BARs for SPC: | ||
| 1116 | ** bar 0 and 1 - logical BAR0 | ||
| 1117 | ** bar 2 and 3 - logical BAR1 | ||
| 1118 | ** bar4 - logical BAR2 | ||
| 1119 | ** bar5 - logical BAR3 | ||
| 1120 | ** Skip the appropriate assignments: | ||
| 1121 | */ | ||
| 1122 | if ((bar == 1) || (bar == 3)) | ||
| 1123 | continue; | ||
| 1124 | if (pm8001_ha->io_mem[logical].memvirtaddr) { | ||
| 1125 | iounmap(pm8001_ha->io_mem[logical].memvirtaddr); | ||
| 1126 | logical++; | ||
| 1127 | } | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
/**
 * pm8001_chip_intx_interrupt_enable - enable PM8001 chip INTx interrupt
 * @pm8001_ha: our hba card information
 *
 * Clears the outbound doorbell mask register (unmasking all sources)
 * and then clears any already-latched doorbell status so a stale
 * event cannot fire immediately.
 */
static void
pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}
| 1141 | |||
/**
 * pm8001_chip_intx_interrupt_disable - disable PM8001 chip INTx interrupt
 * @pm8001_ha: our hba card information
 *
 * Masks every source in the outbound doorbell mask register.
 */
static void
pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
}
| 1151 | |||
| 1152 | /** | ||
| 1153 | * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt | ||
| 1154 | * @pm8001_ha: our hba card information | ||
| 1155 | */ | ||
| 1156 | static void | ||
| 1157 | pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha, | ||
| 1158 | u32 int_vec_idx) | ||
| 1159 | { | ||
| 1160 | u32 msi_index; | ||
| 1161 | u32 value; | ||
| 1162 | msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; | ||
| 1163 | msi_index += MSIX_TABLE_BASE; | ||
| 1164 | pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE); | ||
| 1165 | value = (1 << int_vec_idx); | ||
| 1166 | pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value); | ||
| 1167 | |||
| 1168 | } | ||
| 1169 | |||
| 1170 | /** | ||
| 1171 | * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt | ||
| 1172 | * @pm8001_ha: our hba card information | ||
| 1173 | */ | ||
| 1174 | static void | ||
| 1175 | pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, | ||
| 1176 | u32 int_vec_idx) | ||
| 1177 | { | ||
| 1178 | u32 msi_index; | ||
| 1179 | msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; | ||
| 1180 | msi_index += MSIX_TABLE_BASE; | ||
| 1181 | pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE); | ||
| 1182 | |||
| 1183 | } | ||
/**
 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 *
 * Dispatches to the MSI-X or INTx variant depending on the build-time
 * PM8001_USE_MSIX configuration (vector 0 in the MSI-X case).
 */
static void
pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
#else
	pm8001_chip_intx_interrupt_enable(pm8001_ha);
#endif
}
| 1198 | |||
/**
 * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 *
 * Dispatches to the MSI-X or INTx variant depending on the build-time
 * PM8001_USE_MSIX configuration (vector 0 in the MSI-X case).
 */
static void
pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
#else
	pm8001_chip_intx_interrupt_disable(pm8001_ha);
#endif
}
| 1213 | |||
/**
 * mpi_msg_free_get - get the free message buffer for transfer inbound queue.
 * @circularQ: the inbound queue we want to transfer to HBA.
 * @messageSize: the message size of this transfer, normally it is 64 bytes
 * @messagePtr: the pointer to message.
 *
 * On success returns 0 and points *@messagePtr at the payload area
 * (just past the message header) of a free IOMB slot.  Returns -1
 * with *@messagePtr = NULL when the request exceeds one 64-byte slot
 * or when the 256-entry ring is full.
 */
static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
		u16 messageSize, void **messagePtr)
{
	u32 offset, consumer_index;
	struct mpi_msg_hdr *msgHeader;
	u8 bcCount = 1; /* only support single buffer */

	/* Checks is the requested message size can be allocated in this
	   queue: each IOMB slot is 64 bytes */
	if (messageSize > 64) {
		*messagePtr = NULL;
		return -1;
	}

	/* refresh the cached consumer index from the shared-memory copy */
	consumer_index = pm8001_read_32(circularQ->ci_virt);
	circularQ->consumer_index = cpu_to_le32(consumer_index);
	/* ring full when the producer would catch up with the consumer.
	   NOTE(review): this compares the CPU-order producer value with
	   the cpu_to_le32()-converted consumer field, which is only
	   equivalent on little-endian hosts - verify on big-endian. */
	if (((circularQ->producer_idx + bcCount) % 256) ==
		circularQ->consumer_index) {
		*messagePtr = NULL;
		return -1;
	}
	/* get memory IOMB buffer address */
	offset = circularQ->producer_idx * 64;
	/* increment to next bcCount element */
	circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
	/* Adds that distance to the base of the region virtual address plus
	the message header size */
	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
	*messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
	return 0;
}
| 1251 | |||
| 1252 | /** | ||
| 1253 | * mpi_build_cmd- build the message queue for transfer, update the PI to FW | ||
| 1254 | * to tell the fw to get this message from IOMB. | ||
| 1255 | * @pm8001_ha: our hba card information | ||
| 1256 | * @circularQ: the inbound queue we want to transfer to HBA. | ||
| 1257 | * @opCode: the operation code represents commands which LLDD and fw recognized. | ||
| 1258 | * @payload: the command payload of each operation command. | ||
| 1259 | */ | ||
| 1260 | static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, | ||
| 1261 | struct inbound_queue_table *circularQ, | ||
| 1262 | u32 opCode, void *payload) | ||
| 1263 | { | ||
| 1264 | u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; | ||
| 1265 | u32 responseQueue = 0; | ||
| 1266 | void *pMessage; | ||
| 1267 | |||
| 1268 | if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { | ||
| 1269 | PM8001_IO_DBG(pm8001_ha, | ||
| 1270 | pm8001_printk("No free mpi buffer \n")); | ||
| 1271 | return -1; | ||
| 1272 | } | ||
| 1273 | BUG_ON(!payload); | ||
| 1274 | /*Copy to the payload*/ | ||
| 1275 | memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); | ||
| 1276 | |||
| 1277 | /*Build the header*/ | ||
| 1278 | Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) | ||
| 1279 | | ((responseQueue & 0x3F) << 16) | ||
| 1280 | | ((category & 0xF) << 12) | (opCode & 0xFFF)); | ||
| 1281 | |||
| 1282 | pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header)); | ||
| 1283 | /*Update the PI to the firmware*/ | ||
| 1284 | pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, | ||
| 1285 | circularQ->pi_offset, circularQ->producer_idx); | ||
| 1286 | PM8001_IO_DBG(pm8001_ha, | ||
| 1287 | pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx, | ||
| 1288 | circularQ->consumer_index)); | ||
| 1289 | return 0; | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, | ||
| 1293 | struct outbound_queue_table *circularQ, u8 bc) | ||
| 1294 | { | ||
| 1295 | u32 producer_index; | ||
| 1296 | struct mpi_msg_hdr *msgHeader; | ||
| 1297 | struct mpi_msg_hdr *pOutBoundMsgHeader; | ||
| 1298 | |||
| 1299 | msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); | ||
| 1300 | pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + | ||
| 1301 | circularQ->consumer_idx * 64); | ||
| 1302 | if (pOutBoundMsgHeader != msgHeader) { | ||
| 1303 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 1304 | pm8001_printk("consumer_idx = %d msgHeader = %p\n", | ||
| 1305 | circularQ->consumer_idx, msgHeader)); | ||
| 1306 | |||
| 1307 | /* Update the producer index from SPC */ | ||
| 1308 | producer_index = pm8001_read_32(circularQ->pi_virt); | ||
| 1309 | circularQ->producer_index = cpu_to_le32(producer_index); | ||
| 1310 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 1311 | pm8001_printk("consumer_idx = %d producer_index = %d" | ||
| 1312 | "msgHeader = %p\n", circularQ->consumer_idx, | ||
| 1313 | circularQ->producer_index, msgHeader)); | ||
| 1314 | return 0; | ||
| 1315 | } | ||
| 1316 | /* free the circular queue buffer elements associated with the message*/ | ||
| 1317 | circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256; | ||
| 1318 | /* update the CI of outbound queue */ | ||
| 1319 | pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, | ||
| 1320 | circularQ->consumer_idx); | ||
| 1321 | /* Update the producer index from SPC*/ | ||
| 1322 | producer_index = pm8001_read_32(circularQ->pi_virt); | ||
| 1323 | circularQ->producer_index = cpu_to_le32(producer_index); | ||
| 1324 | PM8001_IO_DBG(pm8001_ha, | ||
| 1325 | pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx, | ||
| 1326 | circularQ->producer_index)); | ||
| 1327 | return 0; | ||
| 1328 | } | ||
| 1329 | |||
/**
 * mpi_msg_consume- get the MPI message from outbound queue message table.
 * @pm8001_ha: our hba card information
 * @circularQ: the outbound queue table.
 * @messagePtr1: out parameter; receives a pointer to the message payload.
 * @pBC: out parameter; receives the buffer count (slots) of the message.
 *
 * Returns MPI_IO_STATUS_SUCCESS with a message, MPI_IO_STATUS_FAIL when a
 * not-yet-posted entry is hit, or MPI_IO_STATUS_BUSY when the queue is empty.
 */
static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
			   struct outbound_queue_table *circularQ,
			   void **messagePtr1, u8 *pBC)
{
	struct mpi_msg_hdr *msgHeader;
	__le32 msgHeader_tmp;
	u32 header_tmp;
	do {
		/* If there are not-yet-delivered messages ... */
		if (circularQ->producer_index != circularQ->consumer_idx) {
			/*Get the pointer to the circular queue buffer element*/
			msgHeader = (struct mpi_msg_hdr *)
				(circularQ->base_virt +
				circularQ->consumer_idx * 64);
			/* read header */
			header_tmp = pm8001_read_32(msgHeader);
			msgHeader_tmp = cpu_to_le32(header_tmp);
			/* bit 31 set: the firmware has posted this entry
			 * (cleared again below once it has been consumed) */
			if (0 != (msgHeader_tmp & 0x80000000)) {
				/* low 12 bits carry the opcode; skip
				 * entries are firmware padding, not data */
				if (OPC_OUB_SKIP_ENTRY !=
					(msgHeader_tmp & 0xfff)) {
					/* payload starts right after hdr */
					*messagePtr1 =
						((u8 *)msgHeader) +
						sizeof(struct mpi_msg_hdr);
					/* bits 24..28 = buffer count */
					*pBC = (u8)((msgHeader_tmp >> 24) &
						0x1f);
					PM8001_IO_DBG(pm8001_ha,
						pm8001_printk(": CI=%d PI=%d "
						"msgHeader=%x\n",
						circularQ->consumer_idx,
						circularQ->producer_index,
						msgHeader_tmp));
					return MPI_IO_STATUS_SUCCESS;
				} else {
					/* skip entry: consume its slots,
					 * clear its header and keep looping */
					circularQ->consumer_idx =
						(circularQ->consumer_idx +
						((msgHeader_tmp >> 24) & 0x1f))
							% 256;
					msgHeader_tmp = 0;
					pm8001_write_32(msgHeader, 0, 0);
					/* update the CI of outbound queue */
					pm8001_cw32(pm8001_ha,
						circularQ->ci_pci_bar,
						circularQ->ci_offset,
						circularQ->consumer_idx);
				}
			} else {
				/* entry not posted yet although PI says it
				 * should be: consume and report failure */
				circularQ->consumer_idx =
					(circularQ->consumer_idx +
					((msgHeader_tmp >> 24) & 0x1f)) % 256;
				msgHeader_tmp = 0;
				pm8001_write_32(msgHeader, 0, 0);
				/* update the CI of outbound queue */
				pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
					circularQ->ci_offset,
					circularQ->consumer_idx);
				return MPI_IO_STATUS_FAIL;
			}
		} else {
			u32 producer_index;
			void *pi_virt = circularQ->pi_virt;
			/* Update the producer index from SPC */
			producer_index = pm8001_read_32(pi_virt);
			circularQ->producer_index = cpu_to_le32(producer_index);
		}
	} while (circularQ->producer_index != circularQ->consumer_idx);
	/* while we don't have any more not-yet-delivered message */
	/* report empty */
	return MPI_IO_STATUS_BUSY;
}
| 1406 | |||
| 1407 | static void pm8001_work_queue(struct work_struct *work) | ||
| 1408 | { | ||
| 1409 | struct delayed_work *dw = container_of(work, struct delayed_work, work); | ||
| 1410 | struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q); | ||
| 1411 | struct pm8001_device *pm8001_dev; | ||
| 1412 | struct domain_device *dev; | ||
| 1413 | |||
| 1414 | switch (wq->handler) { | ||
| 1415 | case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: | ||
| 1416 | pm8001_dev = wq->data; | ||
| 1417 | dev = pm8001_dev->sas_device; | ||
| 1418 | pm8001_I_T_nexus_reset(dev); | ||
| 1419 | break; | ||
| 1420 | case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: | ||
| 1421 | pm8001_dev = wq->data; | ||
| 1422 | dev = pm8001_dev->sas_device; | ||
| 1423 | pm8001_I_T_nexus_reset(dev); | ||
| 1424 | break; | ||
| 1425 | case IO_DS_IN_ERROR: | ||
| 1426 | pm8001_dev = wq->data; | ||
| 1427 | dev = pm8001_dev->sas_device; | ||
| 1428 | pm8001_I_T_nexus_reset(dev); | ||
| 1429 | break; | ||
| 1430 | case IO_DS_NON_OPERATIONAL: | ||
| 1431 | pm8001_dev = wq->data; | ||
| 1432 | dev = pm8001_dev->sas_device; | ||
| 1433 | pm8001_I_T_nexus_reset(dev); | ||
| 1434 | break; | ||
| 1435 | } | ||
| 1436 | list_del(&wq->entry); | ||
| 1437 | kfree(wq); | ||
| 1438 | } | ||
| 1439 | |||
| 1440 | static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, | ||
| 1441 | int handler) | ||
| 1442 | { | ||
| 1443 | struct pm8001_wq *wq; | ||
| 1444 | int ret = 0; | ||
| 1445 | |||
| 1446 | wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC); | ||
| 1447 | if (wq) { | ||
| 1448 | wq->pm8001_ha = pm8001_ha; | ||
| 1449 | wq->data = data; | ||
| 1450 | wq->handler = handler; | ||
| 1451 | INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue); | ||
| 1452 | list_add_tail(&wq->entry, &pm8001_ha->wq_list); | ||
| 1453 | schedule_delayed_work(&wq->work_q, 0); | ||
| 1454 | } else | ||
| 1455 | ret = -ENOMEM; | ||
| 1456 | |||
| 1457 | return ret; | ||
| 1458 | } | ||
| 1459 | |||
| 1460 | /** | ||
| 1461 | * mpi_ssp_completion- process the event that FW response to the SSP request. | ||
| 1462 | * @pm8001_ha: our hba card information | ||
| 1463 | * @piomb: the message contents of this outbound message. | ||
| 1464 | * | ||
| 1465 | * When FW has completed a ssp request for example a IO request, after it has | ||
| 1466 | * filled the SG data with the data, it will trigger this event represent | ||
| 1467 | * that he has finished the job,please check the coresponding buffer. | ||
| 1468 | * So we will tell the caller who maybe waiting the result to tell upper layer | ||
| 1469 | * that the task has been finished. | ||
| 1470 | */ | ||
| 1471 | static void | ||
| 1472 | mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) | ||
| 1473 | { | ||
| 1474 | struct sas_task *t; | ||
| 1475 | struct pm8001_ccb_info *ccb; | ||
| 1476 | unsigned long flags; | ||
| 1477 | u32 status; | ||
| 1478 | u32 param; | ||
| 1479 | u32 tag; | ||
| 1480 | struct ssp_completion_resp *psspPayload; | ||
| 1481 | struct task_status_struct *ts; | ||
| 1482 | struct ssp_response_iu *iu; | ||
| 1483 | struct pm8001_device *pm8001_dev; | ||
| 1484 | psspPayload = (struct ssp_completion_resp *)(piomb + 4); | ||
| 1485 | status = le32_to_cpu(psspPayload->status); | ||
| 1486 | tag = le32_to_cpu(psspPayload->tag); | ||
| 1487 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 1488 | pm8001_dev = ccb->device; | ||
| 1489 | param = le32_to_cpu(psspPayload->param); | ||
| 1490 | |||
| 1491 | t = ccb->task; | ||
| 1492 | |||
| 1493 | if (status && status != IO_UNDERFLOW) | ||
| 1494 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 1495 | pm8001_printk("sas IO status 0x%x\n", status)); | ||
| 1496 | if (unlikely(!t || !t->lldd_task || !t->dev)) | ||
| 1497 | return; | ||
| 1498 | ts = &t->task_status; | ||
| 1499 | switch (status) { | ||
| 1500 | case IO_SUCCESS: | ||
| 1501 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" | ||
| 1502 | ",param = %d \n", param)); | ||
| 1503 | if (param == 0) { | ||
| 1504 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1505 | ts->stat = SAM_GOOD; | ||
| 1506 | } else { | ||
| 1507 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1508 | ts->stat = SAS_PROTO_RESPONSE; | ||
| 1509 | ts->residual = param; | ||
| 1510 | iu = &psspPayload->ssp_resp_iu; | ||
| 1511 | sas_ssp_task_response(pm8001_ha->dev, t, iu); | ||
| 1512 | } | ||
| 1513 | if (pm8001_dev) | ||
| 1514 | pm8001_dev->running_req--; | ||
| 1515 | break; | ||
| 1516 | case IO_ABORTED: | ||
| 1517 | PM8001_IO_DBG(pm8001_ha, | ||
| 1518 | pm8001_printk("IO_ABORTED IOMB Tag \n")); | ||
| 1519 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1520 | ts->stat = SAS_ABORTED_TASK; | ||
| 1521 | break; | ||
| 1522 | case IO_UNDERFLOW: | ||
| 1523 | /* SSP Completion with error */ | ||
| 1524 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW" | ||
| 1525 | ",param = %d \n", param)); | ||
| 1526 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1527 | ts->stat = SAS_DATA_UNDERRUN; | ||
| 1528 | ts->residual = param; | ||
| 1529 | if (pm8001_dev) | ||
| 1530 | pm8001_dev->running_req--; | ||
| 1531 | break; | ||
| 1532 | case IO_NO_DEVICE: | ||
| 1533 | PM8001_IO_DBG(pm8001_ha, | ||
| 1534 | pm8001_printk("IO_NO_DEVICE\n")); | ||
| 1535 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 1536 | ts->stat = SAS_PHY_DOWN; | ||
| 1537 | break; | ||
| 1538 | case IO_XFER_ERROR_BREAK: | ||
| 1539 | PM8001_IO_DBG(pm8001_ha, | ||
| 1540 | pm8001_printk("IO_XFER_ERROR_BREAK\n")); | ||
| 1541 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1542 | ts->stat = SAS_OPEN_REJECT; | ||
| 1543 | break; | ||
| 1544 | case IO_XFER_ERROR_PHY_NOT_READY: | ||
| 1545 | PM8001_IO_DBG(pm8001_ha, | ||
| 1546 | pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); | ||
| 1547 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1548 | ts->stat = SAS_OPEN_REJECT; | ||
| 1549 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1550 | break; | ||
| 1551 | case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: | ||
| 1552 | PM8001_IO_DBG(pm8001_ha, | ||
| 1553 | pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); | ||
| 1554 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1555 | ts->stat = SAS_OPEN_REJECT; | ||
| 1556 | ts->open_rej_reason = SAS_OREJ_EPROTO; | ||
| 1557 | break; | ||
| 1558 | case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: | ||
| 1559 | PM8001_IO_DBG(pm8001_ha, | ||
| 1560 | pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); | ||
| 1561 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1562 | ts->stat = SAS_OPEN_REJECT; | ||
| 1563 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 1564 | break; | ||
| 1565 | case IO_OPEN_CNX_ERROR_BREAK: | ||
| 1566 | PM8001_IO_DBG(pm8001_ha, | ||
| 1567 | pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); | ||
| 1568 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1569 | ts->stat = SAS_OPEN_REJECT; | ||
| 1570 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1571 | break; | ||
| 1572 | case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: | ||
| 1573 | PM8001_IO_DBG(pm8001_ha, | ||
| 1574 | pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); | ||
| 1575 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1576 | ts->stat = SAS_OPEN_REJECT; | ||
| 1577 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 1578 | if (!t->uldd_task) | ||
| 1579 | pm8001_handle_event(pm8001_ha, | ||
| 1580 | pm8001_dev, | ||
| 1581 | IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); | ||
| 1582 | break; | ||
| 1583 | case IO_OPEN_CNX_ERROR_BAD_DESTINATION: | ||
| 1584 | PM8001_IO_DBG(pm8001_ha, | ||
| 1585 | pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); | ||
| 1586 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1587 | ts->stat = SAS_OPEN_REJECT; | ||
| 1588 | ts->open_rej_reason = SAS_OREJ_BAD_DEST; | ||
| 1589 | break; | ||
| 1590 | case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: | ||
| 1591 | PM8001_IO_DBG(pm8001_ha, | ||
| 1592 | pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" | ||
| 1593 | "NOT_SUPPORTED\n")); | ||
| 1594 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1595 | ts->stat = SAS_OPEN_REJECT; | ||
| 1596 | ts->open_rej_reason = SAS_OREJ_CONN_RATE; | ||
| 1597 | break; | ||
| 1598 | case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: | ||
| 1599 | PM8001_IO_DBG(pm8001_ha, | ||
| 1600 | pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); | ||
| 1601 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 1602 | ts->stat = SAS_OPEN_REJECT; | ||
| 1603 | ts->open_rej_reason = SAS_OREJ_WRONG_DEST; | ||
| 1604 | break; | ||
| 1605 | case IO_XFER_ERROR_NAK_RECEIVED: | ||
| 1606 | PM8001_IO_DBG(pm8001_ha, | ||
| 1607 | pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); | ||
| 1608 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1609 | ts->stat = SAS_OPEN_REJECT; | ||
| 1610 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1611 | break; | ||
| 1612 | case IO_XFER_ERROR_ACK_NAK_TIMEOUT: | ||
| 1613 | PM8001_IO_DBG(pm8001_ha, | ||
| 1614 | pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); | ||
| 1615 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1616 | ts->stat = SAS_NAK_R_ERR; | ||
| 1617 | break; | ||
| 1618 | case IO_XFER_ERROR_DMA: | ||
| 1619 | PM8001_IO_DBG(pm8001_ha, | ||
| 1620 | pm8001_printk("IO_XFER_ERROR_DMA\n")); | ||
| 1621 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1622 | ts->stat = SAS_OPEN_REJECT; | ||
| 1623 | break; | ||
| 1624 | case IO_XFER_OPEN_RETRY_TIMEOUT: | ||
| 1625 | PM8001_IO_DBG(pm8001_ha, | ||
| 1626 | pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); | ||
| 1627 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1628 | ts->stat = SAS_OPEN_REJECT; | ||
| 1629 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1630 | break; | ||
| 1631 | case IO_XFER_ERROR_OFFSET_MISMATCH: | ||
| 1632 | PM8001_IO_DBG(pm8001_ha, | ||
| 1633 | pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); | ||
| 1634 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1635 | ts->stat = SAS_OPEN_REJECT; | ||
| 1636 | break; | ||
| 1637 | case IO_PORT_IN_RESET: | ||
| 1638 | PM8001_IO_DBG(pm8001_ha, | ||
| 1639 | pm8001_printk("IO_PORT_IN_RESET\n")); | ||
| 1640 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1641 | ts->stat = SAS_OPEN_REJECT; | ||
| 1642 | break; | ||
| 1643 | case IO_DS_NON_OPERATIONAL: | ||
| 1644 | PM8001_IO_DBG(pm8001_ha, | ||
| 1645 | pm8001_printk("IO_DS_NON_OPERATIONAL\n")); | ||
| 1646 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1647 | ts->stat = SAS_OPEN_REJECT; | ||
| 1648 | if (!t->uldd_task) | ||
| 1649 | pm8001_handle_event(pm8001_ha, | ||
| 1650 | pm8001_dev, | ||
| 1651 | IO_DS_NON_OPERATIONAL); | ||
| 1652 | break; | ||
| 1653 | case IO_DS_IN_RECOVERY: | ||
| 1654 | PM8001_IO_DBG(pm8001_ha, | ||
| 1655 | pm8001_printk("IO_DS_IN_RECOVERY\n")); | ||
| 1656 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1657 | ts->stat = SAS_OPEN_REJECT; | ||
| 1658 | break; | ||
| 1659 | case IO_TM_TAG_NOT_FOUND: | ||
| 1660 | PM8001_IO_DBG(pm8001_ha, | ||
| 1661 | pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); | ||
| 1662 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1663 | ts->stat = SAS_OPEN_REJECT; | ||
| 1664 | break; | ||
| 1665 | case IO_SSP_EXT_IU_ZERO_LEN_ERROR: | ||
| 1666 | PM8001_IO_DBG(pm8001_ha, | ||
| 1667 | pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); | ||
| 1668 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1669 | ts->stat = SAS_OPEN_REJECT; | ||
| 1670 | break; | ||
| 1671 | case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: | ||
| 1672 | PM8001_IO_DBG(pm8001_ha, | ||
| 1673 | pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); | ||
| 1674 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1675 | ts->stat = SAS_OPEN_REJECT; | ||
| 1676 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1677 | default: | ||
| 1678 | PM8001_IO_DBG(pm8001_ha, | ||
| 1679 | pm8001_printk("Unknown status 0x%x\n", status)); | ||
| 1680 | /* not allowed case. Therefore, return failed status */ | ||
| 1681 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1682 | ts->stat = SAS_OPEN_REJECT; | ||
| 1683 | break; | ||
| 1684 | } | ||
| 1685 | PM8001_IO_DBG(pm8001_ha, | ||
| 1686 | pm8001_printk("scsi_status = %x \n ", | ||
| 1687 | psspPayload->ssp_resp_iu.status)); | ||
| 1688 | spin_lock_irqsave(&t->task_state_lock, flags); | ||
| 1689 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 1690 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 1691 | t->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 1692 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | ||
| 1693 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 1694 | PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" | ||
| 1695 | " io_status 0x%x resp 0x%x " | ||
| 1696 | "stat 0x%x but aborted by upper layer!\n", | ||
| 1697 | t, status, ts->resp, ts->stat)); | ||
| 1698 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 1699 | } else { | ||
| 1700 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 1701 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 1702 | mb();/* in order to force CPU ordering */ | ||
| 1703 | t->task_done(t); | ||
| 1704 | } | ||
| 1705 | } | ||
| 1706 | |||
| 1707 | /*See the comments for mpi_ssp_completion */ | ||
| 1708 | static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) | ||
| 1709 | { | ||
| 1710 | struct sas_task *t; | ||
| 1711 | unsigned long flags; | ||
| 1712 | struct task_status_struct *ts; | ||
| 1713 | struct pm8001_ccb_info *ccb; | ||
| 1714 | struct pm8001_device *pm8001_dev; | ||
| 1715 | struct ssp_event_resp *psspPayload = | ||
| 1716 | (struct ssp_event_resp *)(piomb + 4); | ||
| 1717 | u32 event = le32_to_cpu(psspPayload->event); | ||
| 1718 | u32 tag = le32_to_cpu(psspPayload->tag); | ||
| 1719 | u32 port_id = le32_to_cpu(psspPayload->port_id); | ||
| 1720 | u32 dev_id = le32_to_cpu(psspPayload->device_id); | ||
| 1721 | |||
| 1722 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 1723 | t = ccb->task; | ||
| 1724 | pm8001_dev = ccb->device; | ||
| 1725 | if (event) | ||
| 1726 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 1727 | pm8001_printk("sas IO status 0x%x\n", event)); | ||
| 1728 | if (unlikely(!t || !t->lldd_task || !t->dev)) | ||
| 1729 | return; | ||
| 1730 | ts = &t->task_status; | ||
| 1731 | PM8001_IO_DBG(pm8001_ha, | ||
| 1732 | pm8001_printk("port_id = %x,device_id = %x\n", | ||
| 1733 | port_id, dev_id)); | ||
| 1734 | switch (event) { | ||
| 1735 | case IO_OVERFLOW: | ||
| 1736 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");) | ||
| 1737 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1738 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1739 | ts->residual = 0; | ||
| 1740 | if (pm8001_dev) | ||
| 1741 | pm8001_dev->running_req--; | ||
| 1742 | break; | ||
| 1743 | case IO_XFER_ERROR_BREAK: | ||
| 1744 | PM8001_IO_DBG(pm8001_ha, | ||
| 1745 | pm8001_printk("IO_XFER_ERROR_BREAK\n")); | ||
| 1746 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1747 | ts->stat = SAS_INTERRUPTED; | ||
| 1748 | break; | ||
| 1749 | case IO_XFER_ERROR_PHY_NOT_READY: | ||
| 1750 | PM8001_IO_DBG(pm8001_ha, | ||
| 1751 | pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); | ||
| 1752 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1753 | ts->stat = SAS_OPEN_REJECT; | ||
| 1754 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1755 | break; | ||
| 1756 | case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: | ||
| 1757 | PM8001_IO_DBG(pm8001_ha, | ||
| 1758 | pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" | ||
| 1759 | "_SUPPORTED\n")); | ||
| 1760 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1761 | ts->stat = SAS_OPEN_REJECT; | ||
| 1762 | ts->open_rej_reason = SAS_OREJ_EPROTO; | ||
| 1763 | break; | ||
| 1764 | case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: | ||
| 1765 | PM8001_IO_DBG(pm8001_ha, | ||
| 1766 | pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); | ||
| 1767 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1768 | ts->stat = SAS_OPEN_REJECT; | ||
| 1769 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 1770 | break; | ||
| 1771 | case IO_OPEN_CNX_ERROR_BREAK: | ||
| 1772 | PM8001_IO_DBG(pm8001_ha, | ||
| 1773 | pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); | ||
| 1774 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1775 | ts->stat = SAS_OPEN_REJECT; | ||
| 1776 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1777 | break; | ||
| 1778 | case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: | ||
| 1779 | PM8001_IO_DBG(pm8001_ha, | ||
| 1780 | pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); | ||
| 1781 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1782 | ts->stat = SAS_OPEN_REJECT; | ||
| 1783 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 1784 | if (!t->uldd_task) | ||
| 1785 | pm8001_handle_event(pm8001_ha, | ||
| 1786 | pm8001_dev, | ||
| 1787 | IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); | ||
| 1788 | break; | ||
| 1789 | case IO_OPEN_CNX_ERROR_BAD_DESTINATION: | ||
| 1790 | PM8001_IO_DBG(pm8001_ha, | ||
| 1791 | pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); | ||
| 1792 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1793 | ts->stat = SAS_OPEN_REJECT; | ||
| 1794 | ts->open_rej_reason = SAS_OREJ_BAD_DEST; | ||
| 1795 | break; | ||
| 1796 | case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: | ||
| 1797 | PM8001_IO_DBG(pm8001_ha, | ||
| 1798 | pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" | ||
| 1799 | "NOT_SUPPORTED\n")); | ||
| 1800 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1801 | ts->stat = SAS_OPEN_REJECT; | ||
| 1802 | ts->open_rej_reason = SAS_OREJ_CONN_RATE; | ||
| 1803 | break; | ||
| 1804 | case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: | ||
| 1805 | PM8001_IO_DBG(pm8001_ha, | ||
| 1806 | pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); | ||
| 1807 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1808 | ts->stat = SAS_OPEN_REJECT; | ||
| 1809 | ts->open_rej_reason = SAS_OREJ_WRONG_DEST; | ||
| 1810 | break; | ||
| 1811 | case IO_XFER_ERROR_NAK_RECEIVED: | ||
| 1812 | PM8001_IO_DBG(pm8001_ha, | ||
| 1813 | pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); | ||
| 1814 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1815 | ts->stat = SAS_OPEN_REJECT; | ||
| 1816 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1817 | break; | ||
| 1818 | case IO_XFER_ERROR_ACK_NAK_TIMEOUT: | ||
| 1819 | PM8001_IO_DBG(pm8001_ha, | ||
| 1820 | pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); | ||
| 1821 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1822 | ts->stat = SAS_NAK_R_ERR; | ||
| 1823 | break; | ||
| 1824 | case IO_XFER_OPEN_RETRY_TIMEOUT: | ||
| 1825 | PM8001_IO_DBG(pm8001_ha, | ||
| 1826 | pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); | ||
| 1827 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1828 | ts->stat = SAS_OPEN_REJECT; | ||
| 1829 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 1830 | break; | ||
| 1831 | case IO_XFER_ERROR_UNEXPECTED_PHASE: | ||
| 1832 | PM8001_IO_DBG(pm8001_ha, | ||
| 1833 | pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); | ||
| 1834 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1835 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1836 | break; | ||
| 1837 | case IO_XFER_ERROR_XFER_RDY_OVERRUN: | ||
| 1838 | PM8001_IO_DBG(pm8001_ha, | ||
| 1839 | pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); | ||
| 1840 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1841 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1842 | break; | ||
| 1843 | case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: | ||
| 1844 | PM8001_IO_DBG(pm8001_ha, | ||
| 1845 | pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); | ||
| 1846 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1847 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1848 | break; | ||
| 1849 | case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: | ||
| 1850 | PM8001_IO_DBG(pm8001_ha, | ||
| 1851 | pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); | ||
| 1852 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1853 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1854 | break; | ||
| 1855 | case IO_XFER_ERROR_OFFSET_MISMATCH: | ||
| 1856 | PM8001_IO_DBG(pm8001_ha, | ||
| 1857 | pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); | ||
| 1858 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1859 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1860 | break; | ||
| 1861 | case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: | ||
| 1862 | PM8001_IO_DBG(pm8001_ha, | ||
| 1863 | pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); | ||
| 1864 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1865 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1866 | break; | ||
| 1867 | case IO_XFER_CMD_FRAME_ISSUED: | ||
| 1868 | PM8001_IO_DBG(pm8001_ha, | ||
| 1869 | pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n")); | ||
| 1870 | return; | ||
| 1871 | default: | ||
| 1872 | PM8001_IO_DBG(pm8001_ha, | ||
| 1873 | pm8001_printk("Unknown status 0x%x\n", event)); | ||
| 1874 | /* not allowed case. Therefore, return failed status */ | ||
| 1875 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1876 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1877 | break; | ||
| 1878 | } | ||
| 1879 | spin_lock_irqsave(&t->task_state_lock, flags); | ||
| 1880 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 1881 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 1882 | t->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 1883 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | ||
| 1884 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 1885 | PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" | ||
| 1886 | " event 0x%x resp 0x%x " | ||
| 1887 | "stat 0x%x but aborted by upper layer!\n", | ||
| 1888 | t, event, ts->resp, ts->stat)); | ||
| 1889 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 1890 | } else { | ||
| 1891 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 1892 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 1893 | mb();/* in order to force CPU ordering */ | ||
| 1894 | t->task_done(t); | ||
| 1895 | } | ||
| 1896 | } | ||
| 1897 | |||
| 1898 | /*See the comments for mpi_ssp_completion */ | ||
| 1899 | static void | ||
| 1900 | mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 1901 | { | ||
| 1902 | struct sas_task *t; | ||
| 1903 | struct pm8001_ccb_info *ccb; | ||
| 1904 | unsigned long flags; | ||
| 1905 | u32 param; | ||
| 1906 | u32 status; | ||
| 1907 | u32 tag; | ||
| 1908 | struct sata_completion_resp *psataPayload; | ||
| 1909 | struct task_status_struct *ts; | ||
| 1910 | struct ata_task_resp *resp ; | ||
| 1911 | u32 *sata_resp; | ||
| 1912 | struct pm8001_device *pm8001_dev; | ||
| 1913 | |||
| 1914 | psataPayload = (struct sata_completion_resp *)(piomb + 4); | ||
| 1915 | status = le32_to_cpu(psataPayload->status); | ||
| 1916 | tag = le32_to_cpu(psataPayload->tag); | ||
| 1917 | |||
| 1918 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 1919 | param = le32_to_cpu(psataPayload->param); | ||
| 1920 | t = ccb->task; | ||
| 1921 | ts = &t->task_status; | ||
| 1922 | pm8001_dev = ccb->device; | ||
| 1923 | if (status) | ||
| 1924 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 1925 | pm8001_printk("sata IO status 0x%x\n", status)); | ||
| 1926 | if (unlikely(!t || !t->lldd_task || !t->dev)) | ||
| 1927 | return; | ||
| 1928 | |||
| 1929 | switch (status) { | ||
| 1930 | case IO_SUCCESS: | ||
| 1931 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); | ||
| 1932 | if (param == 0) { | ||
| 1933 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1934 | ts->stat = SAM_GOOD; | ||
| 1935 | } else { | ||
| 1936 | u8 len; | ||
| 1937 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1938 | ts->stat = SAS_PROTO_RESPONSE; | ||
| 1939 | ts->residual = param; | ||
| 1940 | PM8001_IO_DBG(pm8001_ha, | ||
| 1941 | pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", | ||
| 1942 | param)); | ||
| 1943 | sata_resp = &psataPayload->sata_resp[0]; | ||
| 1944 | resp = (struct ata_task_resp *)ts->buf; | ||
| 1945 | if (t->ata_task.dma_xfer == 0 && | ||
| 1946 | t->data_dir == PCI_DMA_FROMDEVICE) { | ||
| 1947 | len = sizeof(struct pio_setup_fis); | ||
| 1948 | PM8001_IO_DBG(pm8001_ha, | ||
| 1949 | pm8001_printk("PIO read len = %d\n", len)); | ||
| 1950 | } else if (t->ata_task.use_ncq) { | ||
| 1951 | len = sizeof(struct set_dev_bits_fis); | ||
| 1952 | PM8001_IO_DBG(pm8001_ha, | ||
| 1953 | pm8001_printk("FPDMA len = %d\n", len)); | ||
| 1954 | } else { | ||
| 1955 | len = sizeof(struct dev_to_host_fis); | ||
| 1956 | PM8001_IO_DBG(pm8001_ha, | ||
| 1957 | pm8001_printk("other len = %d\n", len)); | ||
| 1958 | } | ||
| 1959 | if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { | ||
| 1960 | resp->frame_len = len; | ||
| 1961 | memcpy(&resp->ending_fis[0], sata_resp, len); | ||
| 1962 | ts->buf_valid_size = sizeof(*resp); | ||
| 1963 | } else | ||
| 1964 | PM8001_IO_DBG(pm8001_ha, | ||
| 1965 | pm8001_printk("response to large \n")); | ||
| 1966 | } | ||
| 1967 | if (pm8001_dev) | ||
| 1968 | pm8001_dev->running_req--; | ||
| 1969 | break; | ||
| 1970 | case IO_ABORTED: | ||
| 1971 | PM8001_IO_DBG(pm8001_ha, | ||
| 1972 | pm8001_printk("IO_ABORTED IOMB Tag \n")); | ||
| 1973 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1974 | ts->stat = SAS_ABORTED_TASK; | ||
| 1975 | if (pm8001_dev) | ||
| 1976 | pm8001_dev->running_req--; | ||
| 1977 | break; | ||
| 1978 | /* following cases are to do cases */ | ||
| 1979 | case IO_UNDERFLOW: | ||
| 1980 | /* SATA Completion with error */ | ||
| 1981 | PM8001_IO_DBG(pm8001_ha, | ||
| 1982 | pm8001_printk("IO_UNDERFLOW param = %d\n", param)); | ||
| 1983 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1984 | ts->stat = SAS_DATA_UNDERRUN; | ||
| 1985 | ts->residual = param; | ||
| 1986 | if (pm8001_dev) | ||
| 1987 | pm8001_dev->running_req--; | ||
| 1988 | break; | ||
| 1989 | case IO_NO_DEVICE: | ||
| 1990 | PM8001_IO_DBG(pm8001_ha, | ||
| 1991 | pm8001_printk("IO_NO_DEVICE\n")); | ||
| 1992 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 1993 | ts->stat = SAS_PHY_DOWN; | ||
| 1994 | break; | ||
| 1995 | case IO_XFER_ERROR_BREAK: | ||
| 1996 | PM8001_IO_DBG(pm8001_ha, | ||
| 1997 | pm8001_printk("IO_XFER_ERROR_BREAK\n")); | ||
| 1998 | ts->resp = SAS_TASK_COMPLETE; | ||
| 1999 | ts->stat = SAS_INTERRUPTED; | ||
| 2000 | break; | ||
| 2001 | case IO_XFER_ERROR_PHY_NOT_READY: | ||
| 2002 | PM8001_IO_DBG(pm8001_ha, | ||
| 2003 | pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); | ||
| 2004 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2005 | ts->stat = SAS_OPEN_REJECT; | ||
| 2006 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 2007 | break; | ||
| 2008 | case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: | ||
| 2009 | PM8001_IO_DBG(pm8001_ha, | ||
| 2010 | pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" | ||
| 2011 | "_SUPPORTED\n")); | ||
| 2012 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2013 | ts->stat = SAS_OPEN_REJECT; | ||
| 2014 | ts->open_rej_reason = SAS_OREJ_EPROTO; | ||
| 2015 | break; | ||
| 2016 | case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: | ||
| 2017 | PM8001_IO_DBG(pm8001_ha, | ||
| 2018 | pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); | ||
| 2019 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2020 | ts->stat = SAS_OPEN_REJECT; | ||
| 2021 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 2022 | break; | ||
| 2023 | case IO_OPEN_CNX_ERROR_BREAK: | ||
| 2024 | PM8001_IO_DBG(pm8001_ha, | ||
| 2025 | pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); | ||
| 2026 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2027 | ts->stat = SAS_OPEN_REJECT; | ||
| 2028 | ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; | ||
| 2029 | break; | ||
| 2030 | case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: | ||
| 2031 | PM8001_IO_DBG(pm8001_ha, | ||
| 2032 | pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); | ||
| 2033 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2034 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2035 | if (!t->uldd_task) { | ||
| 2036 | pm8001_handle_event(pm8001_ha, | ||
| 2037 | pm8001_dev, | ||
| 2038 | IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); | ||
| 2039 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2040 | ts->stat = SAS_QUEUE_FULL; | ||
| 2041 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2042 | mb();/*in order to force CPU ordering*/ | ||
| 2043 | t->task_done(t); | ||
| 2044 | return; | ||
| 2045 | } | ||
| 2046 | break; | ||
| 2047 | case IO_OPEN_CNX_ERROR_BAD_DESTINATION: | ||
| 2048 | PM8001_IO_DBG(pm8001_ha, | ||
| 2049 | pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); | ||
| 2050 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2051 | ts->stat = SAS_OPEN_REJECT; | ||
| 2052 | ts->open_rej_reason = SAS_OREJ_BAD_DEST; | ||
| 2053 | if (!t->uldd_task) { | ||
| 2054 | pm8001_handle_event(pm8001_ha, | ||
| 2055 | pm8001_dev, | ||
| 2056 | IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); | ||
| 2057 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2058 | ts->stat = SAS_QUEUE_FULL; | ||
| 2059 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2060 | mb();/*ditto*/ | ||
| 2061 | t->task_done(t); | ||
| 2062 | return; | ||
| 2063 | } | ||
| 2064 | break; | ||
| 2065 | case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: | ||
| 2066 | PM8001_IO_DBG(pm8001_ha, | ||
| 2067 | pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" | ||
| 2068 | "NOT_SUPPORTED\n")); | ||
| 2069 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2070 | ts->stat = SAS_OPEN_REJECT; | ||
| 2071 | ts->open_rej_reason = SAS_OREJ_CONN_RATE; | ||
| 2072 | break; | ||
| 2073 | case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: | ||
| 2074 | PM8001_IO_DBG(pm8001_ha, | ||
| 2075 | pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES" | ||
| 2076 | "_BUSY\n")); | ||
| 2077 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2078 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2079 | if (!t->uldd_task) { | ||
| 2080 | pm8001_handle_event(pm8001_ha, | ||
| 2081 | pm8001_dev, | ||
| 2082 | IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); | ||
| 2083 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2084 | ts->stat = SAS_QUEUE_FULL; | ||
| 2085 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2086 | mb();/* ditto*/ | ||
| 2087 | t->task_done(t); | ||
| 2088 | return; | ||
| 2089 | } | ||
| 2090 | break; | ||
| 2091 | case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: | ||
| 2092 | PM8001_IO_DBG(pm8001_ha, | ||
| 2093 | pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); | ||
| 2094 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2095 | ts->stat = SAS_OPEN_REJECT; | ||
| 2096 | ts->open_rej_reason = SAS_OREJ_WRONG_DEST; | ||
| 2097 | break; | ||
| 2098 | case IO_XFER_ERROR_NAK_RECEIVED: | ||
| 2099 | PM8001_IO_DBG(pm8001_ha, | ||
| 2100 | pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); | ||
| 2101 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2102 | ts->stat = SAS_NAK_R_ERR; | ||
| 2103 | break; | ||
| 2104 | case IO_XFER_ERROR_ACK_NAK_TIMEOUT: | ||
| 2105 | PM8001_IO_DBG(pm8001_ha, | ||
| 2106 | pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); | ||
| 2107 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2108 | ts->stat = SAS_NAK_R_ERR; | ||
| 2109 | break; | ||
| 2110 | case IO_XFER_ERROR_DMA: | ||
| 2111 | PM8001_IO_DBG(pm8001_ha, | ||
| 2112 | pm8001_printk("IO_XFER_ERROR_DMA\n")); | ||
| 2113 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2114 | ts->stat = SAS_ABORTED_TASK; | ||
| 2115 | break; | ||
| 2116 | case IO_XFER_ERROR_SATA_LINK_TIMEOUT: | ||
| 2117 | PM8001_IO_DBG(pm8001_ha, | ||
| 2118 | pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); | ||
| 2119 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2120 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2121 | break; | ||
| 2122 | case IO_XFER_ERROR_REJECTED_NCQ_MODE: | ||
| 2123 | PM8001_IO_DBG(pm8001_ha, | ||
| 2124 | pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); | ||
| 2125 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2126 | ts->stat = SAS_DATA_UNDERRUN; | ||
| 2127 | break; | ||
| 2128 | case IO_XFER_OPEN_RETRY_TIMEOUT: | ||
| 2129 | PM8001_IO_DBG(pm8001_ha, | ||
| 2130 | pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); | ||
| 2131 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2132 | ts->stat = SAS_OPEN_TO; | ||
| 2133 | break; | ||
| 2134 | case IO_PORT_IN_RESET: | ||
| 2135 | PM8001_IO_DBG(pm8001_ha, | ||
| 2136 | pm8001_printk("IO_PORT_IN_RESET\n")); | ||
| 2137 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2138 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2139 | break; | ||
| 2140 | case IO_DS_NON_OPERATIONAL: | ||
| 2141 | PM8001_IO_DBG(pm8001_ha, | ||
| 2142 | pm8001_printk("IO_DS_NON_OPERATIONAL\n")); | ||
| 2143 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2144 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2145 | if (!t->uldd_task) { | ||
| 2146 | pm8001_handle_event(pm8001_ha, pm8001_dev, | ||
| 2147 | IO_DS_NON_OPERATIONAL); | ||
| 2148 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2149 | ts->stat = SAS_QUEUE_FULL; | ||
| 2150 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2151 | mb();/*ditto*/ | ||
| 2152 | t->task_done(t); | ||
| 2153 | return; | ||
| 2154 | } | ||
| 2155 | break; | ||
| 2156 | case IO_DS_IN_RECOVERY: | ||
| 2157 | PM8001_IO_DBG(pm8001_ha, | ||
| 2158 | pm8001_printk(" IO_DS_IN_RECOVERY\n")); | ||
| 2159 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2160 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2161 | break; | ||
| 2162 | case IO_DS_IN_ERROR: | ||
| 2163 | PM8001_IO_DBG(pm8001_ha, | ||
| 2164 | pm8001_printk("IO_DS_IN_ERROR\n")); | ||
| 2165 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2166 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2167 | if (!t->uldd_task) { | ||
| 2168 | pm8001_handle_event(pm8001_ha, pm8001_dev, | ||
| 2169 | IO_DS_IN_ERROR); | ||
| 2170 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2171 | ts->stat = SAS_QUEUE_FULL; | ||
| 2172 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2173 | mb();/*ditto*/ | ||
| 2174 | t->task_done(t); | ||
| 2175 | return; | ||
| 2176 | } | ||
| 2177 | break; | ||
| 2178 | case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: | ||
| 2179 | PM8001_IO_DBG(pm8001_ha, | ||
| 2180 | pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); | ||
| 2181 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2182 | ts->stat = SAS_OPEN_REJECT; | ||
| 2183 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 2184 | default: | ||
| 2185 | PM8001_IO_DBG(pm8001_ha, | ||
| 2186 | pm8001_printk("Unknown status 0x%x\n", status)); | ||
| 2187 | /* not allowed case. Therefore, return failed status */ | ||
| 2188 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2189 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2190 | break; | ||
| 2191 | } | ||
| 2192 | spin_lock_irqsave(&t->task_state_lock, flags); | ||
| 2193 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 2194 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 2195 | t->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 2196 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | ||
| 2197 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 2198 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 2199 | pm8001_printk("task 0x%p done with io_status 0x%x" | ||
| 2200 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", | ||
| 2201 | t, status, ts->resp, ts->stat)); | ||
| 2202 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2203 | } else { | ||
| 2204 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 2205 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2206 | mb();/* ditto */ | ||
| 2207 | t->task_done(t); | ||
| 2208 | } | ||
| 2209 | } | ||
| 2210 | |||
| 2211 | /*See the comments for mpi_ssp_completion */ | ||
| 2212 | static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) | ||
| 2213 | { | ||
| 2214 | struct sas_task *t; | ||
| 2215 | unsigned long flags; | ||
| 2216 | struct task_status_struct *ts; | ||
| 2217 | struct pm8001_ccb_info *ccb; | ||
| 2218 | struct pm8001_device *pm8001_dev; | ||
| 2219 | struct sata_event_resp *psataPayload = | ||
| 2220 | (struct sata_event_resp *)(piomb + 4); | ||
| 2221 | u32 event = le32_to_cpu(psataPayload->event); | ||
| 2222 | u32 tag = le32_to_cpu(psataPayload->tag); | ||
| 2223 | u32 port_id = le32_to_cpu(psataPayload->port_id); | ||
| 2224 | u32 dev_id = le32_to_cpu(psataPayload->device_id); | ||
| 2225 | |||
| 2226 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 2227 | t = ccb->task; | ||
| 2228 | pm8001_dev = ccb->device; | ||
| 2229 | if (event) | ||
| 2230 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 2231 | pm8001_printk("sata IO status 0x%x\n", event)); | ||
| 2232 | if (unlikely(!t || !t->lldd_task || !t->dev)) | ||
| 2233 | return; | ||
| 2234 | ts = &t->task_status; | ||
| 2235 | PM8001_IO_DBG(pm8001_ha, | ||
| 2236 | pm8001_printk("port_id = %x,device_id = %x\n", | ||
| 2237 | port_id, dev_id)); | ||
| 2238 | switch (event) { | ||
| 2239 | case IO_OVERFLOW: | ||
| 2240 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); | ||
| 2241 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2242 | ts->stat = SAS_DATA_OVERRUN; | ||
| 2243 | ts->residual = 0; | ||
| 2244 | if (pm8001_dev) | ||
| 2245 | pm8001_dev->running_req--; | ||
| 2246 | break; | ||
| 2247 | case IO_XFER_ERROR_BREAK: | ||
| 2248 | PM8001_IO_DBG(pm8001_ha, | ||
| 2249 | pm8001_printk("IO_XFER_ERROR_BREAK\n")); | ||
| 2250 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2251 | ts->stat = SAS_INTERRUPTED; | ||
| 2252 | break; | ||
| 2253 | case IO_XFER_ERROR_PHY_NOT_READY: | ||
| 2254 | PM8001_IO_DBG(pm8001_ha, | ||
| 2255 | pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); | ||
| 2256 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2257 | ts->stat = SAS_OPEN_REJECT; | ||
| 2258 | ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; | ||
| 2259 | break; | ||
| 2260 | case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: | ||
| 2261 | PM8001_IO_DBG(pm8001_ha, | ||
| 2262 | pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" | ||
| 2263 | "_SUPPORTED\n")); | ||
| 2264 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2265 | ts->stat = SAS_OPEN_REJECT; | ||
| 2266 | ts->open_rej_reason = SAS_OREJ_EPROTO; | ||
| 2267 | break; | ||
| 2268 | case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: | ||
| 2269 | PM8001_IO_DBG(pm8001_ha, | ||
| 2270 | pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); | ||
| 2271 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2272 | ts->stat = SAS_OPEN_REJECT; | ||
| 2273 | ts->open_rej_reason = SAS_OREJ_UNKNOWN; | ||
| 2274 | break; | ||
| 2275 | case IO_OPEN_CNX_ERROR_BREAK: | ||
| 2276 | PM8001_IO_DBG(pm8001_ha, | ||
| 2277 | pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); | ||
| 2278 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2279 | ts->stat = SAS_OPEN_REJECT; | ||
| 2280 | ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; | ||
| 2281 | break; | ||
| 2282 | case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: | ||
| 2283 | PM8001_IO_DBG(pm8001_ha, | ||
| 2284 | pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); | ||
| 2285 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2286 | ts->stat = SAS_DEV_NO_RESPONSE; | ||
| 2287 | if (!t->uldd_task) { | ||
| 2288 | pm8001_handle_event(pm8001_ha, | ||
| 2289 | pm8001_dev, | ||
| 2290 | IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); | ||
| 2291 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2292 | ts->stat = SAS_QUEUE_FULL; | ||
| 2293 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2294 | mb();/*ditto*/ | ||
| 2295 | t->task_done(t); | ||
| 2296 | return; | ||
| 2297 | } | ||
| 2298 | break; | ||
| 2299 | case IO_OPEN_CNX_ERROR_BAD_DESTINATION: | ||
| 2300 | PM8001_IO_DBG(pm8001_ha, | ||
| 2301 | pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); | ||
| 2302 | ts->resp = SAS_TASK_UNDELIVERED; | ||
| 2303 | ts->stat = SAS_OPEN_REJECT; | ||
| 2304 | ts->open_rej_reason = SAS_OREJ_BAD_DEST; | ||
| 2305 | break; | ||
| 2306 | case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: | ||
| 2307 | PM8001_IO_DBG(pm8001_ha, | ||
| 2308 | pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" | ||
| 2309 | "NOT_SUPPORTED\n")); | ||
| 2310 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2311 | ts->stat = SAS_OPEN_REJECT; | ||
| 2312 | ts->open_rej_reason = SAS_OREJ_CONN_RATE; | ||
| 2313 | break; | ||
| 2314 | case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: | ||
| 2315 | PM8001_IO_DBG(pm8001_ha, | ||
| 2316 | pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); | ||
| 2317 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2318 | ts->stat = SAS_OPEN_REJECT; | ||
| 2319 | ts->open_rej_reason = SAS_OREJ_WRONG_DEST; | ||
| 2320 | break; | ||
| 2321 | case IO_XFER_ERROR_NAK_RECEIVED: | ||
| 2322 | PM8001_IO_DBG(pm8001_ha, | ||
| 2323 | pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); | ||
| 2324 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2325 | ts->stat = SAS_NAK_R_ERR; | ||
| 2326 | break; | ||
| 2327 | case IO_XFER_ERROR_PEER_ABORTED: | ||
| 2328 | PM8001_IO_DBG(pm8001_ha, | ||
| 2329 | pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); | ||
| 2330 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2331 | ts->stat = SAS_NAK_R_ERR; | ||
| 2332 | break; | ||
| 2333 | case IO_XFER_ERROR_REJECTED_NCQ_MODE: | ||
| 2334 | PM8001_IO_DBG(pm8001_ha, | ||
| 2335 | pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); | ||
| 2336 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2337 | ts->stat = SAS_DATA_UNDERRUN; | ||
| 2338 | break; | ||
| 2339 | case IO_XFER_OPEN_RETRY_TIMEOUT: | ||
| 2340 | PM8001_IO_DBG(pm8001_ha, | ||
| 2341 | pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); | ||
| 2342 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2343 | ts->stat = SAS_OPEN_TO; | ||
| 2344 | break; | ||
| 2345 | case IO_XFER_ERROR_UNEXPECTED_PHASE: | ||
| 2346 | PM8001_IO_DBG(pm8001_ha, | ||
| 2347 | pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); | ||
| 2348 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2349 | ts->stat = SAS_OPEN_TO; | ||
| 2350 | break; | ||
| 2351 | case IO_XFER_ERROR_XFER_RDY_OVERRUN: | ||
| 2352 | PM8001_IO_DBG(pm8001_ha, | ||
| 2353 | pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); | ||
| 2354 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2355 | ts->stat = SAS_OPEN_TO; | ||
| 2356 | break; | ||
| 2357 | case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: | ||
| 2358 | PM8001_IO_DBG(pm8001_ha, | ||
| 2359 | pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); | ||
| 2360 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2361 | ts->stat = SAS_OPEN_TO; | ||
| 2362 | break; | ||
| 2363 | case IO_XFER_ERROR_OFFSET_MISMATCH: | ||
| 2364 | PM8001_IO_DBG(pm8001_ha, | ||
| 2365 | pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); | ||
| 2366 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2367 | ts->stat = SAS_OPEN_TO; | ||
| 2368 | break; | ||
| 2369 | case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: | ||
| 2370 | PM8001_IO_DBG(pm8001_ha, | ||
| 2371 | pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); | ||
| 2372 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2373 | ts->stat = SAS_OPEN_TO; | ||
| 2374 | break; | ||
| 2375 | case IO_XFER_CMD_FRAME_ISSUED: | ||
| 2376 | PM8001_IO_DBG(pm8001_ha, | ||
| 2377 | pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); | ||
| 2378 | break; | ||
| 2379 | case IO_XFER_PIO_SETUP_ERROR: | ||
| 2380 | PM8001_IO_DBG(pm8001_ha, | ||
| 2381 | pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); | ||
| 2382 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2383 | ts->stat = SAS_OPEN_TO; | ||
| 2384 | break; | ||
| 2385 | default: | ||
| 2386 | PM8001_IO_DBG(pm8001_ha, | ||
| 2387 | pm8001_printk("Unknown status 0x%x\n", event)); | ||
| 2388 | /* not allowed case. Therefore, return failed status */ | ||
| 2389 | ts->resp = SAS_TASK_COMPLETE; | ||
| 2390 | ts->stat = SAS_OPEN_TO; | ||
| 2391 | break; | ||
| 2392 | } | ||
| 2393 | spin_lock_irqsave(&t->task_state_lock, flags); | ||
| 2394 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 2395 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 2396 | t->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 2397 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | ||
| 2398 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 2399 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 2400 | pm8001_printk("task 0x%p done with io_status 0x%x" | ||
| 2401 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", | ||
| 2402 | t, event, ts->resp, ts->stat)); | ||
| 2403 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2404 | } else { | ||
| 2405 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 2406 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | ||
| 2407 | mb();/* in order to force CPU ordering */ | ||
| 2408 | t->task_done(t); | ||
| 2409 | } | ||
| 2410 | } | ||
| 2411 | |||
/*
 * mpi_smp_completion - handle an SMP completion IOMB from the controller.
 * (See the comments for mpi_ssp_completion for the overall completion flow.)
 * @pm8001_ha: our hba card information
 * @piomb: inbound message buffer; the completion payload starts 4 bytes in.
 *
 * Maps the firmware status code onto libsas task_status fields, then runs
 * the common completion tail (free ccb, call t->task_done unless aborted).
 */
static void
mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 param;
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status;
	u32 tag;
	struct smp_completion_resp *psmpPayload;
	struct task_status_struct *ts;
	struct pm8001_device *pm8001_dev;

	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
	status = le32_to_cpu(psmpPayload->status);
	tag = le32_to_cpu(psmpPayload->tag);

	ccb = &pm8001_ha->ccb_info[tag];
	param = le32_to_cpu(psmpPayload->param);
	t = ccb->task;
	/* NOTE(review): t is dereferenced here before the !t guard below
	 * (unlike mpi_sata_event, which checks first) - potential NULL
	 * deref if the ccb was already reaped; confirm and reorder. */
	ts = &t->task_status;
	pm8001_dev = ccb->device;
	if (status)
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("smp IO status 0x%x\n", status));
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;

	switch (status) {
	case IO_SUCCESS:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAM_GOOD;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_ABORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_ABORTED IOMB\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_OVERFLOW:
		/* NOTE(review): OVERFLOW case but the message says
		 * "IO_UNDERFLOW" - looks like a copy/paste slip. */
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_NO_DEVICE:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_PHY_DOWN;
		break;
	case IO_ERROR_HW_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAM_BUSY;
		break;
	case IO_XFER_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAM_BUSY;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAM_BUSY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		/* Unlike the SSP/SATA paths this always reports the nexus
		 * loss, regardless of t->uldd_task. */
		pm8001_handle_event(pm8001_ha,
			pm8001_dev,
			IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
			"NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_RX_FRAME:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_ERROR_INTERNAL_SMP_RESOURCE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_QUEUE_FULL;
		break;
	case IO_PORT_IN_RESET:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_PORT_IN_RESET\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_DS_NON_OPERATIONAL:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;
	case IO_DS_IN_RECOVERY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_IN_RECOVERY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", status));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* not allowed case. Therefore, return failed status */
		break;
	}
	/* Common completion tail: mark the task done; if the upper layer
	 * already aborted it, just free the ccb without calling task_done. */
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
			" io_status 0x%x resp 0x%x "
			"stat 0x%x but aborted by upper layer!\n",
			t, status, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}
| 2613 | |||
| 2614 | static void | ||
| 2615 | mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2616 | { | ||
| 2617 | struct set_dev_state_resp *pPayload = | ||
| 2618 | (struct set_dev_state_resp *)(piomb + 4); | ||
| 2619 | u32 tag = le32_to_cpu(pPayload->tag); | ||
| 2620 | struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; | ||
| 2621 | struct pm8001_device *pm8001_dev = ccb->device; | ||
| 2622 | u32 status = le32_to_cpu(pPayload->status); | ||
| 2623 | u32 device_id = le32_to_cpu(pPayload->device_id); | ||
| 2624 | u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; | ||
| 2625 | u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; | ||
| 2626 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " | ||
| 2627 | "from 0x%x to 0x%x status = 0x%x!\n", | ||
| 2628 | device_id, pds, nds, status)); | ||
| 2629 | complete(pm8001_dev->setds_completion); | ||
| 2630 | ccb->task = NULL; | ||
| 2631 | ccb->ccb_tag = 0xFFFFFFFF; | ||
| 2632 | pm8001_ccb_free(pm8001_ha, tag); | ||
| 2633 | } | ||
| 2634 | |||
| 2635 | static void | ||
| 2636 | mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2637 | { | ||
| 2638 | struct get_nvm_data_resp *pPayload = | ||
| 2639 | (struct get_nvm_data_resp *)(piomb + 4); | ||
| 2640 | u32 tag = le32_to_cpu(pPayload->tag); | ||
| 2641 | struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; | ||
| 2642 | u32 dlen_status = le32_to_cpu(pPayload->dlen_status); | ||
| 2643 | complete(pm8001_ha->nvmd_completion); | ||
| 2644 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n")); | ||
| 2645 | if ((dlen_status & NVMD_STAT) != 0) { | ||
| 2646 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 2647 | pm8001_printk("Set nvm data error!\n")); | ||
| 2648 | return; | ||
| 2649 | } | ||
| 2650 | ccb->task = NULL; | ||
| 2651 | ccb->ccb_tag = 0xFFFFFFFF; | ||
| 2652 | pm8001_ccb_free(pm8001_ha, tag); | ||
| 2653 | } | ||
| 2654 | |||
| 2655 | static void | ||
| 2656 | mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2657 | { | ||
| 2658 | struct fw_control_ex *fw_control_context; | ||
| 2659 | struct get_nvm_data_resp *pPayload = | ||
| 2660 | (struct get_nvm_data_resp *)(piomb + 4); | ||
| 2661 | u32 tag = le32_to_cpu(pPayload->tag); | ||
| 2662 | struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; | ||
| 2663 | u32 dlen_status = le32_to_cpu(pPayload->dlen_status); | ||
| 2664 | u32 ir_tds_bn_dps_das_nvm = | ||
| 2665 | le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); | ||
| 2666 | void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; | ||
| 2667 | fw_control_context = ccb->fw_control_context; | ||
| 2668 | |||
| 2669 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); | ||
| 2670 | if ((dlen_status & NVMD_STAT) != 0) { | ||
| 2671 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 2672 | pm8001_printk("Get nvm data error!\n")); | ||
| 2673 | complete(pm8001_ha->nvmd_completion); | ||
| 2674 | return; | ||
| 2675 | } | ||
| 2676 | |||
| 2677 | if (ir_tds_bn_dps_das_nvm & IPMode) { | ||
| 2678 | /* indirect mode - IR bit set */ | ||
| 2679 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2680 | pm8001_printk("Get NVMD success, IR=1\n")); | ||
| 2681 | if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) { | ||
| 2682 | if (ir_tds_bn_dps_das_nvm == 0x80a80200) { | ||
| 2683 | memcpy(pm8001_ha->sas_addr, | ||
| 2684 | ((u8 *)virt_addr + 4), | ||
| 2685 | SAS_ADDR_SIZE); | ||
| 2686 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2687 | pm8001_printk("Get SAS address" | ||
| 2688 | " from VPD successfully!\n")); | ||
| 2689 | } | ||
| 2690 | } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM) | ||
| 2691 | || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) || | ||
| 2692 | ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) { | ||
| 2693 | ; | ||
| 2694 | } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP) | ||
| 2695 | || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) { | ||
| 2696 | ; | ||
| 2697 | } else { | ||
| 2698 | /* Should not be happened*/ | ||
| 2699 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2700 | pm8001_printk("(IR=1)Wrong Device type 0x%x\n", | ||
| 2701 | ir_tds_bn_dps_das_nvm)); | ||
| 2702 | } | ||
| 2703 | } else /* direct mode */{ | ||
| 2704 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2705 | pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", | ||
| 2706 | (dlen_status & NVMD_LEN) >> 24)); | ||
| 2707 | } | ||
| 2708 | memcpy(fw_control_context->usrAddr, | ||
| 2709 | pm8001_ha->memoryMap.region[NVMD].virt_ptr, | ||
| 2710 | fw_control_context->len); | ||
| 2711 | complete(pm8001_ha->nvmd_completion); | ||
| 2712 | ccb->task = NULL; | ||
| 2713 | ccb->ccb_tag = 0xFFFFFFFF; | ||
| 2714 | pm8001_ccb_free(pm8001_ha, tag); | ||
| 2715 | } | ||
| 2716 | |||
| 2717 | static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2718 | { | ||
| 2719 | struct local_phy_ctl_resp *pPayload = | ||
| 2720 | (struct local_phy_ctl_resp *)(piomb + 4); | ||
| 2721 | u32 status = le32_to_cpu(pPayload->status); | ||
| 2722 | u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS; | ||
| 2723 | u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; | ||
| 2724 | if (status != 0) { | ||
| 2725 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2726 | pm8001_printk("%x phy execute %x phy op failed! \n", | ||
| 2727 | phy_id, phy_op)); | ||
| 2728 | } else | ||
| 2729 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2730 | pm8001_printk("%x phy execute %x phy op success! \n", | ||
| 2731 | phy_id, phy_op)); | ||
| 2732 | return 0; | ||
| 2733 | } | ||
| 2734 | |||
| 2735 | /** | ||
| 2736 | * pm8001_bytes_dmaed - one of the interface function communication with libsas | ||
| 2737 | * @pm8001_ha: our hba card information | ||
| 2738 | * @i: which phy that received the event. | ||
| 2739 | * | ||
| 2740 | * when HBA driver received the identify done event or initiate FIS received | ||
| 2741 | * event(for SATA), it will invoke this function to notify the sas layer that | ||
| 2742 | * the sas toplogy has formed, please discover the the whole sas domain, | ||
| 2743 | * while receive a broadcast(change) primitive just tell the sas | ||
| 2744 | * layer to discover the changed domain rather than the whole domain. | ||
| 2745 | */ | ||
| 2746 | static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) | ||
| 2747 | { | ||
| 2748 | struct pm8001_phy *phy = &pm8001_ha->phy[i]; | ||
| 2749 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
| 2750 | struct sas_ha_struct *sas_ha; | ||
| 2751 | if (!phy->phy_attached) | ||
| 2752 | return; | ||
| 2753 | |||
| 2754 | sas_ha = pm8001_ha->sas; | ||
| 2755 | if (sas_phy->phy) { | ||
| 2756 | struct sas_phy *sphy = sas_phy->phy; | ||
| 2757 | sphy->negotiated_linkrate = sas_phy->linkrate; | ||
| 2758 | sphy->minimum_linkrate = phy->minimum_linkrate; | ||
| 2759 | sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; | ||
| 2760 | sphy->maximum_linkrate = phy->maximum_linkrate; | ||
| 2761 | sphy->maximum_linkrate_hw = phy->maximum_linkrate; | ||
| 2762 | } | ||
| 2763 | |||
| 2764 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
| 2765 | struct sas_identify_frame *id; | ||
| 2766 | id = (struct sas_identify_frame *)phy->frame_rcvd; | ||
| 2767 | id->dev_type = phy->identify.device_type; | ||
| 2768 | id->initiator_bits = SAS_PROTOCOL_ALL; | ||
| 2769 | id->target_bits = phy->identify.target_port_protocols; | ||
| 2770 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
| 2771 | /*Nothing*/ | ||
| 2772 | } | ||
| 2773 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d byte dmaded.\n", i)); | ||
| 2774 | |||
| 2775 | sas_phy->frame_rcvd_size = phy->frame_rcvd_size; | ||
| 2776 | pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED); | ||
| 2777 | } | ||
| 2778 | |||
| 2779 | /* Get the link rate speed */ | ||
| 2780 | static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) | ||
| 2781 | { | ||
| 2782 | struct sas_phy *sas_phy = phy->sas_phy.phy; | ||
| 2783 | |||
| 2784 | switch (link_rate) { | ||
| 2785 | case PHY_SPEED_60: | ||
| 2786 | phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; | ||
| 2787 | phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; | ||
| 2788 | break; | ||
| 2789 | case PHY_SPEED_30: | ||
| 2790 | phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; | ||
| 2791 | phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; | ||
| 2792 | break; | ||
| 2793 | case PHY_SPEED_15: | ||
| 2794 | phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; | ||
| 2795 | phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; | ||
| 2796 | break; | ||
| 2797 | } | ||
| 2798 | sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; | ||
| 2799 | sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS; | ||
| 2800 | sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; | ||
| 2801 | sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; | ||
| 2802 | sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; | ||
| 2803 | } | ||
| 2804 | |||
| 2805 | /** | ||
| 2806 | * asd_get_attached_sas_addr -- extract/generate attached SAS address | ||
| 2807 | * @phy: pointer to asd_phy | ||
| 2808 | * @sas_addr: pointer to buffer where the SAS address is to be written | ||
| 2809 | * | ||
| 2810 | * This function extracts the SAS address from an IDENTIFY frame | ||
| 2811 | * received. If OOB is SATA, then a SAS address is generated from the | ||
| 2812 | * HA tables. | ||
| 2813 | * | ||
| 2814 | * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame | ||
| 2815 | * buffer. | ||
| 2816 | */ | ||
| 2817 | static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, | ||
| 2818 | u8 *sas_addr) | ||
| 2819 | { | ||
| 2820 | if (phy->sas_phy.frame_rcvd[0] == 0x34 | ||
| 2821 | && phy->sas_phy.oob_mode == SATA_OOB_MODE) { | ||
| 2822 | struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha; | ||
| 2823 | /* FIS device-to-host */ | ||
| 2824 | u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr); | ||
| 2825 | addr += phy->sas_phy.id; | ||
| 2826 | *(__be64 *)sas_addr = cpu_to_be64(addr); | ||
| 2827 | } else { | ||
| 2828 | struct sas_identify_frame *idframe = | ||
| 2829 | (void *) phy->sas_phy.frame_rcvd; | ||
| 2830 | memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); | ||
| 2831 | } | ||
| 2832 | } | ||
| 2833 | |||
| 2834 | /** | ||
| 2835 | * pm8001_hw_event_ack_req- For PM8001,some events need to acknowage to FW. | ||
| 2836 | * @pm8001_ha: our hba card information | ||
| 2837 | * @Qnum: the outbound queue message number. | ||
| 2838 | * @SEA: source of event to ack | ||
| 2839 | * @port_id: port id. | ||
| 2840 | * @phyId: phy id. | ||
| 2841 | * @param0: parameter 0. | ||
| 2842 | * @param1: parameter 1. | ||
| 2843 | */ | ||
| 2844 | static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, | ||
| 2845 | u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) | ||
| 2846 | { | ||
| 2847 | struct hw_event_ack_req payload; | ||
| 2848 | u32 opc = OPC_INB_SAS_HW_EVENT_ACK; | ||
| 2849 | |||
| 2850 | struct inbound_queue_table *circularQ; | ||
| 2851 | |||
| 2852 | memset((u8 *)&payload, 0, sizeof(payload)); | ||
| 2853 | circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; | ||
| 2854 | payload.tag = 1; | ||
| 2855 | payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | | ||
| 2856 | ((phyId & 0x0F) << 4) | (port_id & 0x0F)); | ||
| 2857 | payload.param0 = cpu_to_le32(param0); | ||
| 2858 | payload.param1 = cpu_to_le32(param1); | ||
| 2859 | mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 2860 | } | ||
| 2861 | |||
| 2862 | static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, | ||
| 2863 | u32 phyId, u32 phy_op); | ||
| 2864 | |||
| 2865 | /** | ||
| 2866 | * hw_event_sas_phy_up -FW tells me a SAS phy up event. | ||
| 2867 | * @pm8001_ha: our hba card information | ||
| 2868 | * @piomb: IO message buffer | ||
| 2869 | */ | ||
| 2870 | static void | ||
| 2871 | hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2872 | { | ||
| 2873 | struct hw_event_resp *pPayload = | ||
| 2874 | (struct hw_event_resp *)(piomb + 4); | ||
| 2875 | u32 lr_evt_status_phyid_portid = | ||
| 2876 | le32_to_cpu(pPayload->lr_evt_status_phyid_portid); | ||
| 2877 | u8 link_rate = | ||
| 2878 | (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); | ||
| 2879 | u8 phy_id = | ||
| 2880 | (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); | ||
| 2881 | struct sas_ha_struct *sas_ha = pm8001_ha->sas; | ||
| 2882 | struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; | ||
| 2883 | unsigned long flags; | ||
| 2884 | u8 deviceType = pPayload->sas_identify.dev_type; | ||
| 2885 | |||
| 2886 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2887 | pm8001_printk("HW_EVENT_SAS_PHY_UP \n")); | ||
| 2888 | |||
| 2889 | switch (deviceType) { | ||
| 2890 | case SAS_PHY_UNUSED: | ||
| 2891 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2892 | pm8001_printk("device type no device.\n")); | ||
| 2893 | break; | ||
| 2894 | case SAS_END_DEVICE: | ||
| 2895 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); | ||
| 2896 | pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, | ||
| 2897 | PHY_NOTIFY_ENABLE_SPINUP); | ||
| 2898 | get_lrate_mode(phy, link_rate); | ||
| 2899 | break; | ||
| 2900 | case SAS_EDGE_EXPANDER_DEVICE: | ||
| 2901 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2902 | pm8001_printk("expander device.\n")); | ||
| 2903 | get_lrate_mode(phy, link_rate); | ||
| 2904 | break; | ||
| 2905 | case SAS_FANOUT_EXPANDER_DEVICE: | ||
| 2906 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2907 | pm8001_printk("fanout expander device.\n")); | ||
| 2908 | get_lrate_mode(phy, link_rate); | ||
| 2909 | break; | ||
| 2910 | default: | ||
| 2911 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2912 | pm8001_printk("unkown device type(%x)\n", deviceType)); | ||
| 2913 | break; | ||
| 2914 | } | ||
| 2915 | phy->phy_type |= PORT_TYPE_SAS; | ||
| 2916 | phy->identify.device_type = deviceType; | ||
| 2917 | phy->phy_attached = 1; | ||
| 2918 | if (phy->identify.device_type == SAS_END_DEV) | ||
| 2919 | phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; | ||
| 2920 | else if (phy->identify.device_type != NO_DEVICE) | ||
| 2921 | phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; | ||
| 2922 | phy->sas_phy.oob_mode = SAS_OOB_MODE; | ||
| 2923 | sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); | ||
| 2924 | spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); | ||
| 2925 | memcpy(phy->frame_rcvd, &pPayload->sas_identify, | ||
| 2926 | sizeof(struct sas_identify_frame)-4); | ||
| 2927 | phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; | ||
| 2928 | pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); | ||
| 2929 | spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); | ||
| 2930 | if (pm8001_ha->flags == PM8001F_RUN_TIME) | ||
| 2931 | mdelay(200);/*delay a moment to wait disk to spinup*/ | ||
| 2932 | pm8001_bytes_dmaed(pm8001_ha, phy_id); | ||
| 2933 | } | ||
| 2934 | |||
| 2935 | /** | ||
| 2936 | * hw_event_sata_phy_up -FW tells me a SATA phy up event. | ||
| 2937 | * @pm8001_ha: our hba card information | ||
| 2938 | * @piomb: IO message buffer | ||
| 2939 | */ | ||
| 2940 | static void | ||
| 2941 | hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2942 | { | ||
| 2943 | struct hw_event_resp *pPayload = | ||
| 2944 | (struct hw_event_resp *)(piomb + 4); | ||
| 2945 | u32 lr_evt_status_phyid_portid = | ||
| 2946 | le32_to_cpu(pPayload->lr_evt_status_phyid_portid); | ||
| 2947 | u8 link_rate = | ||
| 2948 | (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); | ||
| 2949 | u8 phy_id = | ||
| 2950 | (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); | ||
| 2951 | struct sas_ha_struct *sas_ha = pm8001_ha->sas; | ||
| 2952 | struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; | ||
| 2953 | unsigned long flags; | ||
| 2954 | get_lrate_mode(phy, link_rate); | ||
| 2955 | phy->phy_type |= PORT_TYPE_SATA; | ||
| 2956 | phy->phy_attached = 1; | ||
| 2957 | phy->sas_phy.oob_mode = SATA_OOB_MODE; | ||
| 2958 | sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); | ||
| 2959 | spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); | ||
| 2960 | memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), | ||
| 2961 | sizeof(struct dev_to_host_fis)); | ||
| 2962 | phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); | ||
| 2963 | phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; | ||
| 2964 | phy->identify.device_type = SATA_DEV; | ||
| 2965 | pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); | ||
| 2966 | spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); | ||
| 2967 | pm8001_bytes_dmaed(pm8001_ha, phy_id); | ||
| 2968 | } | ||
| 2969 | |||
| 2970 | /** | ||
| 2971 | * hw_event_phy_down -we should notify the libsas the phy is down. | ||
| 2972 | * @pm8001_ha: our hba card information | ||
| 2973 | * @piomb: IO message buffer | ||
| 2974 | */ | ||
| 2975 | static void | ||
| 2976 | hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 2977 | { | ||
| 2978 | struct hw_event_resp *pPayload = | ||
| 2979 | (struct hw_event_resp *)(piomb + 4); | ||
| 2980 | u32 lr_evt_status_phyid_portid = | ||
| 2981 | le32_to_cpu(pPayload->lr_evt_status_phyid_portid); | ||
| 2982 | u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); | ||
| 2983 | u8 phy_id = | ||
| 2984 | (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); | ||
| 2985 | u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); | ||
| 2986 | u8 portstate = (u8)(npip_portstate & 0x0000000F); | ||
| 2987 | |||
| 2988 | switch (portstate) { | ||
| 2989 | case PORT_VALID: | ||
| 2990 | break; | ||
| 2991 | case PORT_INVALID: | ||
| 2992 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2993 | pm8001_printk(" PortInvalid portID %d \n", port_id)); | ||
| 2994 | PM8001_MSG_DBG(pm8001_ha, | ||
| 2995 | pm8001_printk(" Last phy Down and port invalid\n")); | ||
| 2996 | pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, | ||
| 2997 | port_id, phy_id, 0, 0); | ||
| 2998 | break; | ||
| 2999 | case PORT_IN_RESET: | ||
| 3000 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3001 | pm8001_printk(" PortInReset portID %d \n", port_id)); | ||
| 3002 | break; | ||
| 3003 | case PORT_NOT_ESTABLISHED: | ||
| 3004 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3005 | pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); | ||
| 3006 | break; | ||
| 3007 | case PORT_LOSTCOMM: | ||
| 3008 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3009 | pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); | ||
| 3010 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3011 | pm8001_printk(" Last phy Down and port invalid\n")); | ||
| 3012 | pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, | ||
| 3013 | port_id, phy_id, 0, 0); | ||
| 3014 | break; | ||
| 3015 | default: | ||
| 3016 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3017 | pm8001_printk(" phy Down and(default) = %x\n", | ||
| 3018 | portstate)); | ||
| 3019 | break; | ||
| 3020 | |||
| 3021 | } | ||
| 3022 | } | ||
| 3023 | |||
| 3024 | /** | ||
| 3025 | * mpi_reg_resp -process register device ID response. | ||
| 3026 | * @pm8001_ha: our hba card information | ||
| 3027 | * @piomb: IO message buffer | ||
| 3028 | * | ||
| 3029 | * when sas layer find a device it will notify LLDD, then the driver register | ||
| 3030 | * the domain device to FW, this event is the return device ID which the FW | ||
| 3031 | * has assigned, from now,inter-communication with FW is no longer using the | ||
| 3032 | * SAS address, use device ID which FW assigned. | ||
| 3033 | */ | ||
| 3034 | static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 3035 | { | ||
| 3036 | u32 status; | ||
| 3037 | u32 device_id; | ||
| 3038 | u32 htag; | ||
| 3039 | struct pm8001_ccb_info *ccb; | ||
| 3040 | struct pm8001_device *pm8001_dev; | ||
| 3041 | struct dev_reg_resp *registerRespPayload = | ||
| 3042 | (struct dev_reg_resp *)(piomb + 4); | ||
| 3043 | |||
| 3044 | htag = le32_to_cpu(registerRespPayload->tag); | ||
| 3045 | ccb = &pm8001_ha->ccb_info[registerRespPayload->tag]; | ||
| 3046 | pm8001_dev = ccb->device; | ||
| 3047 | status = le32_to_cpu(registerRespPayload->status); | ||
| 3048 | device_id = le32_to_cpu(registerRespPayload->device_id); | ||
| 3049 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3050 | pm8001_printk(" register device is status = %d\n", status)); | ||
| 3051 | switch (status) { | ||
| 3052 | case DEVREG_SUCCESS: | ||
| 3053 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n")); | ||
| 3054 | pm8001_dev->device_id = device_id; | ||
| 3055 | break; | ||
| 3056 | case DEVREG_FAILURE_OUT_OF_RESOURCE: | ||
| 3057 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3058 | pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n")); | ||
| 3059 | break; | ||
| 3060 | case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED: | ||
| 3061 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3062 | pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n")); | ||
| 3063 | break; | ||
| 3064 | case DEVREG_FAILURE_INVALID_PHY_ID: | ||
| 3065 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3066 | pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n")); | ||
| 3067 | break; | ||
| 3068 | case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED: | ||
| 3069 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3070 | pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n")); | ||
| 3071 | break; | ||
| 3072 | case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE: | ||
| 3073 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3074 | pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n")); | ||
| 3075 | break; | ||
| 3076 | case DEVREG_FAILURE_PORT_NOT_VALID_STATE: | ||
| 3077 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3078 | pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n")); | ||
| 3079 | break; | ||
| 3080 | case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID: | ||
| 3081 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3082 | pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n")); | ||
| 3083 | break; | ||
| 3084 | default: | ||
| 3085 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3086 | pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n")); | ||
| 3087 | break; | ||
| 3088 | } | ||
| 3089 | complete(pm8001_dev->dcompletion); | ||
| 3090 | ccb->task = NULL; | ||
| 3091 | ccb->ccb_tag = 0xFFFFFFFF; | ||
| 3092 | pm8001_ccb_free(pm8001_ha, htag); | ||
| 3093 | return 0; | ||
| 3094 | } | ||
| 3095 | |||
| 3096 | static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 3097 | { | ||
| 3098 | u32 status; | ||
| 3099 | u32 device_id; | ||
| 3100 | struct dev_reg_resp *registerRespPayload = | ||
| 3101 | (struct dev_reg_resp *)(piomb + 4); | ||
| 3102 | |||
| 3103 | status = le32_to_cpu(registerRespPayload->status); | ||
| 3104 | device_id = le32_to_cpu(registerRespPayload->device_id); | ||
| 3105 | if (status != 0) | ||
| 3106 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3107 | pm8001_printk(" deregister device failed ,status = %x" | ||
| 3108 | ", device_id = %x\n", status, device_id)); | ||
| 3109 | return 0; | ||
| 3110 | } | ||
| 3111 | |||
/**
 * mpi_fw_flash_update_resp - process a firmware FLASH update response.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Logs the flash-update status, hands the status back to the waiting
 * ioctl path via fw_control->retcode, frees the DMA bounce buffer and
 * releases the ccb.
 */
static int
mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 status;
	struct fw_control_ex fw_control_context;
	struct fw_flash_Update_resp *ppayload =
		(struct fw_flash_Update_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(ppayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	status = le32_to_cpu(ppayload->status);
	/* Snapshot the context by value: the DMA buffer coordinates are
	 * needed for pci_free_consistent() below. */
	memcpy(&fw_control_context,
		ccb->fw_control_context,
		sizeof(fw_control_context));
	switch (status) {
	case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
		PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
		break;
	case FLASH_UPDATE_IN_PROGRESS:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
		break;
	case FLASH_UPDATE_HDR_ERR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
		break;
	case FLASH_UPDATE_OFFSET_ERR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
		break;
	case FLASH_UPDATE_CRC_ERR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
		break;
	case FLASH_UPDATE_LENGTH_ERR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
		break;
	case FLASH_UPDATE_HW_ERR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
		break;
	case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
		break;
	case FLASH_UPDATE_DISABLED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
		break;
	default:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("No matched status = %d\n", status));
		break;
	}
	/* Report the firmware status to the requester. */
	ccb->fw_control_context->fw_control->retcode = status;
	/* Free the DMA buffer using the snapshot taken above. */
	pci_free_consistent(pm8001_ha->pdev,
		fw_control_context.len,
		fw_control_context.virtAddr,
		fw_control_context.phys_addr);
	complete(pm8001_ha->nvmd_completion);
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_ccb_free(pm8001_ha, tag);
	return 0;
}
| 3178 | |||
| 3179 | static int | ||
| 3180 | mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) | ||
| 3181 | { | ||
| 3182 | u32 status; | ||
| 3183 | int i; | ||
| 3184 | struct general_event_resp *pPayload = | ||
| 3185 | (struct general_event_resp *)(piomb + 4); | ||
| 3186 | status = le32_to_cpu(pPayload->status); | ||
| 3187 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3188 | pm8001_printk(" status = 0x%x\n", status)); | ||
| 3189 | for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) | ||
| 3190 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3191 | pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i, | ||
| 3192 | pPayload->inb_IOMB_payload[i])); | ||
| 3193 | return 0; | ||
| 3194 | } | ||
| 3195 | |||
| 3196 | static int | ||
| 3197 | mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 3198 | { | ||
| 3199 | struct sas_task *t; | ||
| 3200 | struct pm8001_ccb_info *ccb; | ||
| 3201 | unsigned long flags; | ||
| 3202 | u32 status ; | ||
| 3203 | u32 tag, scp; | ||
| 3204 | struct task_status_struct *ts; | ||
| 3205 | |||
| 3206 | struct task_abort_resp *pPayload = | ||
| 3207 | (struct task_abort_resp *)(piomb + 4); | ||
| 3208 | ccb = &pm8001_ha->ccb_info[pPayload->tag]; | ||
| 3209 | t = ccb->task; | ||
| 3210 | |||
| 3211 | |||
| 3212 | status = le32_to_cpu(pPayload->status); | ||
| 3213 | tag = le32_to_cpu(pPayload->tag); | ||
| 3214 | scp = le32_to_cpu(pPayload->scp); | ||
| 3215 | PM8001_IO_DBG(pm8001_ha, | ||
| 3216 | pm8001_printk(" status = 0x%x\n", status)); | ||
| 3217 | if (t == NULL) | ||
| 3218 | return -1; | ||
| 3219 | ts = &t->task_status; | ||
| 3220 | if (status != 0) | ||
| 3221 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 3222 | pm8001_printk("task abort failed status 0x%x ," | ||
| 3223 | "tag = 0x%x, scp= 0x%x\n", status, tag, scp)); | ||
| 3224 | switch (status) { | ||
| 3225 | case IO_SUCCESS: | ||
| 3226 | PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); | ||
| 3227 | ts->resp = SAS_TASK_COMPLETE; | ||
| 3228 | ts->stat = SAM_GOOD; | ||
| 3229 | break; | ||
| 3230 | case IO_NOT_VALID: | ||
| 3231 | PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); | ||
| 3232 | ts->resp = TMF_RESP_FUNC_FAILED; | ||
| 3233 | break; | ||
| 3234 | } | ||
| 3235 | spin_lock_irqsave(&t->task_state_lock, flags); | ||
| 3236 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 3237 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 3238 | t->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 3239 | spin_unlock_irqrestore(&t->task_state_lock, flags); | ||
| 3240 | pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag); | ||
| 3241 | mb(); | ||
| 3242 | t->task_done(t); | ||
| 3243 | return 0; | ||
| 3244 | } | ||
| 3245 | |||
| 3246 | /** | ||
| 3247 | * mpi_hw_event -The hw event has come. | ||
| 3248 | * @pm8001_ha: our hba card information | ||
| 3249 | * @piomb: IO message buffer | ||
| 3250 | */ | ||
| 3251 | static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) | ||
| 3252 | { | ||
| 3253 | unsigned long flags; | ||
| 3254 | struct hw_event_resp *pPayload = | ||
| 3255 | (struct hw_event_resp *)(piomb + 4); | ||
| 3256 | u32 lr_evt_status_phyid_portid = | ||
| 3257 | le32_to_cpu(pPayload->lr_evt_status_phyid_portid); | ||
| 3258 | u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); | ||
| 3259 | u8 phy_id = | ||
| 3260 | (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); | ||
| 3261 | u16 eventType = | ||
| 3262 | (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8); | ||
| 3263 | u8 status = | ||
| 3264 | (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24); | ||
| 3265 | struct sas_ha_struct *sas_ha = pm8001_ha->sas; | ||
| 3266 | struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; | ||
| 3267 | struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; | ||
| 3268 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3269 | pm8001_printk("outbound queue HW event & event type : ")); | ||
| 3270 | switch (eventType) { | ||
| 3271 | case HW_EVENT_PHY_START_STATUS: | ||
| 3272 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3273 | pm8001_printk("HW_EVENT_PHY_START_STATUS" | ||
| 3274 | " status = %x\n", status)); | ||
| 3275 | if (status == 0) { | ||
| 3276 | phy->phy_state = 1; | ||
| 3277 | if (pm8001_ha->flags == PM8001F_RUN_TIME) | ||
| 3278 | complete(phy->enable_completion); | ||
| 3279 | } | ||
| 3280 | break; | ||
| 3281 | case HW_EVENT_SAS_PHY_UP: | ||
| 3282 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3283 | pm8001_printk("HW_EVENT_PHY_START_STATUS \n")); | ||
| 3284 | hw_event_sas_phy_up(pm8001_ha, piomb); | ||
| 3285 | break; | ||
| 3286 | case HW_EVENT_SATA_PHY_UP: | ||
| 3287 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3288 | pm8001_printk("HW_EVENT_SATA_PHY_UP \n")); | ||
| 3289 | hw_event_sata_phy_up(pm8001_ha, piomb); | ||
| 3290 | break; | ||
| 3291 | case HW_EVENT_PHY_STOP_STATUS: | ||
| 3292 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3293 | pm8001_printk("HW_EVENT_PHY_STOP_STATUS " | ||
| 3294 | "status = %x\n", status)); | ||
| 3295 | if (status == 0) | ||
| 3296 | phy->phy_state = 0; | ||
| 3297 | break; | ||
| 3298 | case HW_EVENT_SATA_SPINUP_HOLD: | ||
| 3299 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3300 | pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n")); | ||
| 3301 | sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); | ||
| 3302 | break; | ||
| 3303 | case HW_EVENT_PHY_DOWN: | ||
| 3304 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3305 | pm8001_printk("HW_EVENT_PHY_DOWN \n")); | ||
| 3306 | sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); | ||
| 3307 | phy->phy_attached = 0; | ||
| 3308 | phy->phy_state = 0; | ||
| 3309 | hw_event_phy_down(pm8001_ha, piomb); | ||
| 3310 | break; | ||
| 3311 | case HW_EVENT_PORT_INVALID: | ||
| 3312 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3313 | pm8001_printk("HW_EVENT_PORT_INVALID\n")); | ||
| 3314 | sas_phy_disconnected(sas_phy); | ||
| 3315 | phy->phy_attached = 0; | ||
| 3316 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3317 | break; | ||
| 3318 | /* the broadcast change primitive received, tell the LIBSAS this event | ||
| 3319 | to revalidate the sas domain*/ | ||
| 3320 | case HW_EVENT_BROADCAST_CHANGE: | ||
| 3321 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3322 | pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); | ||
| 3323 | pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, | ||
| 3324 | port_id, phy_id, 1, 0); | ||
| 3325 | spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); | ||
| 3326 | sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; | ||
| 3327 | spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); | ||
| 3328 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
| 3329 | break; | ||
| 3330 | case HW_EVENT_PHY_ERROR: | ||
| 3331 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3332 | pm8001_printk("HW_EVENT_PHY_ERROR\n")); | ||
| 3333 | sas_phy_disconnected(&phy->sas_phy); | ||
| 3334 | phy->phy_attached = 0; | ||
| 3335 | sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); | ||
| 3336 | break; | ||
| 3337 | case HW_EVENT_BROADCAST_EXP: | ||
| 3338 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3339 | pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); | ||
| 3340 | spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); | ||
| 3341 | sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; | ||
| 3342 | spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); | ||
| 3343 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
| 3344 | break; | ||
| 3345 | case HW_EVENT_LINK_ERR_INVALID_DWORD: | ||
| 3346 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3347 | pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); | ||
| 3348 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3349 | HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); | ||
| 3350 | sas_phy_disconnected(sas_phy); | ||
| 3351 | phy->phy_attached = 0; | ||
| 3352 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3353 | break; | ||
| 3354 | case HW_EVENT_LINK_ERR_DISPARITY_ERROR: | ||
| 3355 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3356 | pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); | ||
| 3357 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3358 | HW_EVENT_LINK_ERR_DISPARITY_ERROR, | ||
| 3359 | port_id, phy_id, 0, 0); | ||
| 3360 | sas_phy_disconnected(sas_phy); | ||
| 3361 | phy->phy_attached = 0; | ||
| 3362 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3363 | break; | ||
| 3364 | case HW_EVENT_LINK_ERR_CODE_VIOLATION: | ||
| 3365 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3366 | pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); | ||
| 3367 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3368 | HW_EVENT_LINK_ERR_CODE_VIOLATION, | ||
| 3369 | port_id, phy_id, 0, 0); | ||
| 3370 | sas_phy_disconnected(sas_phy); | ||
| 3371 | phy->phy_attached = 0; | ||
| 3372 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3373 | break; | ||
| 3374 | case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: | ||
| 3375 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3376 | pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); | ||
| 3377 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3378 | HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, | ||
| 3379 | port_id, phy_id, 0, 0); | ||
| 3380 | sas_phy_disconnected(sas_phy); | ||
| 3381 | phy->phy_attached = 0; | ||
| 3382 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3383 | break; | ||
| 3384 | case HW_EVENT_MALFUNCTION: | ||
| 3385 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3386 | pm8001_printk("HW_EVENT_MALFUNCTION\n")); | ||
| 3387 | break; | ||
| 3388 | case HW_EVENT_BROADCAST_SES: | ||
| 3389 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3390 | pm8001_printk("HW_EVENT_BROADCAST_SES\n")); | ||
| 3391 | spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); | ||
| 3392 | sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; | ||
| 3393 | spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); | ||
| 3394 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
| 3395 | break; | ||
| 3396 | case HW_EVENT_INBOUND_CRC_ERROR: | ||
| 3397 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3398 | pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); | ||
| 3399 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3400 | HW_EVENT_INBOUND_CRC_ERROR, | ||
| 3401 | port_id, phy_id, 0, 0); | ||
| 3402 | break; | ||
| 3403 | case HW_EVENT_HARD_RESET_RECEIVED: | ||
| 3404 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3405 | pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); | ||
| 3406 | sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); | ||
| 3407 | break; | ||
| 3408 | case HW_EVENT_ID_FRAME_TIMEOUT: | ||
| 3409 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3410 | pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); | ||
| 3411 | sas_phy_disconnected(sas_phy); | ||
| 3412 | phy->phy_attached = 0; | ||
| 3413 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3414 | break; | ||
| 3415 | case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: | ||
| 3416 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3417 | pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n")); | ||
| 3418 | pm8001_hw_event_ack_req(pm8001_ha, 0, | ||
| 3419 | HW_EVENT_LINK_ERR_PHY_RESET_FAILED, | ||
| 3420 | port_id, phy_id, 0, 0); | ||
| 3421 | sas_phy_disconnected(sas_phy); | ||
| 3422 | phy->phy_attached = 0; | ||
| 3423 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3424 | break; | ||
| 3425 | case HW_EVENT_PORT_RESET_TIMER_TMO: | ||
| 3426 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3427 | pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n")); | ||
| 3428 | sas_phy_disconnected(sas_phy); | ||
| 3429 | phy->phy_attached = 0; | ||
| 3430 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3431 | break; | ||
| 3432 | case HW_EVENT_PORT_RECOVERY_TIMER_TMO: | ||
| 3433 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3434 | pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n")); | ||
| 3435 | sas_phy_disconnected(sas_phy); | ||
| 3436 | phy->phy_attached = 0; | ||
| 3437 | sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); | ||
| 3438 | break; | ||
| 3439 | case HW_EVENT_PORT_RECOVER: | ||
| 3440 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3441 | pm8001_printk("HW_EVENT_PORT_RECOVER \n")); | ||
| 3442 | break; | ||
| 3443 | case HW_EVENT_PORT_RESET_COMPLETE: | ||
| 3444 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3445 | pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n")); | ||
| 3446 | break; | ||
| 3447 | case EVENT_BROADCAST_ASYNCH_EVENT: | ||
| 3448 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3449 | pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); | ||
| 3450 | break; | ||
| 3451 | default: | ||
| 3452 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3453 | pm8001_printk("Unknown event type = %x\n", eventType)); | ||
| 3454 | break; | ||
| 3455 | } | ||
| 3456 | return 0; | ||
| 3457 | } | ||
| 3458 | |||
| 3459 | /** | ||
| 3460 | * process_one_iomb - process one outbound Queue memory block | ||
| 3461 | * @pm8001_ha: our hba card information | ||
| 3462 | * @piomb: IO message buffer | ||
| 3463 | */ | ||
| 3464 | static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) | ||
| 3465 | { | ||
| 3466 | u32 pHeader = (u32)*(u32 *)piomb; | ||
| 3467 | u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF); | ||
| 3468 | |||
| 3469 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:")); | ||
| 3470 | |||
| 3471 | switch (opc) { | ||
| 3472 | case OPC_OUB_ECHO: | ||
| 3473 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n")); | ||
| 3474 | break; | ||
| 3475 | case OPC_OUB_HW_EVENT: | ||
| 3476 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3477 | pm8001_printk("OPC_OUB_HW_EVENT \n")); | ||
| 3478 | mpi_hw_event(pm8001_ha, piomb); | ||
| 3479 | break; | ||
| 3480 | case OPC_OUB_SSP_COMP: | ||
| 3481 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3482 | pm8001_printk("OPC_OUB_SSP_COMP \n")); | ||
| 3483 | mpi_ssp_completion(pm8001_ha, piomb); | ||
| 3484 | break; | ||
| 3485 | case OPC_OUB_SMP_COMP: | ||
| 3486 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3487 | pm8001_printk("OPC_OUB_SMP_COMP \n")); | ||
| 3488 | mpi_smp_completion(pm8001_ha, piomb); | ||
| 3489 | break; | ||
| 3490 | case OPC_OUB_LOCAL_PHY_CNTRL: | ||
| 3491 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3492 | pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); | ||
| 3493 | mpi_local_phy_ctl(pm8001_ha, piomb); | ||
| 3494 | break; | ||
| 3495 | case OPC_OUB_DEV_REGIST: | ||
| 3496 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3497 | pm8001_printk("OPC_OUB_DEV_REGIST \n")); | ||
| 3498 | mpi_reg_resp(pm8001_ha, piomb); | ||
| 3499 | break; | ||
| 3500 | case OPC_OUB_DEREG_DEV: | ||
| 3501 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3502 | pm8001_printk("unresgister the deviece \n")); | ||
| 3503 | mpi_dereg_resp(pm8001_ha, piomb); | ||
| 3504 | break; | ||
| 3505 | case OPC_OUB_GET_DEV_HANDLE: | ||
| 3506 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3507 | pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n")); | ||
| 3508 | break; | ||
| 3509 | case OPC_OUB_SATA_COMP: | ||
| 3510 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3511 | pm8001_printk("OPC_OUB_SATA_COMP \n")); | ||
| 3512 | mpi_sata_completion(pm8001_ha, piomb); | ||
| 3513 | break; | ||
| 3514 | case OPC_OUB_SATA_EVENT: | ||
| 3515 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3516 | pm8001_printk("OPC_OUB_SATA_EVENT \n")); | ||
| 3517 | mpi_sata_event(pm8001_ha, piomb); | ||
| 3518 | break; | ||
| 3519 | case OPC_OUB_SSP_EVENT: | ||
| 3520 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3521 | pm8001_printk("OPC_OUB_SSP_EVENT\n")); | ||
| 3522 | mpi_ssp_event(pm8001_ha, piomb); | ||
| 3523 | break; | ||
| 3524 | case OPC_OUB_DEV_HANDLE_ARRIV: | ||
| 3525 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3526 | pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); | ||
| 3527 | /*This is for target*/ | ||
| 3528 | break; | ||
| 3529 | case OPC_OUB_SSP_RECV_EVENT: | ||
| 3530 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3531 | pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); | ||
| 3532 | /*This is for target*/ | ||
| 3533 | break; | ||
| 3534 | case OPC_OUB_DEV_INFO: | ||
| 3535 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3536 | pm8001_printk("OPC_OUB_DEV_INFO\n")); | ||
| 3537 | break; | ||
| 3538 | case OPC_OUB_FW_FLASH_UPDATE: | ||
| 3539 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3540 | pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); | ||
| 3541 | mpi_fw_flash_update_resp(pm8001_ha, piomb); | ||
| 3542 | break; | ||
| 3543 | case OPC_OUB_GPIO_RESPONSE: | ||
| 3544 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3545 | pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); | ||
| 3546 | break; | ||
| 3547 | case OPC_OUB_GPIO_EVENT: | ||
| 3548 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3549 | pm8001_printk("OPC_OUB_GPIO_EVENT\n")); | ||
| 3550 | break; | ||
| 3551 | case OPC_OUB_GENERAL_EVENT: | ||
| 3552 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3553 | pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); | ||
| 3554 | mpi_general_event(pm8001_ha, piomb); | ||
| 3555 | break; | ||
| 3556 | case OPC_OUB_SSP_ABORT_RSP: | ||
| 3557 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3558 | pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); | ||
| 3559 | mpi_task_abort_resp(pm8001_ha, piomb); | ||
| 3560 | break; | ||
| 3561 | case OPC_OUB_SATA_ABORT_RSP: | ||
| 3562 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3563 | pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); | ||
| 3564 | mpi_task_abort_resp(pm8001_ha, piomb); | ||
| 3565 | break; | ||
| 3566 | case OPC_OUB_SAS_DIAG_MODE_START_END: | ||
| 3567 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3568 | pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); | ||
| 3569 | break; | ||
| 3570 | case OPC_OUB_SAS_DIAG_EXECUTE: | ||
| 3571 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3572 | pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); | ||
| 3573 | break; | ||
| 3574 | case OPC_OUB_GET_TIME_STAMP: | ||
| 3575 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3576 | pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); | ||
| 3577 | break; | ||
| 3578 | case OPC_OUB_SAS_HW_EVENT_ACK: | ||
| 3579 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3580 | pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); | ||
| 3581 | break; | ||
| 3582 | case OPC_OUB_PORT_CONTROL: | ||
| 3583 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3584 | pm8001_printk("OPC_OUB_PORT_CONTROL\n")); | ||
| 3585 | break; | ||
| 3586 | case OPC_OUB_SMP_ABORT_RSP: | ||
| 3587 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3588 | pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); | ||
| 3589 | mpi_task_abort_resp(pm8001_ha, piomb); | ||
| 3590 | break; | ||
| 3591 | case OPC_OUB_GET_NVMD_DATA: | ||
| 3592 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3593 | pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); | ||
| 3594 | mpi_get_nvmd_resp(pm8001_ha, piomb); | ||
| 3595 | break; | ||
| 3596 | case OPC_OUB_SET_NVMD_DATA: | ||
| 3597 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3598 | pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); | ||
| 3599 | mpi_set_nvmd_resp(pm8001_ha, piomb); | ||
| 3600 | break; | ||
| 3601 | case OPC_OUB_DEVICE_HANDLE_REMOVAL: | ||
| 3602 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3603 | pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); | ||
| 3604 | break; | ||
| 3605 | case OPC_OUB_SET_DEVICE_STATE: | ||
| 3606 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3607 | pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); | ||
| 3608 | mpi_set_dev_state_resp(pm8001_ha, piomb); | ||
| 3609 | break; | ||
| 3610 | case OPC_OUB_GET_DEVICE_STATE: | ||
| 3611 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3612 | pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); | ||
| 3613 | break; | ||
| 3614 | case OPC_OUB_SET_DEV_INFO: | ||
| 3615 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3616 | pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); | ||
| 3617 | break; | ||
| 3618 | case OPC_OUB_SAS_RE_INITIALIZE: | ||
| 3619 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3620 | pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n")); | ||
| 3621 | break; | ||
| 3622 | default: | ||
| 3623 | PM8001_MSG_DBG(pm8001_ha, | ||
| 3624 | pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n", | ||
| 3625 | opc)); | ||
| 3626 | break; | ||
| 3627 | } | ||
| 3628 | } | ||
| 3629 | |||
/*
 * process_oq - drain outbound queue 0 and dispatch each IOMB.
 * @pm8001_ha: our hba card information
 *
 * Consumes messages until mpi_msg_consume() reports the queue busy and
 * the producer index, re-read from the chip, equals the consumer index
 * (queue empty).  Returns the last mpi_msg_consume() status.
 */
static int process_oq(struct pm8001_hba_info *pm8001_ha)
{
	struct outbound_queue_table *circularQ;
	void *pMsg1 = NULL;
	u8 bc = 0;
	u32 ret = MPI_IO_STATUS_FAIL;

	circularQ = &pm8001_ha->outbnd_q_tbl[0];
	do {
		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
		if (MPI_IO_STATUS_SUCCESS == ret) {
			/* process the outbound message */
			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
			/* free the message from the outbound circular buffer */
			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
		}
		if (MPI_IO_STATUS_BUSY == ret) {
			u32 producer_idx;
			/* Update the producer index from SPC */
			producer_idx = pm8001_read_32(circularQ->pi_virt);
			circularQ->producer_index = cpu_to_le32(producer_idx);
			/* NOTE(review): this compares producer_index (stored
			 * little-endian above) with consumer_idx — assumes
			 * consumer_idx is kept little-endian too; confirm
			 * against the struct definition. */
			if (circularQ->producer_index ==
				circularQ->consumer_idx)
				/* OQ is empty */
				break;
		}
	} while (1);
	return ret;
}
| 3659 | |||
/*
 * PCI_DMA_... to our direction translation: maps the generic DMA data
 * direction to the SPC DATA_DIR_* encoding used in inbound IOMBs.
 */
static const u8 data_dir_flags[] = {
	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
	[PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
	[PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
	[PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
};
| 3667 | static void | ||
| 3668 | pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) | ||
| 3669 | { | ||
| 3670 | int i; | ||
| 3671 | struct scatterlist *sg; | ||
| 3672 | struct pm8001_prd *buf_prd = prd; | ||
| 3673 | |||
| 3674 | for_each_sg(scatter, sg, nr, i) { | ||
| 3675 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
| 3676 | buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); | ||
| 3677 | buf_prd->im_len.e = 0; | ||
| 3678 | buf_prd++; | ||
| 3679 | } | ||
| 3680 | } | ||
| 3681 | |||
| 3682 | static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd) | ||
| 3683 | { | ||
| 3684 | psmp_cmd->tag = cpu_to_le32(hTag); | ||
| 3685 | psmp_cmd->device_id = cpu_to_le32(deviceID); | ||
| 3686 | psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); | ||
| 3687 | } | ||
| 3688 | |||
/**
 * pm8001_chip_smp_req - send a SMP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * DMA-maps the single-entry request and response scatterlists, checks
 * that both lengths are dword multiples, then builds and posts a
 * long-form SMP request IOMB.  Returns 0 on success or a negative
 * errno; the error paths unmap whatever was mapped.
 */
static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	int elem, rc;
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len;
	struct smp_req smp_cmd;
	u32 opc;
	struct inbound_queue_table *circularQ;

	memset(&smp_cmd, 0, sizeof(smp_cmd));
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);
	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	opc = OPC_INB_SMP_REQUEST;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
	/* NOTE(review): lengths are reduced by 4 — presumably excluding the
	 * trailing CRC dword; confirm against the SPC programming guide. */
	smp_cmd.long_smp_req.long_req_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
	smp_cmd.long_smp_req.long_req_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
	smp_cmd.long_smp_req.long_resp_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
	smp_cmd.long_smp_req.long_resp_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
	/* NOTE(review): smp_cmd.tag is already little-endian here and is
	 * handed to build_smp_cmd() as a plain u32 — check for a double
	 * byte-swap of the tag on big-endian hosts. */
	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
	mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
	return 0;

err_out_2:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
		PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
		PCI_DMA_TODEVICE);
	return rc;
}
| 3753 | |||
/**
 * pm8001_chip_ssp_io_req - send a SSP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * Builds an SSP initiator IO start IOMB: copies LUN and CDB, encodes
 * direction and task attributes, and points the command at either an
 * external PRD table (num_scatter > 1), a single mapped segment, or no
 * data at all.  Returns the status of mpi_build_cmd().
 */
static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct ssp_ini_io_start_req ssp_cmd;
	u32 tag = ccb->ccb_tag;
	int ret;
	__le64 phys_addr;
	struct inbound_queue_table *circularQ;
	u32 opc = OPC_INB_SSPINIIOSTART;
	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
	SAS 1.1 compatible TLR*/
	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
	ssp_cmd.tag = cpu_to_le32(tag);
	/* efb_prio_attr: bit7 = enable first burst, bits 3-6 = priority,
	 * bits 0-2 = task attribute */
	if (task->ssp_task.enable_first_burst)
		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* fill in PRD (scatter/gather) table, if any */
	if (task->num_scatter > 1) {
		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
		/* NOTE(review): phys_addr is declared __le64 but is split
		 * with lower_32_bits()/upper_32_bits(), which expect CPU
		 * byte order — suspect this mis-orders the address halves
		 * on big-endian hosts; verify against the struct's
		 * addr_low/addr_high types. */
		phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]));
		ssp_cmd.addr_low = lower_32_bits(phys_addr);
		ssp_cmd.addr_high = upper_32_bits(phys_addr);
		ssp_cmd.esgl = cpu_to_le32(1<<31);
	} else if (task->num_scatter == 1) {
		__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
		ssp_cmd.addr_low = lower_32_bits(dma_addr);
		ssp_cmd.addr_high = upper_32_bits(dma_addr);
		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
		ssp_cmd.esgl = 0;
	} else if (task->num_scatter == 0) {
		ssp_cmd.addr_low = 0;
		ssp_cmd.addr_high = 0;
		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
		ssp_cmd.esgl = 0;
	}
	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
	return ret;
}
| 3808 | |||
| 3809 | static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, | ||
| 3810 | struct pm8001_ccb_info *ccb) | ||
| 3811 | { | ||
| 3812 | struct sas_task *task = ccb->task; | ||
| 3813 | struct domain_device *dev = task->dev; | ||
| 3814 | struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; | ||
| 3815 | u32 tag = ccb->ccb_tag; | ||
| 3816 | int ret; | ||
| 3817 | struct sata_start_req sata_cmd; | ||
| 3818 | u32 hdr_tag, ncg_tag = 0; | ||
| 3819 | __le64 phys_addr; | ||
| 3820 | u32 ATAP = 0x0; | ||
| 3821 | u32 dir; | ||
| 3822 | struct inbound_queue_table *circularQ; | ||
| 3823 | u32 opc = OPC_INB_SATA_HOST_OPSTART; | ||
| 3824 | memset(&sata_cmd, 0, sizeof(sata_cmd)); | ||
| 3825 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 3826 | if (task->data_dir == PCI_DMA_NONE) { | ||
| 3827 | ATAP = 0x04; /* no data*/ | ||
| 3828 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n")); | ||
| 3829 | } else if (likely(!task->ata_task.device_control_reg_update)) { | ||
| 3830 | if (task->ata_task.dma_xfer) { | ||
| 3831 | ATAP = 0x06; /* DMA */ | ||
| 3832 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n")); | ||
| 3833 | } else { | ||
| 3834 | ATAP = 0x05; /* PIO*/ | ||
| 3835 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n")); | ||
| 3836 | } | ||
| 3837 | if (task->ata_task.use_ncq && | ||
| 3838 | dev->sata_dev.command_set != ATAPI_COMMAND_SET) { | ||
| 3839 | ATAP = 0x07; /* FPDMA */ | ||
| 3840 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n")); | ||
| 3841 | } | ||
| 3842 | } | ||
| 3843 | if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) | ||
| 3844 | ncg_tag = cpu_to_le32(hdr_tag); | ||
| 3845 | dir = data_dir_flags[task->data_dir] << 8; | ||
| 3846 | sata_cmd.tag = cpu_to_le32(tag); | ||
| 3847 | sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); | ||
| 3848 | sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); | ||
| 3849 | sata_cmd.ncqtag_atap_dir_m = | ||
| 3850 | cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir); | ||
| 3851 | sata_cmd.sata_fis = task->ata_task.fis; | ||
| 3852 | if (likely(!task->ata_task.device_control_reg_update)) | ||
| 3853 | sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ | ||
| 3854 | sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ | ||
| 3855 | /* fill in PRD (scatter/gather) table, if any */ | ||
| 3856 | if (task->num_scatter > 1) { | ||
| 3857 | pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); | ||
| 3858 | phys_addr = cpu_to_le64(ccb->ccb_dma_handle + | ||
| 3859 | offsetof(struct pm8001_ccb_info, buf_prd[0])); | ||
| 3860 | sata_cmd.addr_low = lower_32_bits(phys_addr); | ||
| 3861 | sata_cmd.addr_high = upper_32_bits(phys_addr); | ||
| 3862 | sata_cmd.esgl = cpu_to_le32(1 << 31); | ||
| 3863 | } else if (task->num_scatter == 1) { | ||
| 3864 | __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter)); | ||
| 3865 | sata_cmd.addr_low = lower_32_bits(dma_addr); | ||
| 3866 | sata_cmd.addr_high = upper_32_bits(dma_addr); | ||
| 3867 | sata_cmd.len = cpu_to_le32(task->total_xfer_len); | ||
| 3868 | sata_cmd.esgl = 0; | ||
| 3869 | } else if (task->num_scatter == 0) { | ||
| 3870 | sata_cmd.addr_low = 0; | ||
| 3871 | sata_cmd.addr_high = 0; | ||
| 3872 | sata_cmd.len = cpu_to_le32(task->total_xfer_len); | ||
| 3873 | sata_cmd.esgl = 0; | ||
| 3874 | } | ||
| 3875 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); | ||
| 3876 | return ret; | ||
| 3877 | } | ||
| 3878 | |||
/**
 * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we wanted to start up.
 *
 * Posts a PHY_START IOMB advertising auto link mode and all link rates
 * (1.5/3/6G) with spin-up hold disabled, carrying this host's SAS
 * identify frame.  Uses a fixed tag of 0x01 (no tag allocation).
 */
static int
pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
	struct phy_start_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTART;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	/*
	** [0:7]	PHY Identifier
	** [8:11]	link rate 1.5G, 3G, 6G
	** [12:13] link mode	01b SAS mode; 10b SATA mode; 11b both
	** [14]	0b disable spin up hold; 1b enable spin up hold
	*/
	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
		LINKMODE_AUTO |	LINKRATE_15 |
		LINKRATE_30 | LINKRATE_60 | phy_id);
	payload.sas_identify.dev_type = SAS_END_DEV;
	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
	memcpy(payload.sas_identify.sas_addr,
		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
	payload.sas_identify.phy_id = phy_id;
	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
	return ret;
}
| 3913 | |||
/**
 * pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we wanted to stop.
 *
 * Posts a PHY_STOP IOMB for @phy_id.  Uses a fixed tag of 0x01
 * (no tag allocation).
 */
static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
	u8 phy_id)
{
	struct phy_stop_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTOP;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	payload.phy_id = cpu_to_le32(phy_id);
	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
	return ret;
}
| 3935 | |||
/**
 * pm8001_chip_reg_dev_req - register a device with the firmware.
 * @pm8001_ha: our hba card information.
 * @pm8001_dev: the device to register.
 * @flag: 1 for a directly-attached SATA device, 0 otherwise.
 *
 * Allocates a tag/ccb, classifies the device (STP, direct SATA, or
 * SSP/SMP), packs phy/port id, link rate, retry flag, first burst size
 * and I_T nexus timeout into the REG_DEV IOMB and posts it.  The
 * response is handled in mpi_reg_resp (see comments there).
 */
static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, u32 flag)
{
	struct reg_dev_req payload;
	u32 opc;
	u32 stp_sspsmp_sata = 0x4;
	struct inbound_queue_table *circularQ;
	u32 linkrate, phy_id;
	int rc, tag = 0xdeadbeef;
	struct pm8001_ccb_info *ccb;
	u8 retryFlag = 0x1;
	u16 firstBurstSize = 0;
	u16 ITNT = 2000;
	struct domain_device *dev = pm8001_dev->sas_device;
	struct domain_device *parent_dev = dev->parent;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = tag;
	payload.tag = cpu_to_le32(tag);
	/* device type field: 0x00 STP, 0x01 SSP/SMP, 0x02 direct SATA */
	if (flag == 1)
		stp_sspsmp_sata = 0x02; /*direct attached sata */
	else {
		if (pm8001_dev->dev_type == SATA_DEV)
			stp_sspsmp_sata = 0x00; /* stp*/
		else if (pm8001_dev->dev_type == SAS_END_DEV ||
			pm8001_dev->dev_type == EDGE_DEV ||
			pm8001_dev->dev_type == FANOUT_DEV)
			stp_sspsmp_sata = 0x01; /*ssp or smp*/
	}
	/* behind an expander: register via the expander's attached phy */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
	else
		phy_id = pm8001_dev->attached_phy;
	opc = OPC_INB_REG_DEV;
	/* negotiate down to the slower of device and port link rate */
	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
			pm8001_dev->sas_device->linkrate : dev->port->linkrate;
	payload.phyid_portid =
		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
		((phy_id & 0x0F) << 4));
	/* multiplications by 0x1000000 / 0x10000 place the fields at
	 * bits 24-27 and 28-29 respectively */
	payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
		((linkrate & 0x0F) * 0x1000000) |
		((stp_sspsmp_sata & 0x03) * 0x10000000));
	payload.firstburstsize_ITNexustimeout =
		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
		SAS_ADDR_SIZE);
	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
	return rc;
}
| 3994 | |||
| 3995 | /** | ||
| 3996 | * see comments on mpi_reg_resp. | ||
| 3997 | */ | ||
| 3998 | static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, | ||
| 3999 | u32 device_id) | ||
| 4000 | { | ||
| 4001 | struct dereg_dev_req payload; | ||
| 4002 | u32 opc = OPC_INB_DEREG_DEV_HANDLE; | ||
| 4003 | int ret; | ||
| 4004 | struct inbound_queue_table *circularQ; | ||
| 4005 | |||
| 4006 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4007 | memset(&payload, 0, sizeof(payload)); | ||
| 4008 | payload.tag = 1; | ||
| 4009 | payload.device_id = cpu_to_le32(device_id); | ||
| 4010 | PM8001_MSG_DBG(pm8001_ha, | ||
| 4011 | pm8001_printk("unregister device device_id = %d\n", device_id)); | ||
| 4012 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 4013 | return ret; | ||
| 4014 | } | ||
| 4015 | |||
| 4016 | /** | ||
| 4017 | * pm8001_chip_phy_ctl_req - support the local phy operation | ||
| 4018 | * @pm8001_ha: our hba card information. | ||
| 4019 | * @num: the inbound queue number | ||
| 4020 | * @phy_id: the phy id which we wanted to operate | ||
| 4021 | * @phy_op: | ||
| 4022 | */ | ||
| 4023 | static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4024 | u32 phyId, u32 phy_op) | ||
| 4025 | { | ||
| 4026 | struct local_phy_ctl_req payload; | ||
| 4027 | struct inbound_queue_table *circularQ; | ||
| 4028 | int ret; | ||
| 4029 | u32 opc = OPC_INB_LOCAL_PHY_CONTROL; | ||
| 4030 | memset((u8 *)&payload, 0, sizeof(payload)); | ||
| 4031 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4032 | payload.tag = 1; | ||
| 4033 | payload.phyop_phyid = | ||
| 4034 | cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); | ||
| 4035 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 4036 | return ret; | ||
| 4037 | } | ||
| 4038 | |||
| 4039 | static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) | ||
| 4040 | { | ||
| 4041 | u32 value; | ||
| 4042 | #ifdef PM8001_USE_MSIX | ||
| 4043 | return 1; | ||
| 4044 | #endif | ||
| 4045 | value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); | ||
| 4046 | if (value) | ||
| 4047 | return 1; | ||
| 4048 | return 0; | ||
| 4049 | |||
| 4050 | } | ||
| 4051 | |||
/**
 * pm8001_chip_isr - PM8001 isr handler.
 * @pm8001_ha: our hba card information.
 *
 * Drains the outbound queue with the HBA lock held and chip interrupts
 * masked, then unmasks them again.  Always reports IRQ_HANDLED.
 */
static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
{
	unsigned long flags;
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	pm8001_chip_interrupt_disable(pm8001_ha);
	process_oq(pm8001_ha);
	pm8001_chip_interrupt_enable(pm8001_ha);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return IRQ_HANDLED;
}
| 4069 | |||
| 4070 | static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, | ||
| 4071 | u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag) | ||
| 4072 | { | ||
| 4073 | struct task_abort_req task_abort; | ||
| 4074 | struct inbound_queue_table *circularQ; | ||
| 4075 | int ret; | ||
| 4076 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4077 | memset(&task_abort, 0, sizeof(task_abort)); | ||
| 4078 | if (ABORT_SINGLE == (flag & ABORT_MASK)) { | ||
| 4079 | task_abort.abort_all = 0; | ||
| 4080 | task_abort.device_id = cpu_to_le32(dev_id); | ||
| 4081 | task_abort.tag_to_abort = cpu_to_le32(task_tag); | ||
| 4082 | task_abort.tag = cpu_to_le32(cmd_tag); | ||
| 4083 | } else if (ABORT_ALL == (flag & ABORT_MASK)) { | ||
| 4084 | task_abort.abort_all = cpu_to_le32(1); | ||
| 4085 | task_abort.device_id = cpu_to_le32(dev_id); | ||
| 4086 | task_abort.tag = cpu_to_le32(cmd_tag); | ||
| 4087 | } | ||
| 4088 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); | ||
| 4089 | return ret; | ||
| 4090 | } | ||
| 4091 | |||
| 4092 | /** | ||
| 4093 | * pm8001_chip_abort_task - SAS abort task when error or exception happened. | ||
| 4094 | * @task: the task we wanted to aborted. | ||
| 4095 | * @flag: the abort flag. | ||
| 4096 | */ | ||
| 4097 | static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, | ||
| 4098 | struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) | ||
| 4099 | { | ||
| 4100 | u32 opc, device_id; | ||
| 4101 | int rc = TMF_RESP_FUNC_FAILED; | ||
| 4102 | PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" | ||
| 4103 | " = %x", cmd_tag, task_tag)); | ||
| 4104 | if (pm8001_dev->dev_type == SAS_END_DEV) | ||
| 4105 | opc = OPC_INB_SSP_ABORT; | ||
| 4106 | else if (pm8001_dev->dev_type == SATA_DEV) | ||
| 4107 | opc = OPC_INB_SATA_ABORT; | ||
| 4108 | else | ||
| 4109 | opc = OPC_INB_SMP_ABORT;/* SMP */ | ||
| 4110 | device_id = pm8001_dev->device_id; | ||
| 4111 | rc = send_task_abort(pm8001_ha, opc, device_id, flag, | ||
| 4112 | task_tag, cmd_tag); | ||
| 4113 | if (rc != TMF_RESP_FUNC_COMPLETE) | ||
| 4114 | PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc)); | ||
| 4115 | return rc; | ||
| 4116 | } | ||
| 4117 | |||
| 4118 | /** | ||
| 4119 | * pm8001_chip_ssp_tm_req - built the task managment command. | ||
| 4120 | * @pm8001_ha: our hba card information. | ||
| 4121 | * @ccb: the ccb information. | ||
| 4122 | * @tmf: task management function. | ||
| 4123 | */ | ||
| 4124 | static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4125 | struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) | ||
| 4126 | { | ||
| 4127 | struct sas_task *task = ccb->task; | ||
| 4128 | struct domain_device *dev = task->dev; | ||
| 4129 | struct pm8001_device *pm8001_dev = dev->lldd_dev; | ||
| 4130 | u32 opc = OPC_INB_SSPINITMSTART; | ||
| 4131 | struct inbound_queue_table *circularQ; | ||
| 4132 | struct ssp_ini_tm_start_req sspTMCmd; | ||
| 4133 | int ret; | ||
| 4134 | |||
| 4135 | memset(&sspTMCmd, 0, sizeof(sspTMCmd)); | ||
| 4136 | sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); | ||
| 4137 | sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed); | ||
| 4138 | sspTMCmd.tmf = cpu_to_le32(tmf->tmf); | ||
| 4139 | memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); | ||
| 4140 | sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); | ||
| 4141 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4142 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); | ||
| 4143 | return ret; | ||
| 4144 | } | ||
| 4145 | |||
| 4146 | static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4147 | void *payload) | ||
| 4148 | { | ||
| 4149 | u32 opc = OPC_INB_GET_NVMD_DATA; | ||
| 4150 | u32 nvmd_type; | ||
| 4151 | int rc; | ||
| 4152 | u32 tag; | ||
| 4153 | struct pm8001_ccb_info *ccb; | ||
| 4154 | struct inbound_queue_table *circularQ; | ||
| 4155 | struct get_nvm_data_req nvmd_req; | ||
| 4156 | struct fw_control_ex *fw_control_context; | ||
| 4157 | struct pm8001_ioctl_payload *ioctl_payload = payload; | ||
| 4158 | |||
| 4159 | nvmd_type = ioctl_payload->minor_function; | ||
| 4160 | fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); | ||
| 4161 | fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; | ||
| 4162 | fw_control_context->len = ioctl_payload->length; | ||
| 4163 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4164 | memset(&nvmd_req, 0, sizeof(nvmd_req)); | ||
| 4165 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 4166 | if (rc) | ||
| 4167 | return rc; | ||
| 4168 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 4169 | ccb->ccb_tag = tag; | ||
| 4170 | ccb->fw_control_context = fw_control_context; | ||
| 4171 | nvmd_req.tag = cpu_to_le32(tag); | ||
| 4172 | |||
| 4173 | switch (nvmd_type) { | ||
| 4174 | case TWI_DEVICE: { | ||
| 4175 | u32 twi_addr, twi_page_size; | ||
| 4176 | twi_addr = 0xa8; | ||
| 4177 | twi_page_size = 2; | ||
| 4178 | |||
| 4179 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | | ||
| 4180 | twi_page_size << 8 | TWI_DEVICE); | ||
| 4181 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4182 | nvmd_req.resp_addr_hi = | ||
| 4183 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4184 | nvmd_req.resp_addr_lo = | ||
| 4185 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4186 | break; | ||
| 4187 | } | ||
| 4188 | case C_SEEPROM: { | ||
| 4189 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); | ||
| 4190 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4191 | nvmd_req.resp_addr_hi = | ||
| 4192 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4193 | nvmd_req.resp_addr_lo = | ||
| 4194 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4195 | break; | ||
| 4196 | } | ||
| 4197 | case VPD_FLASH: { | ||
| 4198 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); | ||
| 4199 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4200 | nvmd_req.resp_addr_hi = | ||
| 4201 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4202 | nvmd_req.resp_addr_lo = | ||
| 4203 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4204 | break; | ||
| 4205 | } | ||
| 4206 | case EXPAN_ROM: { | ||
| 4207 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); | ||
| 4208 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4209 | nvmd_req.resp_addr_hi = | ||
| 4210 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4211 | nvmd_req.resp_addr_lo = | ||
| 4212 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4213 | break; | ||
| 4214 | } | ||
| 4215 | default: | ||
| 4216 | break; | ||
| 4217 | } | ||
| 4218 | rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); | ||
| 4219 | return rc; | ||
| 4220 | } | ||
| 4221 | |||
| 4222 | static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4223 | void *payload) | ||
| 4224 | { | ||
| 4225 | u32 opc = OPC_INB_SET_NVMD_DATA; | ||
| 4226 | u32 nvmd_type; | ||
| 4227 | int rc; | ||
| 4228 | u32 tag; | ||
| 4229 | struct pm8001_ccb_info *ccb; | ||
| 4230 | struct inbound_queue_table *circularQ; | ||
| 4231 | struct set_nvm_data_req nvmd_req; | ||
| 4232 | struct fw_control_ex *fw_control_context; | ||
| 4233 | struct pm8001_ioctl_payload *ioctl_payload = payload; | ||
| 4234 | |||
| 4235 | nvmd_type = ioctl_payload->minor_function; | ||
| 4236 | fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); | ||
| 4237 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4238 | memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, | ||
| 4239 | ioctl_payload->func_specific, | ||
| 4240 | ioctl_payload->length); | ||
| 4241 | memset(&nvmd_req, 0, sizeof(nvmd_req)); | ||
| 4242 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 4243 | if (rc) | ||
| 4244 | return rc; | ||
| 4245 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 4246 | ccb->fw_control_context = fw_control_context; | ||
| 4247 | ccb->ccb_tag = tag; | ||
| 4248 | nvmd_req.tag = cpu_to_le32(tag); | ||
| 4249 | switch (nvmd_type) { | ||
| 4250 | case TWI_DEVICE: { | ||
| 4251 | u32 twi_addr, twi_page_size; | ||
| 4252 | twi_addr = 0xa8; | ||
| 4253 | twi_page_size = 2; | ||
| 4254 | nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); | ||
| 4255 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | | ||
| 4256 | twi_page_size << 8 | TWI_DEVICE); | ||
| 4257 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4258 | nvmd_req.resp_addr_hi = | ||
| 4259 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4260 | nvmd_req.resp_addr_lo = | ||
| 4261 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4262 | break; | ||
| 4263 | } | ||
| 4264 | case C_SEEPROM: | ||
| 4265 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); | ||
| 4266 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4267 | nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); | ||
| 4268 | nvmd_req.resp_addr_hi = | ||
| 4269 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4270 | nvmd_req.resp_addr_lo = | ||
| 4271 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4272 | break; | ||
| 4273 | case VPD_FLASH: | ||
| 4274 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); | ||
| 4275 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4276 | nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); | ||
| 4277 | nvmd_req.resp_addr_hi = | ||
| 4278 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4279 | nvmd_req.resp_addr_lo = | ||
| 4280 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4281 | break; | ||
| 4282 | case EXPAN_ROM: | ||
| 4283 | nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); | ||
| 4284 | nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); | ||
| 4285 | nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); | ||
| 4286 | nvmd_req.resp_addr_hi = | ||
| 4287 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); | ||
| 4288 | nvmd_req.resp_addr_lo = | ||
| 4289 | cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); | ||
| 4290 | break; | ||
| 4291 | default: | ||
| 4292 | break; | ||
| 4293 | } | ||
| 4294 | rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); | ||
| 4295 | return rc; | ||
| 4296 | } | ||
| 4297 | |||
| 4298 | /** | ||
| 4299 | * pm8001_chip_fw_flash_update_build - support the firmware update operation | ||
| 4300 | * @pm8001_ha: our hba card information. | ||
| 4301 | * @fw_flash_updata_info: firmware flash update param | ||
| 4302 | */ | ||
| 4303 | static int | ||
| 4304 | pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, | ||
| 4305 | void *fw_flash_updata_info, u32 tag) | ||
| 4306 | { | ||
| 4307 | struct fw_flash_Update_req payload; | ||
| 4308 | struct fw_flash_updata_info *info; | ||
| 4309 | struct inbound_queue_table *circularQ; | ||
| 4310 | int ret; | ||
| 4311 | u32 opc = OPC_INB_FW_FLASH_UPDATE; | ||
| 4312 | |||
| 4313 | memset(&payload, 0, sizeof(struct fw_flash_Update_req)); | ||
| 4314 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4315 | info = fw_flash_updata_info; | ||
| 4316 | payload.tag = cpu_to_le32(tag); | ||
| 4317 | payload.cur_image_len = cpu_to_le32(info->cur_image_len); | ||
| 4318 | payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); | ||
| 4319 | payload.total_image_len = cpu_to_le32(info->total_image_len); | ||
| 4320 | payload.len = info->sgl.im_len.len ; | ||
| 4321 | payload.sgl_addr_lo = lower_32_bits(info->sgl.addr); | ||
| 4322 | payload.sgl_addr_hi = upper_32_bits(info->sgl.addr); | ||
| 4323 | ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 4324 | return ret; | ||
| 4325 | } | ||
| 4326 | |||
| 4327 | static int | ||
| 4328 | pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4329 | void *payload) | ||
| 4330 | { | ||
| 4331 | struct fw_flash_updata_info flash_update_info; | ||
| 4332 | struct fw_control_info *fw_control; | ||
| 4333 | struct fw_control_ex *fw_control_context; | ||
| 4334 | int rc; | ||
| 4335 | u32 tag; | ||
| 4336 | struct pm8001_ccb_info *ccb; | ||
| 4337 | void *buffer = NULL; | ||
| 4338 | dma_addr_t phys_addr; | ||
| 4339 | u32 phys_addr_hi; | ||
| 4340 | u32 phys_addr_lo; | ||
| 4341 | struct pm8001_ioctl_payload *ioctl_payload = payload; | ||
| 4342 | |||
| 4343 | fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); | ||
| 4344 | fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; | ||
| 4345 | if (fw_control->len != 0) { | ||
| 4346 | if (pm8001_mem_alloc(pm8001_ha->pdev, | ||
| 4347 | (void **)&buffer, | ||
| 4348 | &phys_addr, | ||
| 4349 | &phys_addr_hi, | ||
| 4350 | &phys_addr_lo, | ||
| 4351 | fw_control->len, 0) != 0) { | ||
| 4352 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 4353 | pm8001_printk("Mem alloc failure\n")); | ||
| 4354 | return -ENOMEM; | ||
| 4355 | } | ||
| 4356 | } | ||
| 4357 | memset(buffer, 0, fw_control->len); | ||
| 4358 | memcpy(buffer, fw_control->buffer, fw_control->len); | ||
| 4359 | flash_update_info.sgl.addr = cpu_to_le64(phys_addr); | ||
| 4360 | flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); | ||
| 4361 | flash_update_info.sgl.im_len.e = 0; | ||
| 4362 | flash_update_info.cur_image_offset = fw_control->offset; | ||
| 4363 | flash_update_info.cur_image_len = fw_control->len; | ||
| 4364 | flash_update_info.total_image_len = fw_control->size; | ||
| 4365 | fw_control_context->fw_control = fw_control; | ||
| 4366 | fw_control_context->virtAddr = buffer; | ||
| 4367 | fw_control_context->len = fw_control->len; | ||
| 4368 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 4369 | if (rc) | ||
| 4370 | return rc; | ||
| 4371 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 4372 | ccb->fw_control_context = fw_control_context; | ||
| 4373 | ccb->ccb_tag = tag; | ||
| 4374 | rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, | ||
| 4375 | tag); | ||
| 4376 | return rc; | ||
| 4377 | } | ||
| 4378 | |||
| 4379 | static int | ||
| 4380 | pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, | ||
| 4381 | struct pm8001_device *pm8001_dev, u32 state) | ||
| 4382 | { | ||
| 4383 | struct set_dev_state_req payload; | ||
| 4384 | struct inbound_queue_table *circularQ; | ||
| 4385 | struct pm8001_ccb_info *ccb; | ||
| 4386 | int rc; | ||
| 4387 | u32 tag; | ||
| 4388 | u32 opc = OPC_INB_SET_DEVICE_STATE; | ||
| 4389 | memset(&payload, 0, sizeof(payload)); | ||
| 4390 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 4391 | if (rc) | ||
| 4392 | return -1; | ||
| 4393 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 4394 | ccb->ccb_tag = tag; | ||
| 4395 | ccb->device = pm8001_dev; | ||
| 4396 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4397 | payload.tag = cpu_to_le32(tag); | ||
| 4398 | payload.device_id = cpu_to_le32(pm8001_dev->device_id); | ||
| 4399 | payload.nds = cpu_to_le32(state); | ||
| 4400 | rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 4401 | return rc; | ||
| 4402 | |||
| 4403 | } | ||
| 4404 | |||
| 4405 | static int | ||
| 4406 | pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) | ||
| 4407 | { | ||
| 4408 | struct sas_re_initialization_req payload; | ||
| 4409 | struct inbound_queue_table *circularQ; | ||
| 4410 | struct pm8001_ccb_info *ccb; | ||
| 4411 | int rc; | ||
| 4412 | u32 tag; | ||
| 4413 | u32 opc = OPC_INB_SAS_RE_INITIALIZE; | ||
| 4414 | memset(&payload, 0, sizeof(payload)); | ||
| 4415 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 4416 | if (rc) | ||
| 4417 | return -1; | ||
| 4418 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 4419 | ccb->ccb_tag = tag; | ||
| 4420 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | ||
| 4421 | payload.tag = cpu_to_le32(tag); | ||
| 4422 | payload.SSAHOLT = cpu_to_le32(0xd << 25); | ||
| 4423 | payload.sata_hol_tmo = cpu_to_le32(80); | ||
| 4424 | payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); | ||
| 4425 | rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); | ||
| 4426 | return rc; | ||
| 4427 | |||
| 4428 | } | ||
| 4429 | |||
/*
 * pm8001_8001_dispatch - operation table binding the generic pm8001 core
 * to the PMC-Sierra 8001 chip-specific implementations in this file.
 * Note: the ".is_our_interupt" spelling follows the field name declared
 * in struct pm8001_dispatch and cannot be fixed here.
 */
const struct pm8001_dispatch pm8001_8001_dispatch = {
	.name = "pmc8001",
	.chip_init = pm8001_chip_init,
	.chip_soft_rst = pm8001_chip_soft_rst,
	.chip_rst = pm8001_hw_chip_rst,
	.chip_iounmap = pm8001_chip_iounmap,
	.isr = pm8001_chip_isr,
	.is_our_interupt = pm8001_chip_is_our_interupt,
	.isr_process_oq = process_oq,
	.interrupt_enable = pm8001_chip_interrupt_enable,
	.interrupt_disable = pm8001_chip_interrupt_disable,
	.make_prd = pm8001_chip_make_sg,
	.smp_req = pm8001_chip_smp_req,
	.ssp_io_req = pm8001_chip_ssp_io_req,
	.sata_req = pm8001_chip_sata_req,
	.phy_start_req = pm8001_chip_phy_start_req,
	.phy_stop_req = pm8001_chip_phy_stop_req,
	.reg_dev_req = pm8001_chip_reg_dev_req,
	.dereg_dev_req = pm8001_chip_dereg_dev_req,
	.phy_ctl_req = pm8001_chip_phy_ctl_req,
	.task_abort = pm8001_chip_abort_task,
	.ssp_tm_req = pm8001_chip_ssp_tm_req,
	.get_nvmd_req = pm8001_chip_get_nvmd_req,
	.set_nvmd_req = pm8001_chip_set_nvmd_req,
	.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
	.set_dev_state_req = pm8001_chip_set_dev_state_req,
	.sas_re_init_req = pm8001_chip_sas_re_initialization,
};
| 4458 | |||
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h new file mode 100644 index 000000000000..96e4daa68b8f --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.h | |||
| @@ -0,0 +1,1030 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | #ifndef _PMC8001_REG_H_ | ||
| 41 | #define _PMC8001_REG_H_ | ||
| 42 | |||
| 43 | #include <linux/types.h> | ||
| 44 | #include <scsi/libsas.h> | ||
| 45 | |||
| 46 | |||
/* Request opcodes for inbound (host -> SPC) IOMBs */
#define OPC_INB_ECHO				1	/* 0x000 */
#define OPC_INB_PHYSTART			4	/* 0x004 */
#define OPC_INB_PHYSTOP				5	/* 0x005 */
#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
#define OPC_INB_SSPINIEXTIOSTART		8	/* 0x008 */
#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
#define OPC_INB_SSPINIEDCIOSTART		12	/* 0x00C */
#define OPC_INB_SSPINIEXTEDCIOSTART		13	/* 0x00D */
#define OPC_INB_SSPTGTEDCIOSTART		14	/* 0x00E */
#define OPC_INB_SSP_ABORT			15	/* 0x00F */
#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
/* SMP_RESPONSE is removed */
#define OPC_INB_SMP_RESPONSE			19	/* 0x013 */
#define OPC_INB_SMP_ABORT			20	/* 0x014 */
#define OPC_INB_REG_DEV				22	/* 0x016 */
#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
#define OPC_INB_SATA_ABORT			24	/* 0x018 */
#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
#define OPC_INB_GET_DEV_INFO			26	/* 0x01A */
#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
#define OPC_INB_GPIO				34	/* 0x022 */
#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
#define OPC_INB_SAS_HW_EVENT_ACK		37	/* 0x025 */
#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
#define OPC_INB_SAS_RE_INITIALIZE		45	/* 0x02D */

/* Response opcodes for outbound (SPC -> host) IOMBs */
#define OPC_OUB_ECHO				1	/* 0x001 */
#define OPC_OUB_HW_EVENT			4	/* 0x004 */
#define OPC_OUB_SSP_COMP			5	/* 0x005 */
#define OPC_OUB_SMP_COMP			6	/* 0x006 */
#define OPC_OUB_LOCAL_PHY_CNTRL			7	/* 0x007 */
#define OPC_OUB_DEV_REGIST			10	/* 0x00A */
#define OPC_OUB_DEREG_DEV			11	/* 0x00B */
#define OPC_OUB_GET_DEV_HANDLE			12	/* 0x00C */
#define OPC_OUB_SATA_COMP			13	/* 0x00D */
#define OPC_OUB_SATA_EVENT			14	/* 0x00E */
#define OPC_OUB_SSP_EVENT			15	/* 0x00F */
#define OPC_OUB_DEV_HANDLE_ARRIV		16	/* 0x010 */
/* SMP_RECEIVED Notification is removed */
#define OPC_OUB_SMP_RECV_EVENT			17	/* 0x011 */
#define OPC_OUB_SSP_RECV_EVENT			18	/* 0x012 */
#define OPC_OUB_DEV_INFO			19	/* 0x013 */
#define OPC_OUB_FW_FLASH_UPDATE			20	/* 0x014 */
#define OPC_OUB_GPIO_RESPONSE			22	/* 0x016 */
#define OPC_OUB_GPIO_EVENT			23	/* 0x017 */
#define OPC_OUB_GENERAL_EVENT			24	/* 0x018 */
#define OPC_OUB_SSP_ABORT_RSP			26	/* 0x01A */
#define OPC_OUB_SATA_ABORT_RSP			27	/* 0x01B */
#define OPC_OUB_SAS_DIAG_MODE_START_END		28	/* 0x01C */
#define OPC_OUB_SAS_DIAG_EXECUTE		29	/* 0x01D */
#define OPC_OUB_GET_TIME_STAMP			30	/* 0x01E */
#define OPC_OUB_SAS_HW_EVENT_ACK		31	/* 0x01F */
#define OPC_OUB_PORT_CONTROL			32	/* 0x020 */
#define OPC_OUB_SKIP_ENTRY			33	/* 0x021 */
#define OPC_OUB_SMP_ABORT_RSP			34	/* 0x022 */
#define OPC_OUB_GET_NVMD_DATA			35	/* 0x023 */
#define OPC_OUB_SET_NVMD_DATA			36	/* 0x024 */
#define OPC_OUB_DEVICE_HANDLE_REMOVAL		37	/* 0x025 */
#define OPC_OUB_SET_DEVICE_STATE		38	/* 0x026 */
#define OPC_OUB_GET_DEVICE_STATE		39	/* 0x027 */
#define OPC_OUB_SET_DEV_INFO			40	/* 0x028 */
#define OPC_OUB_SAS_RE_INITIALIZE		41	/* 0x029 */

/* Field values for the PHY START request (spin-hold, link mode, rate) */
#define SPINHOLD_DISABLE	(0x00 << 14)
#define SPINHOLD_ENABLE		(0x01 << 14)
#define LINKMODE_SAS		(0x01 << 12)
#define LINKMODE_DSATA		(0x02 << 12)
#define LINKMODE_AUTO		(0x03 << 12)
#define LINKRATE_15		(0x01 << 8)
#define LINKRATE_30		(0x02 << 8)
#define LINKRATE_60		(0x04 << 8)
| 133 | |||
/* Common header word carried by every IOMB message. */
struct mpi_msg_hdr{
	__le32 header;	/* Bits [11:0] - Message operation code */
	/* Bits [15:12] - Message Category */
	/* Bits [21:16] - Outbound queue ID for the
	operation completion message */
	/* Bits [23:22] - Reserved */
	/* Bits [28:24] - Buffer Count, indicates how
	many buffers are allocated for the message */
	/* Bits [30:29] - Reserved */
	/* Bits [31] - Message Valid bit */
} __attribute__((packed, aligned(4)));
| 145 | |||
| 146 | |||
/*
 * brief the data structure of PHY Start Command
 * used to enable the phy (64 bytes)
 */
struct phy_start_req {
	__le32	tag;
	__le32	ase_sh_lm_slr_phyid;
	struct sas_identify_frame sas_identify;
	u32	reserved[5];
} __attribute__((packed, aligned(4)));


/*
 * brief the data structure of PHY Stop Command
 * used to disable the phy (64 bytes)
 */
struct phy_stop_req {
	__le32	tag;
	__le32	phy_id;
	u32	reserved[13];
} __attribute__((packed, aligned(4)));
| 168 | |||
| 169 | |||
/* set device bits fis - device to host */
struct set_dev_bits_fis {
	u8	fis_type;	/* 0xA1*/
	u8	n_i_pmport;
	/* b7 : n Bit. Notification bit. If set device needs attention. */
	/* b6 : i Bit. Interrupt Bit */
	/* b5-b4: reserved2 */
	/* b3-b0: PM Port */
	u8	status;
	u8	error;
	u32	_r_a;
} __attribute__ ((packed));
/* PIO setup FIS - device to host */
struct pio_setup_fis {
	u8	fis_type;	/* 0x5f */
	u8	i_d_pmPort;
	/* b7 : reserved */
	/* b6 : i bit. Interrupt bit */
	/* b5 : d bit. data transfer direction. set to 1 for device to host
	xfer */
	/* b4 : reserved */
	/* b3-b0: PM Port */
	u8	status;
	u8	error;
	u8	lbal;
	u8	lbam;
	u8	lbah;
	u8	device;
	u8	lbal_exp;
	u8	lbam_exp;
	u8	lbah_exp;
	u8	_r_a;
	u8	sector_count;
	u8	sector_count_exp;
	u8	_r_b;
	u8	e_status;
	u8	_r_c[2];
	u8	transfer_count;
} __attribute__ ((packed));
| 209 | |||
/*
 * brief the data structure of SATA Completion Response
 * used to describe the sata task response (64 bytes)
 */
struct sata_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	u32	sata_resp[12];
} __attribute__((packed, aligned(4)));


/*
 * brief the data structure of SAS HW Event Notification
 * used to alert the host about a hardware event (64 bytes)
 */
struct hw_event_resp {
	__le32	lr_evt_status_phyid_portid;
	__le32	evt_param;
	__le32	npip_portstate;
	struct sas_identify_frame	sas_identify;
	struct dev_to_host_fis	sata_fis;
} __attribute__((packed, aligned(4)));
| 233 | |||
| 234 | |||
/*
 * brief the data structure of REGISTER DEVICE Command
 * used to describe MPI REGISTER DEVICE Command (64 bytes)
 */

struct reg_dev_req {
	__le32	tag;
	__le32	phyid_portid;
	__le32	dtype_dlr_retry;
	__le32	firstburstsize_ITNexustimeout;
	u32	sas_addr_hi;
	u32	sas_addr_low;
	__le32	upper_device_id;
	u32	reserved[8];
} __attribute__((packed, aligned(4)));


/*
 * brief the data structure of DEREGISTER DEVICE Command
 * used to request the SPC to remove all internal resources associated
 * with the device id (64 bytes)
 */

struct dereg_dev_req {
	__le32	tag;
	__le32	device_id;
	u32	reserved[13];
} __attribute__((packed, aligned(4)));


/*
 * brief the data structure of DEVICE_REGISTRATION Response
 * used to notify the completion of the device registration (64 bytes)
 */

struct dev_reg_resp {
	__le32	tag;
	__le32	status;
	__le32	device_id;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));
| 276 | |||
| 277 | |||
/*
 * brief the data structure of Local PHY Control Command
 * used to issue PHY CONTROL to a local phy (64 bytes)
 */
struct local_phy_ctl_req {
	__le32	tag;
	__le32	phyop_phyid;
	u32	reserved1[13];
} __attribute__((packed, aligned(4)));


/**
 * brief the data structure of Local Phy Control Response
 * used to describe MPI Local Phy Control Response (64 bytes)
 */
struct local_phy_ctl_resp {
	__le32	tag;
	__le32	phyop_phyid;
	__le32	status;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));


/* masks for the phyop_phyid / portop_portid fields */
#define OP_BITS 0x0000FF00
#define ID_BITS 0x0000000F
| 303 | |||
/*
 * brief the data structure of PORT Control Command
 * used to control port properties (64 bytes)
 */

struct port_ctl_req {
	__le32	tag;
	__le32	portop_portid;
	__le32	param0;
	__le32	param1;
	u32	reserved1[11];
} __attribute__((packed, aligned(4)));


/*
 * brief the data structure of HW Event Ack Command
 * used to acknowledge a received HW event (64 bytes)
 */

struct hw_event_ack_req {
	__le32	tag;
	__le32	sea_phyid_portid;
	__le32	param0;
	__le32	param1;
	u32	reserved1[11];
} __attribute__((packed, aligned(4)));
| 330 | |||
| 331 | |||
/*
 * brief the data structure of SSP Completion Response
 * used to indicate a SSP Completion (n bytes)
 */
struct ssp_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	__le32	ssptag_rescv_rescpad;
	struct ssp_response_iu ssp_resp_iu;
	__le32	residual_count;
} __attribute__((packed, aligned(4)));


/* "residual count valid" bit in ssptag_rescv_rescpad */
#define SSP_RESCV_BIT	0x00010000
| 347 | |||
/*
 * brief the data structure of SATA EVENT response
 * used to indicate a SATA Completion (64 bytes)
 */

struct sata_event_resp {
	__le32	tag;
	__le32	event;
	__le32	port_id;
	__le32	device_id;
	u32	reserved[11];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SSP EVENT response
 * used to indicate a SSP Completion (64 bytes)
 */

struct ssp_event_resp {
	__le32	tag;
	__le32	event;
	__le32	port_id;
	__le32	device_id;
	u32	reserved[11];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of General Event Notification Response
 * used to describe MPI General Event Notification Response (64 bytes)
 */
struct general_event_resp {
	__le32	status;
	__le32	inb_IOMB_payload[14];
} __attribute__((packed, aligned(4)));


#define GENERAL_EVENT_PAYLOAD	14
#define OPCODE_BITS	0x00000fff
| 386 | |||
/*
 * brief the data structure of SMP Request Command
 * used to describe MPI SMP REQUEST Command (64 bytes)
 */
struct smp_req {
	__le32	tag;
	__le32	device_id;
	__le32	len_ip_ir;
	/* Bits [0] - Indirect response */
	/* Bits [1] - Indirect Payload */
	/* Bits [15:2] - Reserved */
	/* Bits [23:16] - direct payload Len */
	/* Bits [31:24] - Reserved */
	u8	smp_req16[16];
	union {
		u8	smp_req[32];
		struct {
			__le64 long_req_addr;/* sg dma address, LE */
			__le32 long_req_size;/* LE */
			u32	_r_a;
			__le64 long_resp_addr;/* sg dma address, LE */
			__le32 long_resp_size;/* LE */
			u32	_r_b;
		} long_smp_req;/* sequencer extension */
	};
} __attribute__((packed, aligned(4)));
/*
 * brief the data structure of SMP Completion Response
 * used to describe MPI SMP Completion Response (64 bytes)
 */
struct smp_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	__le32	_r_a[12];
} __attribute__((packed, aligned(4)));
| 423 | |||
| 424 | /* | ||
| 425 | *brief the data structure of SSP SMP SATA Abort Command | ||
| 426 | * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) | ||
| 427 | */ | ||
| 428 | struct task_abort_req { | ||
| 429 | __le32 tag; | ||
| 430 | __le32 device_id; | ||
| 431 | __le32 tag_to_abort; | ||
| 432 | __le32 abort_all; | ||
| 433 | u32 reserved[11]; | ||
| 434 | } __attribute__((packed, aligned(4))); | ||
| 435 | |||
| 436 | /* These flags used for SSP SMP & SATA Abort */ | ||
| 437 | #define ABORT_MASK 0x3 | ||
| 438 | #define ABORT_SINGLE 0x0 | ||
| 439 | #define ABORT_ALL 0x1 | ||
| 440 | |||
/**
 * struct task_abort_resp - MPI SSP/SMP/SATA Abort response (64 bytes).
 */
struct task_abort_resp {
	__le32 tag;	/* tag of the originating task_abort_req */
	__le32 status;
	__le32 scp;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));
| 451 | |||
| 452 | |||
/**
 * struct sas_diag_start_end_req - MPI SAS Diagnostic Start/End command
 * (64 bytes).
 */
struct sas_diag_start_end_req {
	__le32 tag;
	__le32 operation_phyid;	/* operation code + target phy id */
	u32 reserved[13];
} __attribute__((packed, aligned(4)));
| 462 | |||
| 463 | |||
/**
 * struct sas_diag_execute_req - MPI SAS Diagnostic Execute command
 * (64 bytes).
 */
struct sas_diag_execute_req{
	__le32 tag;
	__le32 cmdtype_cmddesc_phyid;	/* command type/descriptor + phy id */
	__le32 pat1_pat2;	/* test patterns */
	__le32 threshold;
	__le32 codepat_errmsk;	/* code pattern + error mask */
	__le32 pmon;
	__le32 pERF1CTL;
	u32 reserved[8];
} __attribute__((packed, aligned(4)));
| 478 | |||
| 479 | |||
| 480 | #define SAS_DIAG_PARAM_BYTES 24 | ||
| 481 | |||
/*
 * struct set_dev_state_req - MPI Set Device State command (64 bytes).
 */
struct set_dev_state_req {
	__le32 tag;
	__le32 device_id;
	__le32 nds;	/* new device state (see NDS_BITS below) */
	u32 reserved[12];
} __attribute__((packed, aligned(4)));
| 492 | |||
/*
 * struct sas_re_initialization_req - MPI SAS re-initialization command.
 */
struct sas_re_initialization_req {

	__le32 tag;
	__le32 SSAHOLT;/* bit29 - set max port;
			** bit28 - set open reject cmd retries.
			** bit27 - set open reject data retries.
			** bit26 - set open reject option, remap:1 or not:0.
			** bit25 - set sata head of line time out.
			*/
	__le32 reserved_maxPorts;
	__le32 open_reject_cmdretries_data_retries;/* cmd retries: bit31-bit16;
						    * data retries: bit15-bit0.
						    */
	__le32 sata_hol_tmo;	/* SATA head-of-line timeout */
	u32 reserved1[10];
} __attribute__((packed, aligned(4)));
| 512 | |||
/*
 * struct sata_start_req - MPI SATA IO Start command (64 bytes).
 *
 * Carries the host-to-device FIS plus a single data descriptor
 * (addr/len) or an extended SGL reference (esgl).
 */

struct sata_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 data_len;
	__le32 ncqtag_atap_dir_m;	/* NCQ tag + ATAP + direction bits */
	struct host_to_dev_fis sata_fis;
	u32 reserved1;
	u32 reserved2;
	u32 addr_low;	/* data buffer dma address, low 32 bits */
	u32 addr_high;	/* data buffer dma address, high 32 bits */
	__le32 len;
	__le32 esgl;	/* extended SGL flag/reference */
} __attribute__((packed, aligned(4)));
| 531 | |||
/**
 * struct ssp_ini_tm_start_req - MPI SSP INI TM (task management) Start
 * command (64 bytes).
 */
struct ssp_ini_tm_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 relate_tag;	/* tag of the task the TMF relates to */
	__le32 tmf;		/* task management function code */
	u8 lun[8];
	__le32 ds_ads_m;
	u32 reserved[8];
} __attribute__((packed, aligned(4)));
| 545 | |||
| 546 | |||
/* SSP command information unit embedded in ssp_ini_io_start_req. */
struct ssp_info_unit {
	u8 lun[8];/* SCSI Logical Unit Number */
	u8 reserved1;/* reserved */
	u8 efb_prio_attr;
	/* B7   : enabledFirstBurst */
	/* B6-3 : taskPriority */
	/* B2-0 : taskAttribute */
	u8 reserved2;	/* reserved */
	u8 additional_cdb_len;
	/* B7-2 : additional_cdb_len */
	/* B1-0 : reserved */
	u8 cdb[16];/* The SCSI CDB up to 16 bytes length */
} __attribute__((packed, aligned(4)));
| 560 | |||
| 561 | |||
/**
 * struct ssp_ini_io_start_req - MPI SSP INI IO Start command (64 bytes).
 *
 * Carries the SSP information unit plus a single data descriptor
 * (addr/len) or an extended SGL reference (esgl).
 */
struct ssp_ini_io_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 data_len;
	__le32 dir_m_tlr;	/* direction + TLR control bits */
	struct ssp_info_unit ssp_iu;
	__le32 addr_low;	/* data buffer dma address, low 32 bits */
	__le32 addr_high;	/* data buffer dma address, high 32 bits */
	__le32 len;
	__le32 esgl;	/* extended SGL flag/reference */
} __attribute__((packed, aligned(4)));
| 577 | |||
| 578 | |||
/**
 * struct fw_flash_Update_req - MPI FW DOWNLOAD command (64 bytes).
 *
 * One chunk of a firmware image; the image data is referenced through
 * the SGL address/length fields.
 */
struct fw_flash_Update_req {
	__le32 tag;
	__le32 cur_image_offset;	/* offset of this chunk in the image */
	__le32 cur_image_len;		/* length of this chunk */
	__le32 total_image_len;
	u32 reserved0[7];
	__le32 sgl_addr_lo;
	__le32 sgl_addr_hi;
	__le32 len;
	__le32 ext_reserved;
} __attribute__((packed, aligned(4)));
| 594 | |||
| 595 | |||
| 596 | #define FWFLASH_IOMB_RESERVED_LEN 0x07 | ||
/**
 * struct fw_flash_Update_resp - MPI FW_FLASH_UPDATE response (64 bytes).
 *
 * NOTE(review): tag is declared dma_addr_t, whose width depends on the
 * build (32/64-bit), while every other IOMB here uses __le32 for the
 * tag — confirm this packed layout really matches the 64-byte IOMB.
 */
struct fw_flash_Update_resp {
	dma_addr_t tag;
	__le32 status;
	u32 reserved[13];
} __attribute__((packed, aligned(4)));
| 607 | |||
| 608 | |||
/**
 * struct get_nvm_data_req - MPI Get NVM Data command (64 bytes).
 *
 * Reads data from non-volatile storage on the HBA into the response
 * buffer described by resp_addr/resp_len.
 */
struct get_nvm_data_req {
	__le32 tag;
	__le32 len_ir_vpdd;	/* length + indirect/device-type bits */
	__le32 vpd_offset;
	u32 reserved[8];
	__le32 resp_addr_lo;	/* response buffer dma address, low */
	__le32 resp_addr_hi;	/* response buffer dma address, high */
	__le32 resp_len;
	u32 reserved1;
} __attribute__((packed, aligned(4)));
| 623 | |||
| 624 | |||
/* MPI Set NVM Data command (same layout as get_nvm_data_req). */
struct set_nvm_data_req {
	__le32 tag;
	__le32 len_ir_vpdd;	/* length + indirect/device-type bits */
	__le32 vpd_offset;
	u32 reserved[8];
	__le32 resp_addr_lo;	/* data buffer dma address, low */
	__le32 resp_addr_hi;	/* data buffer dma address, high */
	__le32 resp_len;
	u32 reserved1;
} __attribute__((packed, aligned(4)));
| 635 | |||
| 636 | |||
| 637 | #define TWI_DEVICE 0x0 | ||
| 638 | #define C_SEEPROM 0x1 | ||
| 639 | #define VPD_FLASH 0x4 | ||
| 640 | #define AAP1_RDUMP 0x5 | ||
| 641 | #define IOP_RDUMP 0x6 | ||
| 642 | #define EXPAN_ROM 0x7 | ||
| 643 | |||
| 644 | #define IPMode 0x80000000 | ||
| 645 | #define NVMD_TYPE 0x0000000F | ||
| 646 | #define NVMD_STAT 0x0000FFFF | ||
| 647 | #define NVMD_LEN 0xFF000000 | ||
/**
 * struct get_nvm_data_resp - MPI Get NVMD Data response (64 bytes).
 *
 * Decode the packed fields with the IPMode/NVMD_* masks above.
 */
struct get_nvm_data_resp {
	__le32 tag;
	__le32 ir_tda_bn_dps_das_nvm;
	__le32 dlen_status;	/* data length + status (NVMD_LEN/NVMD_STAT) */
	__le32 nvm_data[12];	/* inline payload for direct responses */
} __attribute__((packed, aligned(4)));
| 658 | |||
| 659 | |||
/**
 * struct sas_diag_start_end_resp - MPI SAS Diagnostic Start/End
 * response (64 bytes).
 */
struct sas_diag_start_end_resp {
	__le32 tag;
	__le32 status;
	u32 reserved[13];
} __attribute__((packed, aligned(4)));
| 670 | |||
| 671 | |||
/**
 * struct sas_diag_execute_resp - MPI SAS Diagnostic Execute response
 * (64 bytes).
 */
struct sas_diag_execute_resp {
	__le32 tag;
	__le32 cmdtype_cmddesc_phyid;	/* echoes the request's field */
	__le32 Status;
	__le32 ReportData;
	u32 reserved[11];
} __attribute__((packed, aligned(4)));
| 684 | |||
| 685 | |||
/**
 * struct set_dev_state_resp - MPI Set Device State response (64 bytes).
 */
struct set_dev_state_resp {
	__le32 tag;
	__le32 status;
	__le32 device_id;
	__le32 pds_nds;	/* previous + new device state (PDS_BITS/NDS_BITS) */
	u32 reserved[11];
} __attribute__((packed, aligned(4)));
| 698 | |||
| 699 | |||
| 700 | #define NDS_BITS 0x0F | ||
| 701 | #define PDS_BITS 0xF0 | ||
| 702 | |||
| 703 | /* | ||
| 704 | * HW Events type | ||
| 705 | */ | ||
| 706 | |||
| 707 | #define HW_EVENT_RESET_START 0x01 | ||
| 708 | #define HW_EVENT_CHIP_RESET_COMPLETE 0x02 | ||
| 709 | #define HW_EVENT_PHY_STOP_STATUS 0x03 | ||
| 710 | #define HW_EVENT_SAS_PHY_UP 0x04 | ||
| 711 | #define HW_EVENT_SATA_PHY_UP 0x05 | ||
| 712 | #define HW_EVENT_SATA_SPINUP_HOLD 0x06 | ||
| 713 | #define HW_EVENT_PHY_DOWN 0x07 | ||
| 714 | #define HW_EVENT_PORT_INVALID 0x08 | ||
| 715 | #define HW_EVENT_BROADCAST_CHANGE 0x09 | ||
| 716 | #define HW_EVENT_PHY_ERROR 0x0A | ||
| 717 | #define HW_EVENT_BROADCAST_SES 0x0B | ||
| 718 | #define HW_EVENT_INBOUND_CRC_ERROR 0x0C | ||
| 719 | #define HW_EVENT_HARD_RESET_RECEIVED 0x0D | ||
| 720 | #define HW_EVENT_MALFUNCTION 0x0E | ||
| 721 | #define HW_EVENT_ID_FRAME_TIMEOUT 0x0F | ||
| 722 | #define HW_EVENT_BROADCAST_EXP 0x10 | ||
| 723 | #define HW_EVENT_PHY_START_STATUS 0x11 | ||
| 724 | #define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 | ||
| 725 | #define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 | ||
| 726 | #define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 | ||
| 727 | #define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 | ||
| 728 | #define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 | ||
| 729 | #define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 | ||
| 730 | #define HW_EVENT_PORT_RECOVER 0x18 | ||
| 731 | #define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 | ||
| 732 | #define HW_EVENT_PORT_RESET_COMPLETE 0x20 | ||
| 733 | #define EVENT_BROADCAST_ASYNCH_EVENT 0x21 | ||
| 734 | |||
| 735 | /* port state */ | ||
| 736 | #define PORT_NOT_ESTABLISHED 0x00 | ||
| 737 | #define PORT_VALID 0x01 | ||
| 738 | #define PORT_LOSTCOMM 0x02 | ||
| 739 | #define PORT_IN_RESET 0x04 | ||
| 740 | #define PORT_INVALID 0x08 | ||
| 741 | |||
| 742 | /* | ||
| 743 | * SSP/SMP/SATA IO Completion Status values | ||
| 744 | */ | ||
| 745 | |||
| 746 | #define IO_SUCCESS 0x00 | ||
| 747 | #define IO_ABORTED 0x01 | ||
| 748 | #define IO_OVERFLOW 0x02 | ||
| 749 | #define IO_UNDERFLOW 0x03 | ||
| 750 | #define IO_FAILED 0x04 | ||
| 751 | #define IO_ABORT_RESET 0x05 | ||
| 752 | #define IO_NOT_VALID 0x06 | ||
| 753 | #define IO_NO_DEVICE 0x07 | ||
| 754 | #define IO_ILLEGAL_PARAMETER 0x08 | ||
| 755 | #define IO_LINK_FAILURE 0x09 | ||
| 756 | #define IO_PROG_ERROR 0x0A | ||
| 757 | #define IO_EDC_IN_ERROR 0x0B | ||
| 758 | #define IO_EDC_OUT_ERROR 0x0C | ||
| 759 | #define IO_ERROR_HW_TIMEOUT 0x0D | ||
| 760 | #define IO_XFER_ERROR_BREAK 0x0E | ||
| 761 | #define IO_XFER_ERROR_PHY_NOT_READY 0x0F | ||
| 762 | #define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 | ||
| 763 | #define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 | ||
| 764 | #define IO_OPEN_CNX_ERROR_BREAK 0x12 | ||
| 765 | #define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 | ||
| 766 | #define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 | ||
| 767 | #define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 | ||
| 768 | #define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 | ||
| 769 | #define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 | ||
| 770 | #define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 | ||
| 771 | #define IO_XFER_ERROR_NAK_RECEIVED 0x19 | ||
| 772 | #define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A | ||
| 773 | #define IO_XFER_ERROR_PEER_ABORTED 0x1B | ||
| 774 | #define IO_XFER_ERROR_RX_FRAME 0x1C | ||
| 775 | #define IO_XFER_ERROR_DMA 0x1D | ||
| 776 | #define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E | ||
| 777 | #define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F | ||
| 778 | #define IO_XFER_ERROR_SATA 0x20 | ||
| 779 | #define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 | ||
| 780 | #define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 | ||
| 781 | #define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 | ||
| 782 | #define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 | ||
| 783 | #define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 | ||
| 784 | #define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 | ||
| 785 | #define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 | ||
| 786 | #define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 | ||
| 787 | |||
| 788 | #define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 | ||
| 789 | #define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 | ||
| 790 | #define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 | ||
| 791 | |||
| 792 | #define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 | ||
| 793 | #define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 | ||
| 794 | #define IO_XFER_CMD_FRAME_ISSUED 0x36 | ||
| 795 | #define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 | ||
| 796 | #define IO_PORT_IN_RESET 0x38 | ||
| 797 | #define IO_DS_NON_OPERATIONAL 0x39 | ||
| 798 | #define IO_DS_IN_RECOVERY 0x3A | ||
| 799 | #define IO_TM_TAG_NOT_FOUND 0x3B | ||
| 800 | #define IO_XFER_PIO_SETUP_ERROR 0x3C | ||
| 801 | #define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D | ||
| 802 | #define IO_DS_IN_ERROR 0x3E | ||
| 803 | #define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F | ||
| 804 | #define IO_ABORT_IN_PROGRESS 0x40 | ||
| 805 | #define IO_ABORT_DELAYED 0x41 | ||
| 806 | #define IO_INVALID_LENGTH 0x42 | ||
| 807 | |||
| 808 | /* WARNING: This error code must always be the last number. | ||
| 809 | * If you add error code, modify this code also | ||
| 810 | * It is used as an index | ||
| 811 | */ | ||
| 812 | #define IO_ERROR_UNKNOWN_GENERIC 0x43 | ||
| 813 | |||
| 814 | /* MSGU CONFIGURATION TABLE*/ | ||
| 815 | |||
| 816 | #define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */ | ||
| 817 | #define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */ | ||
| 818 | #define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */ | ||
| 819 | #define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit4 */ | ||
| 820 | #define MSGU_IBDB_SET 0x04 | ||
| 821 | #define MSGU_HOST_INT_STATUS 0x08 | ||
| 822 | #define MSGU_HOST_INT_MASK 0x0C | ||
| 823 | #define MSGU_IOPIB_INT_STATUS 0x18 | ||
| 824 | #define MSGU_IOPIB_INT_MASK 0x1C | ||
| 825 | #define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */ | ||
| 826 | #define MSGU_MSGU_CONTROL 0x24 | ||
| 827 | #define MSGU_ODR 0x3C/* RevB */ | ||
| 828 | #define MSGU_ODCR 0x40/* RevB */ | ||
| 829 | #define MSGU_SCRATCH_PAD_0 0x44 | ||
| 830 | #define MSGU_SCRATCH_PAD_1 0x48 | ||
| 831 | #define MSGU_SCRATCH_PAD_2 0x4C | ||
| 832 | #define MSGU_SCRATCH_PAD_3 0x50 | ||
| 833 | #define MSGU_HOST_SCRATCH_PAD_0 0x54 | ||
| 834 | #define MSGU_HOST_SCRATCH_PAD_1 0x58 | ||
| 835 | #define MSGU_HOST_SCRATCH_PAD_2 0x5C | ||
| 836 | #define MSGU_HOST_SCRATCH_PAD_3 0x60 | ||
| 837 | #define MSGU_HOST_SCRATCH_PAD_4 0x64 | ||
| 838 | #define MSGU_HOST_SCRATCH_PAD_5 0x68 | ||
| 839 | #define MSGU_HOST_SCRATCH_PAD_6 0x6C | ||
| 840 | #define MSGU_HOST_SCRATCH_PAD_7 0x70 | ||
| 841 | #define MSGU_ODMR 0x74/* RevB */ | ||
| 842 | |||
| 843 | /* bit definition for ODMR register */ | ||
| 844 | #define ODMR_MASK_ALL 0xFFFFFFFF/* mask all | ||
| 845 | interrupt vector */ | ||
| 846 | #define ODMR_CLEAR_ALL 0/* clear all | ||
| 847 | interrupt vector */ | ||
| 848 | /* bit definition for ODCR register */ | ||
| 849 | #define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all | ||
| 850 | interrupt vector*/ | ||
/* MSIX Interrupts */
| 852 | #define MSIX_TABLE_OFFSET 0x2000 | ||
| 853 | #define MSIX_TABLE_ELEMENT_SIZE 0x10 | ||
| 854 | #define MSIX_INTERRUPT_CONTROL_OFFSET 0xC | ||
| 855 | #define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET) | ||
| 856 | #define MSIX_INTERRUPT_DISABLE 0x1 | ||
| 857 | #define MSIX_INTERRUPT_ENABLE 0x0 | ||
| 858 | |||
| 859 | |||
| 860 | /* state definition for Scratch Pad1 register */ | ||
| 861 | #define SCRATCH_PAD1_POR 0x00 /* power on reset state */ | ||
| 862 | #define SCRATCH_PAD1_SFR 0x01 /* soft reset state */ | ||
| 863 | #define SCRATCH_PAD1_ERR 0x02 /* error state */ | ||
| 864 | #define SCRATCH_PAD1_RDY 0x03 /* ready state */ | ||
| 865 | #define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */ | ||
| 866 | #define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */ | ||
| 867 | #define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1 | ||
| 868 | Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */ | ||
| 869 | #define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1 | ||
| 870 | Reserved bit 3 to 9 */ | ||
| 871 | |||
| 872 | /* state definition for Scratch Pad2 register */ | ||
| 873 | #define SCRATCH_PAD2_POR 0x00 /* power on state */ | ||
| 874 | #define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ | ||
| 875 | #define SCRATCH_PAD2_ERR 0x02 /* error state */ | ||
| 876 | #define SCRATCH_PAD2_RDY 0x03 /* ready state */ | ||
| 877 | #define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/ | ||
| 878 | #define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ | ||
| 879 | #define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 | ||
| 880 | Mask, bit1-0 State */ | ||
| 881 | #define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad1 | ||
| 882 | Reserved bit 2 to 9 */ | ||
| 883 | |||
| 884 | #define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ | ||
| 885 | #define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ | ||
| 886 | |||
| 887 | /* main configuration offset - byte offset */ | ||
| 888 | #define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */ | ||
| 889 | #define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */ | ||
| 890 | #define MAIN_FW_REVISION 0x08/* DWORD 0x02 */ | ||
| 891 | #define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */ | ||
| 892 | #define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */ | ||
| 893 | #define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */ | ||
| 894 | #define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */ | ||
| 895 | #define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */ | ||
| 896 | #define MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */ | ||
| 897 | #define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */ | ||
| 898 | #define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */ | ||
| 899 | #define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */ | ||
| 900 | #define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */ | ||
| 901 | #define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */ | ||
| 902 | #define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */ | ||
| 903 | #define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */ | ||
| 904 | #define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */ | ||
| 905 | #define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */ | ||
| 906 | #define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */ | ||
| 907 | #define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */ | ||
| 908 | #define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */ | ||
| 909 | #define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */ | ||
| 910 | #define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */ | ||
| 911 | #define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */ | ||
| 912 | #define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */ | ||
| 913 | #define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */ | ||
| 914 | #define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */ | ||
| 915 | #define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */ | ||
| 916 | #define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */ | ||
| 917 | #define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */ | ||
| 918 | #define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */ | ||
| 919 | #define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */ | ||
| 920 | #define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */ | ||
| 921 | #define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */ | ||
| 922 | #define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */ | ||
| 923 | |||
/* General Status Table offset - byte offset */
| 925 | #define GST_GSTLEN_MPIS_OFFSET 0x00 | ||
| 926 | #define GST_IQ_FREEZE_STATE0_OFFSET 0x04 | ||
| 927 | #define GST_IQ_FREEZE_STATE1_OFFSET 0x08 | ||
| 928 | #define GST_MSGUTCNT_OFFSET 0x0C | ||
| 929 | #define GST_IOPTCNT_OFFSET 0x10 | ||
| 930 | #define GST_PHYSTATE_OFFSET 0x18 | ||
| 931 | #define GST_PHYSTATE0_OFFSET 0x18 | ||
| 932 | #define GST_PHYSTATE1_OFFSET 0x1C | ||
| 933 | #define GST_PHYSTATE2_OFFSET 0x20 | ||
| 934 | #define GST_PHYSTATE3_OFFSET 0x24 | ||
| 935 | #define GST_PHYSTATE4_OFFSET 0x28 | ||
| 936 | #define GST_PHYSTATE5_OFFSET 0x2C | ||
| 937 | #define GST_PHYSTATE6_OFFSET 0x30 | ||
| 938 | #define GST_PHYSTATE7_OFFSET 0x34 | ||
| 939 | #define GST_RERRINFO_OFFSET 0x44 | ||
| 940 | |||
| 941 | /* General Status Table - MPI state */ | ||
| 942 | #define GST_MPI_STATE_UNINIT 0x00 | ||
| 943 | #define GST_MPI_STATE_INIT 0x01 | ||
| 944 | #define GST_MPI_STATE_TERMINATION 0x02 | ||
| 945 | #define GST_MPI_STATE_ERROR 0x03 | ||
| 946 | #define GST_MPI_STATE_MASK 0x07 | ||
| 947 | |||
| 948 | #define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 | ||
| 949 | #define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 | ||
| 950 | /* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ | ||
| 951 | #define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 | ||
| 952 | #define PCIE_EVENT_INTERRUPT 0x003044 | ||
| 953 | #define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 | ||
| 954 | #define PCIE_ERROR_INTERRUPT 0x00304C | ||
/* signature definition for host scratch pad0 register */
| 956 | #define SPC_SOFT_RESET_SIGNATURE 0x252acbcd | ||
| 957 | /* Signature for Soft Reset */ | ||
| 958 | |||
| 959 | /* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ | ||
| 960 | #define SPC_REG_RESET 0x000000/* reset register */ | ||
| 961 | |||
/* bit definition for SPC_RESET register */
| 963 | #define SPC_REG_RESET_OSSP 0x00000001 | ||
| 964 | #define SPC_REG_RESET_RAAE 0x00000002 | ||
| 965 | #define SPC_REG_RESET_PCS_SPBC 0x00000004 | ||
| 966 | #define SPC_REG_RESET_PCS_IOP_SS 0x00000008 | ||
| 967 | #define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 | ||
| 968 | #define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 | ||
| 969 | #define SPC_REG_RESET_PCS_LM 0x00000040 | ||
| 970 | #define SPC_REG_RESET_PCS 0x00000080 | ||
| 971 | #define SPC_REG_RESET_GSM 0x00000100 | ||
| 972 | #define SPC_REG_RESET_DDR2 0x00010000 | ||
| 973 | #define SPC_REG_RESET_BDMA_CORE 0x00020000 | ||
| 974 | #define SPC_REG_RESET_BDMA_SXCBI 0x00040000 | ||
| 975 | #define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 | ||
| 976 | #define SPC_REG_RESET_PCIE_PWR 0x00100000 | ||
| 977 | #define SPC_REG_RESET_PCIE_SFT 0x00200000 | ||
| 978 | #define SPC_REG_RESET_PCS_SXCBI 0x00400000 | ||
| 979 | #define SPC_REG_RESET_LMS_SXCBI 0x00800000 | ||
| 980 | #define SPC_REG_RESET_PMIC_SXCBI 0x01000000 | ||
| 981 | #define SPC_REG_RESET_PMIC_CORE 0x02000000 | ||
| 982 | #define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 | ||
| 983 | #define SPC_REG_RESET_DEVICE 0x80000000 | ||
| 984 | |||
| 985 | /* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ | ||
| 986 | #define SPC_IBW_AXI_TRANSLATION_LOW 0x003258 | ||
| 987 | |||
| 988 | #define MBIC_AAP1_ADDR_BASE 0x060000 | ||
| 989 | #define MBIC_IOP_ADDR_BASE 0x070000 | ||
| 990 | #define GSM_ADDR_BASE 0x0700000 | ||
| 991 | /* Dynamic map through Bar4 - 0x00700000 */ | ||
| 992 | #define GSM_CONFIG_RESET 0x00000000 | ||
| 993 | #define RAM_ECC_DB_ERR 0x00000018 | ||
| 994 | #define GSM_READ_ADDR_PARITY_INDIC 0x00000058 | ||
| 995 | #define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 | ||
| 996 | #define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 | ||
| 997 | #define GSM_READ_ADDR_PARITY_CHECK 0x00000038 | ||
| 998 | #define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 | ||
| 999 | #define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 | ||
| 1000 | |||
| 1001 | #define RB6_ACCESS_REG 0x6A0000 | ||
| 1002 | #define HDAC_EXEC_CMD 0x0002 | ||
| 1003 | #define HDA_C_PA 0xcb | ||
| 1004 | #define HDA_SEQ_ID_BITS 0x00ff0000 | ||
| 1005 | #define HDA_GSM_OFFSET_BITS 0x00FFFFFF | ||
| 1006 | #define MBIC_AAP1_ADDR_BASE 0x060000 | ||
| 1007 | #define MBIC_IOP_ADDR_BASE 0x070000 | ||
| 1008 | #define GSM_ADDR_BASE 0x0700000 | ||
| 1009 | #define SPC_TOP_LEVEL_ADDR_BASE 0x000000 | ||
| 1010 | #define GSM_CONFIG_RESET_VALUE 0x00003b00 | ||
| 1011 | #define GPIO_ADDR_BASE 0x00090000 | ||
| 1012 | #define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c | ||
| 1013 | |||
| 1014 | /* RB6 offset */ | ||
| 1015 | #define SPC_RB6_OFFSET 0x80C0 | ||
| 1016 | /* Magic number of soft reset for RB6 */ | ||
| 1017 | #define RB6_MAGIC_NUMBER_RST 0x1234 | ||
| 1018 | |||
| 1019 | /* Device Register status */ | ||
| 1020 | #define DEVREG_SUCCESS 0x00 | ||
| 1021 | #define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 | ||
| 1022 | #define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 | ||
| 1023 | #define DEVREG_FAILURE_INVALID_PHY_ID 0x03 | ||
| 1024 | #define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 | ||
| 1025 | #define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 | ||
| 1026 | #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 | ||
| 1027 | #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 | ||
| 1028 | |||
| 1029 | #endif | ||
| 1030 | |||
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c new file mode 100644 index 000000000000..42ebe725d5a5 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_init.c | |||
| @@ -0,0 +1,891 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #include "pm8001_sas.h" | ||
| 42 | #include "pm8001_chips.h" | ||
| 43 | |||
/* SAS transport template; registered elsewhere in this file. */
static struct scsi_transport_template *pm8001_stt;

/* Per-chip info, indexed by chip id: phy count and dispatch table. */
static const struct pm8001_chip_info pm8001_chips[] = {
	[chip_8001] = { 8, &pm8001_8001_dispatch,},
};
/* host instance counter — presumably bumped once per probe; confirm below */
static int pm8001_id;

/* all registered hbas (locking, if any, not visible in this view) */
LIST_HEAD(hba_list);
| 52 | |||
/**
 * The main structure which LLDD must register for scsi core.
 * Most callbacks are delegated to the generic libsas sas_* handlers;
 * only the pm8001_* entries are driver-specific.
 */
static struct scsi_host_template pm8001_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= pm8001_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= pm8001_scan_finished,
	.scan_start		= pm8001_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= pm8001_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= pm8001_host_attrs,
};
| 81 | |||
/**
 * Callbacks through which the SAS layer asks this LLDD to execute
 * tasks, control phys and perform error handling.
 */
static struct sas_domain_function_template pm8001_transport_ops = {
	.lldd_dev_found		= pm8001_dev_found,
	.lldd_dev_gone		= pm8001_dev_gone,

	.lldd_execute_task	= pm8001_queue_command,
	.lldd_control_phy	= pm8001_phy_control,

	/* error handling / task management */
	.lldd_abort_task	= pm8001_abort_task,
	.lldd_abort_task_set	= pm8001_abort_task_set,
	.lldd_clear_aca		= pm8001_clear_aca,
	.lldd_clear_task_set	= pm8001_clear_task_set,
	.lldd_I_T_nexus_reset	= pm8001_I_T_nexus_reset,
	.lldd_lu_reset		= pm8001_lu_reset,
	.lldd_query_task	= pm8001_query_task,
};
| 100 | |||
| 101 | /** | ||
| 102 | *pm8001_phy_init - initiate our adapter phys | ||
| 103 | *@pm8001_ha: our hba structure. | ||
| 104 | *@phy_id: phy id. | ||
| 105 | */ | ||
| 106 | static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, | ||
| 107 | int phy_id) | ||
| 108 | { | ||
| 109 | struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; | ||
| 110 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
| 111 | phy->phy_state = 0; | ||
| 112 | phy->pm8001_ha = pm8001_ha; | ||
| 113 | sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0; | ||
| 114 | sas_phy->class = SAS; | ||
| 115 | sas_phy->iproto = SAS_PROTOCOL_ALL; | ||
| 116 | sas_phy->tproto = 0; | ||
| 117 | sas_phy->type = PHY_TYPE_PHYSICAL; | ||
| 118 | sas_phy->role = PHY_ROLE_INITIATOR; | ||
| 119 | sas_phy->oob_mode = OOB_NOT_CONNECTED; | ||
| 120 | sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; | ||
| 121 | sas_phy->id = phy_id; | ||
| 122 | sas_phy->sas_addr = &pm8001_ha->sas_addr[0]; | ||
| 123 | sas_phy->frame_rcvd = &phy->frame_rcvd[0]; | ||
| 124 | sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata; | ||
| 125 | sas_phy->lldd_phy = phy; | ||
| 126 | } | ||
| 127 | |||
/**
 * pm8001_free - free hba
 * @pm8001_ha: our hba structure.
 *
 * Releases every coherent DMA region in the memory map, unmaps the
 * chip registers, drops the Scsi_Host reference, cancels any queued
 * delayed work, then frees the tag map and the hba itself.
 */
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	struct pm8001_wq *wq;

	if (!pm8001_ha)
		return;

	/* return all coherent DMA regions allocated at init time */
	for (i = 0; i < USI_MAX_MEMCNT; i++) {
		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
			pci_free_consistent(pm8001_ha->pdev,
				pm8001_ha->memoryMap.region[i].element_size,
				pm8001_ha->memoryMap.region[i].virt_ptr,
				pm8001_ha->memoryMap.region[i].phys_addr);
		}
	}
	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
	if (pm8001_ha->shost)
		scsi_host_put(pm8001_ha->shost);
	/* NOTE(review): delayed work is cancelled after the host reference
	 * is dropped — confirm nothing on wq_list touches the shost. */
	list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
		cancel_delayed_work(&wq->work_q);
	kfree(pm8001_ha->tags);
	kfree(pm8001_ha);
}
| 157 | |||
#ifdef PM8001_USE_TASKLET
/**
 * pm8001_tasklet - deferred interrupt work; runs the chip ISR dispatcher
 * @opaque: the hba structure the tasklet was initialised with
 */
static void pm8001_tasklet(unsigned long opaque)
{
	struct pm8001_hba_info *pm8001_ha;

	pm8001_ha = (struct pm8001_hba_info *)opaque;
	/* the tasklet is only ever scheduled with a valid hba pointer */
	BUG_ON(!pm8001_ha);
	PM8001_CHIP_DISP->isr(pm8001_ha);
}
#endif
| 168 | |||
| 169 | |||
| 170 | /** | ||
| 171 | * pm8001_interrupt - when HBA originate a interrupt,we should invoke this | ||
| 172 | * dispatcher to handle each case. | ||
| 173 | * @irq: irq number. | ||
| 174 | * @opaque: the passed general host adapter struct | ||
| 175 | */ | ||
| 176 | static irqreturn_t pm8001_interrupt(int irq, void *opaque) | ||
| 177 | { | ||
| 178 | struct pm8001_hba_info *pm8001_ha; | ||
| 179 | irqreturn_t ret = IRQ_HANDLED; | ||
| 180 | struct sas_ha_struct *sha = opaque; | ||
| 181 | pm8001_ha = sha->lldd_ha; | ||
| 182 | if (unlikely(!pm8001_ha)) | ||
| 183 | return IRQ_NONE; | ||
| 184 | if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) | ||
| 185 | return IRQ_NONE; | ||
| 186 | #ifdef PM8001_USE_TASKLET | ||
| 187 | tasklet_schedule(&pm8001_ha->tasklet); | ||
| 188 | #else | ||
| 189 | ret = PM8001_CHIP_DISP->isr(pm8001_ha); | ||
| 190 | #endif | ||
| 191 | return ret; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | ||
| 195 | * pm8001_alloc - initiate our hba structure and 6 DMAs area. | ||
| 196 | * @pm8001_ha:our hba structure. | ||
| 197 | * | ||
| 198 | */ | ||
| 199 | static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha) | ||
| 200 | { | ||
| 201 | int i; | ||
| 202 | spin_lock_init(&pm8001_ha->lock); | ||
| 203 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) | ||
| 204 | pm8001_phy_init(pm8001_ha, i); | ||
| 205 | |||
| 206 | pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); | ||
| 207 | if (!pm8001_ha->tags) | ||
| 208 | goto err_out; | ||
| 209 | /* MPI Memory region 1 for AAP Event Log for fw */ | ||
| 210 | pm8001_ha->memoryMap.region[AAP1].num_elements = 1; | ||
| 211 | pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; | ||
| 212 | pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE; | ||
| 213 | pm8001_ha->memoryMap.region[AAP1].alignment = 32; | ||
| 214 | |||
| 215 | /* MPI Memory region 2 for IOP Event Log for fw */ | ||
| 216 | pm8001_ha->memoryMap.region[IOP].num_elements = 1; | ||
| 217 | pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE; | ||
| 218 | pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; | ||
| 219 | pm8001_ha->memoryMap.region[IOP].alignment = 32; | ||
| 220 | |||
| 221 | /* MPI Memory region 3 for consumer Index of inbound queues */ | ||
| 222 | pm8001_ha->memoryMap.region[CI].num_elements = 1; | ||
| 223 | pm8001_ha->memoryMap.region[CI].element_size = 4; | ||
| 224 | pm8001_ha->memoryMap.region[CI].total_len = 4; | ||
| 225 | pm8001_ha->memoryMap.region[CI].alignment = 4; | ||
| 226 | |||
| 227 | /* MPI Memory region 4 for producer Index of outbound queues */ | ||
| 228 | pm8001_ha->memoryMap.region[PI].num_elements = 1; | ||
| 229 | pm8001_ha->memoryMap.region[PI].element_size = 4; | ||
| 230 | pm8001_ha->memoryMap.region[PI].total_len = 4; | ||
| 231 | pm8001_ha->memoryMap.region[PI].alignment = 4; | ||
| 232 | |||
| 233 | /* MPI Memory region 5 inbound queues */ | ||
| 234 | pm8001_ha->memoryMap.region[IB].num_elements = 256; | ||
| 235 | pm8001_ha->memoryMap.region[IB].element_size = 64; | ||
| 236 | pm8001_ha->memoryMap.region[IB].total_len = 256 * 64; | ||
| 237 | pm8001_ha->memoryMap.region[IB].alignment = 64; | ||
| 238 | |||
| 239 | /* MPI Memory region 6 inbound queues */ | ||
| 240 | pm8001_ha->memoryMap.region[OB].num_elements = 256; | ||
| 241 | pm8001_ha->memoryMap.region[OB].element_size = 64; | ||
| 242 | pm8001_ha->memoryMap.region[OB].total_len = 256 * 64; | ||
| 243 | pm8001_ha->memoryMap.region[OB].alignment = 64; | ||
| 244 | |||
| 245 | /* Memory region write DMA*/ | ||
| 246 | pm8001_ha->memoryMap.region[NVMD].num_elements = 1; | ||
| 247 | pm8001_ha->memoryMap.region[NVMD].element_size = 4096; | ||
| 248 | pm8001_ha->memoryMap.region[NVMD].total_len = 4096; | ||
| 249 | /* Memory region for devices*/ | ||
| 250 | pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1; | ||
| 251 | pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES * | ||
| 252 | sizeof(struct pm8001_device); | ||
| 253 | pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES * | ||
| 254 | sizeof(struct pm8001_device); | ||
| 255 | |||
| 256 | /* Memory region for ccb_info*/ | ||
| 257 | pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1; | ||
| 258 | pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB * | ||
| 259 | sizeof(struct pm8001_ccb_info); | ||
| 260 | pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * | ||
| 261 | sizeof(struct pm8001_ccb_info); | ||
| 262 | |||
| 263 | for (i = 0; i < USI_MAX_MEMCNT; i++) { | ||
| 264 | if (pm8001_mem_alloc(pm8001_ha->pdev, | ||
| 265 | &pm8001_ha->memoryMap.region[i].virt_ptr, | ||
| 266 | &pm8001_ha->memoryMap.region[i].phys_addr, | ||
| 267 | &pm8001_ha->memoryMap.region[i].phys_addr_hi, | ||
| 268 | &pm8001_ha->memoryMap.region[i].phys_addr_lo, | ||
| 269 | pm8001_ha->memoryMap.region[i].total_len, | ||
| 270 | pm8001_ha->memoryMap.region[i].alignment) != 0) { | ||
| 271 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 272 | pm8001_printk("Mem%d alloc failed\n", | ||
| 273 | i)); | ||
| 274 | goto err_out; | ||
| 275 | } | ||
| 276 | } | ||
| 277 | |||
| 278 | pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; | ||
| 279 | for (i = 0; i < PM8001_MAX_DEVICES; i++) { | ||
| 280 | pm8001_ha->devices[i].dev_type = NO_DEVICE; | ||
| 281 | pm8001_ha->devices[i].id = i; | ||
| 282 | pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; | ||
| 283 | pm8001_ha->devices[i].running_req = 0; | ||
| 284 | } | ||
| 285 | pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr; | ||
| 286 | for (i = 0; i < PM8001_MAX_CCB; i++) { | ||
| 287 | pm8001_ha->ccb_info[i].ccb_dma_handle = | ||
| 288 | pm8001_ha->memoryMap.region[CCB_MEM].phys_addr + | ||
| 289 | i * sizeof(struct pm8001_ccb_info); | ||
| 290 | pm8001_ha->ccb_info[i].task = NULL; | ||
| 291 | pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff; | ||
| 292 | pm8001_ha->ccb_info[i].device = NULL; | ||
| 293 | ++pm8001_ha->tags_num; | ||
| 294 | } | ||
| 295 | pm8001_ha->flags = PM8001F_INIT_TIME; | ||
| 296 | /* Initialize tags */ | ||
| 297 | pm8001_tag_init(pm8001_ha); | ||
| 298 | return 0; | ||
| 299 | err_out: | ||
| 300 | return 1; | ||
| 301 | } | ||
| 302 | |||
| 303 | /** | ||
| 304 | * pm8001_ioremap - remap the pci high physical address to kernal virtual | ||
| 305 | * address so that we can access them. | ||
| 306 | * @pm8001_ha:our hba structure. | ||
| 307 | */ | ||
| 308 | static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) | ||
| 309 | { | ||
| 310 | u32 bar; | ||
| 311 | u32 logicalBar = 0; | ||
| 312 | struct pci_dev *pdev; | ||
| 313 | |||
| 314 | pdev = pm8001_ha->pdev; | ||
| 315 | /* map pci mem (PMC pci base 0-3)*/ | ||
| 316 | for (bar = 0; bar < 6; bar++) { | ||
| 317 | /* | ||
| 318 | ** logical BARs for SPC: | ||
| 319 | ** bar 0 and 1 - logical BAR0 | ||
| 320 | ** bar 2 and 3 - logical BAR1 | ||
| 321 | ** bar4 - logical BAR2 | ||
| 322 | ** bar5 - logical BAR3 | ||
| 323 | ** Skip the appropriate assignments: | ||
| 324 | */ | ||
| 325 | if ((bar == 1) || (bar == 3)) | ||
| 326 | continue; | ||
| 327 | if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { | ||
| 328 | pm8001_ha->io_mem[logicalBar].membase = | ||
| 329 | pci_resource_start(pdev, bar); | ||
| 330 | pm8001_ha->io_mem[logicalBar].membase &= | ||
| 331 | (u32)PCI_BASE_ADDRESS_MEM_MASK; | ||
| 332 | pm8001_ha->io_mem[logicalBar].memsize = | ||
| 333 | pci_resource_len(pdev, bar); | ||
| 334 | pm8001_ha->io_mem[logicalBar].memvirtaddr = | ||
| 335 | ioremap(pm8001_ha->io_mem[logicalBar].membase, | ||
| 336 | pm8001_ha->io_mem[logicalBar].memsize); | ||
| 337 | PM8001_INIT_DBG(pm8001_ha, | ||
| 338 | pm8001_printk("PCI: bar %d, logicalBar %d " | ||
| 339 | "virt_addr=%lx,len=%d\n", bar, logicalBar, | ||
| 340 | (unsigned long) | ||
| 341 | pm8001_ha->io_mem[logicalBar].memvirtaddr, | ||
| 342 | pm8001_ha->io_mem[logicalBar].memsize)); | ||
| 343 | } else { | ||
| 344 | pm8001_ha->io_mem[logicalBar].membase = 0; | ||
| 345 | pm8001_ha->io_mem[logicalBar].memsize = 0; | ||
| 346 | pm8001_ha->io_mem[logicalBar].memvirtaddr = 0; | ||
| 347 | } | ||
| 348 | logicalBar++; | ||
| 349 | } | ||
| 350 | return 0; | ||
| 351 | } | ||
| 352 | |||
| 353 | /** | ||
| 354 | * pm8001_pci_alloc - initialize our ha card structure | ||
| 355 | * @pdev: pci device. | ||
| 356 | * @ent: ent | ||
| 357 | * @shost: scsi host struct which has been initialized before. | ||
| 358 | */ | ||
| 359 | static struct pm8001_hba_info *__devinit | ||
| 360 | pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost) | ||
| 361 | { | ||
| 362 | struct pm8001_hba_info *pm8001_ha; | ||
| 363 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 364 | |||
| 365 | |||
| 366 | pm8001_ha = sha->lldd_ha; | ||
| 367 | if (!pm8001_ha) | ||
| 368 | return NULL; | ||
| 369 | |||
| 370 | pm8001_ha->pdev = pdev; | ||
| 371 | pm8001_ha->dev = &pdev->dev; | ||
| 372 | pm8001_ha->chip_id = chip_id; | ||
| 373 | pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; | ||
| 374 | pm8001_ha->irq = pdev->irq; | ||
| 375 | pm8001_ha->sas = sha; | ||
| 376 | pm8001_ha->shost = shost; | ||
| 377 | pm8001_ha->id = pm8001_id++; | ||
| 378 | INIT_LIST_HEAD(&pm8001_ha->wq_list); | ||
| 379 | pm8001_ha->logging_level = 0x01; | ||
| 380 | sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); | ||
| 381 | #ifdef PM8001_USE_TASKLET | ||
| 382 | tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, | ||
| 383 | (unsigned long)pm8001_ha); | ||
| 384 | #endif | ||
| 385 | pm8001_ioremap(pm8001_ha); | ||
| 386 | if (!pm8001_alloc(pm8001_ha)) | ||
| 387 | return pm8001_ha; | ||
| 388 | pm8001_free(pm8001_ha); | ||
| 389 | return NULL; | ||
| 390 | } | ||
| 391 | |||
| 392 | /** | ||
| 393 | * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit | ||
| 394 | * @pdev: pci device. | ||
| 395 | */ | ||
| 396 | static int pci_go_44(struct pci_dev *pdev) | ||
| 397 | { | ||
| 398 | int rc; | ||
| 399 | |||
| 400 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) { | ||
| 401 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44)); | ||
| 402 | if (rc) { | ||
| 403 | rc = pci_set_consistent_dma_mask(pdev, | ||
| 404 | DMA_BIT_MASK(32)); | ||
| 405 | if (rc) { | ||
| 406 | dev_printk(KERN_ERR, &pdev->dev, | ||
| 407 | "44-bit DMA enable failed\n"); | ||
| 408 | return rc; | ||
| 409 | } | ||
| 410 | } | ||
| 411 | } else { | ||
| 412 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 413 | if (rc) { | ||
| 414 | dev_printk(KERN_ERR, &pdev->dev, | ||
| 415 | "32-bit DMA enable failed\n"); | ||
| 416 | return rc; | ||
| 417 | } | ||
| 418 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 419 | if (rc) { | ||
| 420 | dev_printk(KERN_ERR, &pdev->dev, | ||
| 421 | "32-bit consistent DMA enable failed\n"); | ||
| 422 | return rc; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | return rc; | ||
| 426 | } | ||
| 427 | |||
| 428 | /** | ||
| 429 | * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them. | ||
| 430 | * @shost: scsi host which has been allocated outside. | ||
| 431 | * @chip_info: our ha struct. | ||
| 432 | */ | ||
| 433 | static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost, | ||
| 434 | const struct pm8001_chip_info *chip_info) | ||
| 435 | { | ||
| 436 | int phy_nr, port_nr; | ||
| 437 | struct asd_sas_phy **arr_phy; | ||
| 438 | struct asd_sas_port **arr_port; | ||
| 439 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 440 | |||
| 441 | phy_nr = chip_info->n_phy; | ||
| 442 | port_nr = phy_nr; | ||
| 443 | memset(sha, 0x00, sizeof(*sha)); | ||
| 444 | arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); | ||
| 445 | if (!arr_phy) | ||
| 446 | goto exit; | ||
| 447 | arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); | ||
| 448 | if (!arr_port) | ||
| 449 | goto exit_free2; | ||
| 450 | |||
| 451 | sha->sas_phy = arr_phy; | ||
| 452 | sha->sas_port = arr_port; | ||
| 453 | sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL); | ||
| 454 | if (!sha->lldd_ha) | ||
| 455 | goto exit_free1; | ||
| 456 | |||
| 457 | shost->transportt = pm8001_stt; | ||
| 458 | shost->max_id = PM8001_MAX_DEVICES; | ||
| 459 | shost->max_lun = 8; | ||
| 460 | shost->max_channel = 0; | ||
| 461 | shost->unique_id = pm8001_id; | ||
| 462 | shost->max_cmd_len = 16; | ||
| 463 | shost->can_queue = PM8001_CAN_QUEUE; | ||
| 464 | shost->cmd_per_lun = 32; | ||
| 465 | return 0; | ||
| 466 | exit_free1: | ||
| 467 | kfree(arr_port); | ||
| 468 | exit_free2: | ||
| 469 | kfree(arr_phy); | ||
| 470 | exit: | ||
| 471 | return -1; | ||
| 472 | } | ||
| 473 | |||
| 474 | /** | ||
| 475 | * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas | ||
| 476 | * @shost: scsi host which has been allocated outside | ||
| 477 | * @chip_info: our ha struct. | ||
| 478 | */ | ||
| 479 | static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost, | ||
| 480 | const struct pm8001_chip_info *chip_info) | ||
| 481 | { | ||
| 482 | int i = 0; | ||
| 483 | struct pm8001_hba_info *pm8001_ha; | ||
| 484 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 485 | |||
| 486 | pm8001_ha = sha->lldd_ha; | ||
| 487 | for (i = 0; i < chip_info->n_phy; i++) { | ||
| 488 | sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; | ||
| 489 | sha->sas_port[i] = &pm8001_ha->port[i].sas_port; | ||
| 490 | } | ||
| 491 | sha->sas_ha_name = DRV_NAME; | ||
| 492 | sha->dev = pm8001_ha->dev; | ||
| 493 | |||
| 494 | sha->lldd_module = THIS_MODULE; | ||
| 495 | sha->sas_addr = &pm8001_ha->sas_addr[0]; | ||
| 496 | sha->num_phys = chip_info->n_phy; | ||
| 497 | sha->lldd_max_execute_num = 1; | ||
| 498 | sha->lldd_queue_size = PM8001_CAN_QUEUE; | ||
| 499 | sha->core.shost = shost; | ||
| 500 | } | ||
| 501 | |||
| 502 | /** | ||
| 503 | * pm8001_init_sas_add - initialize sas address | ||
| 504 | * @chip_info: our ha struct. | ||
| 505 | * | ||
| 506 | * Currently we just set the fixed SAS address to our HBA,for manufacture, | ||
| 507 | * it should read from the EEPROM | ||
| 508 | */ | ||
| 509 | static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) | ||
| 510 | { | ||
| 511 | u8 i; | ||
| 512 | #ifdef PM8001_READ_VPD | ||
| 513 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 514 | pm8001_ha->nvmd_completion = &completion; | ||
| 515 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0); | ||
| 516 | wait_for_completion(&completion); | ||
| 517 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) { | ||
| 518 | memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, | ||
| 519 | SAS_ADDR_SIZE); | ||
| 520 | PM8001_INIT_DBG(pm8001_ha, | ||
| 521 | pm8001_printk("phy %d sas_addr = %x \n", i, | ||
| 522 | (u64)pm8001_ha->phy[i].dev_sas_addr)); | ||
| 523 | } | ||
| 524 | #else | ||
| 525 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) { | ||
| 526 | pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL; | ||
| 527 | pm8001_ha->phy[i].dev_sas_addr = | ||
| 528 | cpu_to_be64((u64) | ||
| 529 | (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); | ||
| 530 | } | ||
| 531 | memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, | ||
| 532 | SAS_ADDR_SIZE); | ||
| 533 | #endif | ||
| 534 | } | ||
| 535 | |||
#ifdef PM8001_USE_MSIX
/**
 * pm8001_setup_msix - enable MSI-X interrupts and register the handlers
 * @pm8001_ha: our ha struct.
 * @irq_handler: handler to attach to each allocated vector.
 *
 * Returns 0 on success or the pci_enable_msix()/request_irq() error.
 * On a request_irq() failure every vector registered so far is freed
 * and MSI-X is disabled before the error is returned.
 */
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
	irq_handler_t irq_handler)
{
	u32 i = 0, j = 0;
	u32 number_of_intr = 1;
	int flag = 0;
	u32 max_entry;
	int rc;
	max_entry = sizeof(pm8001_ha->msix_entries) /
		sizeof(pm8001_ha->msix_entries[0]);
	flag |= IRQF_DISABLED;
	for (i = 0; i < max_entry ; i++)
		pm8001_ha->msix_entries[i].entry = i;
	rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
		number_of_intr);
	pm8001_ha->number_of_intr = number_of_intr;
	if (!rc) {
		for (i = 0; i < number_of_intr; i++) {
			/* keep the error so the caller sees the failure
			 * (previously it was discarded and 0 returned) */
			rc = request_irq(pm8001_ha->msix_entries[i].vector,
				irq_handler, flag, DRV_NAME,
				SHOST_TO_SAS_HA(pm8001_ha->shost));
			if (rc) {
				/* unwind the vectors already registered */
				for (j = 0; j < i; j++)
					free_irq(
					pm8001_ha->msix_entries[j].vector,
					SHOST_TO_SAS_HA(pm8001_ha->shost));
				pci_disable_msix(pm8001_ha->pdev);
				break;
			}
		}
	}
	return rc;
}
#endif
| 575 | |||
| 576 | /** | ||
| 577 | * pm8001_request_irq - register interrupt | ||
| 578 | * @chip_info: our ha struct. | ||
| 579 | */ | ||
| 580 | static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) | ||
| 581 | { | ||
| 582 | struct pci_dev *pdev; | ||
| 583 | irq_handler_t irq_handler = pm8001_interrupt; | ||
| 584 | int rc; | ||
| 585 | |||
| 586 | pdev = pm8001_ha->pdev; | ||
| 587 | |||
| 588 | #ifdef PM8001_USE_MSIX | ||
| 589 | if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) | ||
| 590 | return pm8001_setup_msix(pm8001_ha, irq_handler); | ||
| 591 | else | ||
| 592 | goto intx; | ||
| 593 | #endif | ||
| 594 | |||
| 595 | intx: | ||
| 596 | /* intialize the INT-X interrupt */ | ||
| 597 | rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, | ||
| 598 | SHOST_TO_SAS_HA(pm8001_ha->shost)); | ||
| 599 | return rc; | ||
| 600 | } | ||
| 601 | |||
| 602 | /** | ||
| 603 | * pm8001_pci_probe - probe supported device | ||
| 604 | * @pdev: pci device which kernel has been prepared for. | ||
| 605 | * @ent: pci device id | ||
| 606 | * | ||
| 607 | * This function is the main initialization function, when register a new | ||
| 608 | * pci driver it is invoked, all struct an hardware initilization should be done | ||
| 609 | * here, also, register interrupt | ||
| 610 | */ | ||
| 611 | static int __devinit pm8001_pci_probe(struct pci_dev *pdev, | ||
| 612 | const struct pci_device_id *ent) | ||
| 613 | { | ||
| 614 | unsigned int rc; | ||
| 615 | u32 pci_reg; | ||
| 616 | struct pm8001_hba_info *pm8001_ha; | ||
| 617 | struct Scsi_Host *shost = NULL; | ||
| 618 | const struct pm8001_chip_info *chip; | ||
| 619 | |||
| 620 | dev_printk(KERN_INFO, &pdev->dev, | ||
| 621 | "pm8001: driver version %s\n", DRV_VERSION); | ||
| 622 | rc = pci_enable_device(pdev); | ||
| 623 | if (rc) | ||
| 624 | goto err_out_enable; | ||
| 625 | pci_set_master(pdev); | ||
| 626 | /* | ||
| 627 | * Enable pci slot busmaster by setting pci command register. | ||
| 628 | * This is required by FW for Cyclone card. | ||
| 629 | */ | ||
| 630 | |||
| 631 | pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg); | ||
| 632 | pci_reg |= 0x157; | ||
| 633 | pci_write_config_dword(pdev, PCI_COMMAND, pci_reg); | ||
| 634 | rc = pci_request_regions(pdev, DRV_NAME); | ||
| 635 | if (rc) | ||
| 636 | goto err_out_disable; | ||
| 637 | rc = pci_go_44(pdev); | ||
| 638 | if (rc) | ||
| 639 | goto err_out_regions; | ||
| 640 | |||
| 641 | shost = scsi_host_alloc(&pm8001_sht, sizeof(void *)); | ||
| 642 | if (!shost) { | ||
| 643 | rc = -ENOMEM; | ||
| 644 | goto err_out_regions; | ||
| 645 | } | ||
| 646 | chip = &pm8001_chips[ent->driver_data]; | ||
| 647 | SHOST_TO_SAS_HA(shost) = | ||
| 648 | kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); | ||
| 649 | if (!SHOST_TO_SAS_HA(shost)) { | ||
| 650 | rc = -ENOMEM; | ||
| 651 | goto err_out_free_host; | ||
| 652 | } | ||
| 653 | |||
| 654 | rc = pm8001_prep_sas_ha_init(shost, chip); | ||
| 655 | if (rc) { | ||
| 656 | rc = -ENOMEM; | ||
| 657 | goto err_out_free; | ||
| 658 | } | ||
| 659 | pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); | ||
| 660 | pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); | ||
| 661 | if (!pm8001_ha) { | ||
| 662 | rc = -ENOMEM; | ||
| 663 | goto err_out_free; | ||
| 664 | } | ||
| 665 | list_add_tail(&pm8001_ha->list, &hba_list); | ||
| 666 | PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); | ||
| 667 | rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); | ||
| 668 | if (rc) | ||
| 669 | goto err_out_ha_free; | ||
| 670 | |||
| 671 | rc = scsi_add_host(shost, &pdev->dev); | ||
| 672 | if (rc) | ||
| 673 | goto err_out_ha_free; | ||
| 674 | rc = pm8001_request_irq(pm8001_ha); | ||
| 675 | if (rc) | ||
| 676 | goto err_out_shost; | ||
| 677 | |||
| 678 | PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); | ||
| 679 | pm8001_init_sas_add(pm8001_ha); | ||
| 680 | pm8001_post_sas_ha_init(shost, chip); | ||
| 681 | rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); | ||
| 682 | if (rc) | ||
| 683 | goto err_out_shost; | ||
| 684 | scsi_scan_host(pm8001_ha->shost); | ||
| 685 | return 0; | ||
| 686 | |||
| 687 | err_out_shost: | ||
| 688 | scsi_remove_host(pm8001_ha->shost); | ||
| 689 | err_out_ha_free: | ||
| 690 | pm8001_free(pm8001_ha); | ||
| 691 | err_out_free: | ||
| 692 | kfree(SHOST_TO_SAS_HA(shost)); | ||
| 693 | err_out_free_host: | ||
| 694 | kfree(shost); | ||
| 695 | err_out_regions: | ||
| 696 | pci_release_regions(pdev); | ||
| 697 | err_out_disable: | ||
| 698 | pci_disable_device(pdev); | ||
| 699 | err_out_enable: | ||
| 700 | return rc; | ||
| 701 | } | ||
| 702 | |||
/**
 * pm8001_pci_remove - undo everything pm8001_pci_probe() set up
 * @pdev: PCI device being removed
 *
 * Teardown order matters: libsas and the SCSI host are unregistered
 * first so no new I/O arrives, then interrupts are quiesced before the
 * hba memory is released.
 */
static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct pm8001_hba_info *pm8001_ha;
	int i;
	pm8001_ha = sha->lldd_ha;
	pci_set_drvdata(pdev, NULL);
	sas_unregister_ha(sha);
	sas_remove_host(pm8001_ha->shost);
	list_del(&pm8001_ha->list);
	scsi_remove_host(pm8001_ha->shost);
	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);

#ifdef PM8001_USE_MSIX
	/* make sure no handler is still running, then free each vector */
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		synchronize_irq(pm8001_ha->msix_entries[i].vector);
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		free_irq(pm8001_ha->msix_entries[i].vector, sha);
	pci_disable_msix(pdev);
#else
	free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
	tasklet_kill(&pm8001_ha->tasklet);
#endif
	pm8001_free(pm8001_ha);
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
| 736 | |||
| 737 | /** | ||
| 738 | * pm8001_pci_suspend - power management suspend main entry point | ||
| 739 | * @pdev: PCI device struct | ||
| 740 | * @state: PM state change to (usually PCI_D3) | ||
| 741 | * | ||
| 742 | * Returns 0 success, anything else error. | ||
| 743 | */ | ||
| 744 | static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
| 745 | { | ||
| 746 | struct sas_ha_struct *sha = pci_get_drvdata(pdev); | ||
| 747 | struct pm8001_hba_info *pm8001_ha; | ||
| 748 | int i , pos; | ||
| 749 | u32 device_state; | ||
| 750 | pm8001_ha = sha->lldd_ha; | ||
| 751 | flush_scheduled_work(); | ||
| 752 | scsi_block_requests(pm8001_ha->shost); | ||
| 753 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | ||
| 754 | if (pos == 0) { | ||
| 755 | printk(KERN_ERR " PCI PM not supported\n"); | ||
| 756 | return -ENODEV; | ||
| 757 | } | ||
| 758 | PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); | ||
| 759 | PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); | ||
| 760 | #ifdef PM8001_USE_MSIX | ||
| 761 | for (i = 0; i < pm8001_ha->number_of_intr; i++) | ||
| 762 | synchronize_irq(pm8001_ha->msix_entries[i].vector); | ||
| 763 | for (i = 0; i < pm8001_ha->number_of_intr; i++) | ||
| 764 | free_irq(pm8001_ha->msix_entries[i].vector, sha); | ||
| 765 | pci_disable_msix(pdev); | ||
| 766 | #else | ||
| 767 | free_irq(pm8001_ha->irq, sha); | ||
| 768 | #endif | ||
| 769 | #ifdef PM8001_USE_TASKLET | ||
| 770 | tasklet_kill(&pm8001_ha->tasklet); | ||
| 771 | #endif | ||
| 772 | device_state = pci_choose_state(pdev, state); | ||
| 773 | pm8001_printk("pdev=0x%p, slot=%s, entering " | ||
| 774 | "operating state [D%d]\n", pdev, | ||
| 775 | pm8001_ha->name, device_state); | ||
| 776 | pci_save_state(pdev); | ||
| 777 | pci_disable_device(pdev); | ||
| 778 | pci_set_power_state(pdev, device_state); | ||
| 779 | return 0; | ||
| 780 | } | ||
| 781 | |||
| 782 | /** | ||
| 783 | * pm8001_pci_resume - power management resume main entry point | ||
| 784 | * @pdev: PCI device struct | ||
| 785 | * | ||
| 786 | * Returns 0 success, anything else error. | ||
| 787 | */ | ||
| 788 | static int pm8001_pci_resume(struct pci_dev *pdev) | ||
| 789 | { | ||
| 790 | struct sas_ha_struct *sha = pci_get_drvdata(pdev); | ||
| 791 | struct pm8001_hba_info *pm8001_ha; | ||
| 792 | int rc; | ||
| 793 | u32 device_state; | ||
| 794 | pm8001_ha = sha->lldd_ha; | ||
| 795 | device_state = pdev->current_state; | ||
| 796 | |||
| 797 | pm8001_printk("pdev=0x%p, slot=%s, resuming from previous " | ||
| 798 | "operating state [D%d]\n", pdev, pm8001_ha->name, device_state); | ||
| 799 | |||
| 800 | pci_set_power_state(pdev, PCI_D0); | ||
| 801 | pci_enable_wake(pdev, PCI_D0, 0); | ||
| 802 | pci_restore_state(pdev); | ||
| 803 | rc = pci_enable_device(pdev); | ||
| 804 | if (rc) { | ||
| 805 | pm8001_printk("slot=%s Enable device failed during resume\n", | ||
| 806 | pm8001_ha->name); | ||
| 807 | goto err_out_enable; | ||
| 808 | } | ||
| 809 | |||
| 810 | pci_set_master(pdev); | ||
| 811 | rc = pci_go_44(pdev); | ||
| 812 | if (rc) | ||
| 813 | goto err_out_disable; | ||
| 814 | |||
| 815 | PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); | ||
| 816 | rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); | ||
| 817 | if (rc) | ||
| 818 | goto err_out_disable; | ||
| 819 | PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); | ||
| 820 | rc = pm8001_request_irq(pm8001_ha); | ||
| 821 | if (rc) | ||
| 822 | goto err_out_disable; | ||
| 823 | #ifdef PM8001_USE_TASKLET | ||
| 824 | tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, | ||
| 825 | (unsigned long)pm8001_ha); | ||
| 826 | #endif | ||
| 827 | PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); | ||
| 828 | scsi_unblock_requests(pm8001_ha->shost); | ||
| 829 | return 0; | ||
| 830 | |||
| 831 | err_out_disable: | ||
| 832 | scsi_remove_host(pm8001_ha->shost); | ||
| 833 | pci_disable_device(pdev); | ||
| 834 | err_out_enable: | ||
| 835 | return rc; | ||
| 836 | } | ||
| 837 | |||
/* PCI ids of supported adapters; .driver_data indexes pm8001_chips[] */
static struct pci_device_id __devinitdata pm8001_pci_table[] = {
	{
		PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
	},
	{
		PCI_DEVICE(0x117c, 0x0042),
		.driver_data = chip_8001
	},
	{} /* terminate list */
};
| 848 | |||
/* PCI driver glue: probe/remove and power-management entry points */
static struct pci_driver pm8001_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pm8001_pci_table,
	.probe		= pm8001_pci_probe,
	.remove		= __devexit_p(pm8001_pci_remove),
	.suspend	= pm8001_pci_suspend,
	.resume		= pm8001_pci_resume,
};
| 857 | |||
| 858 | /** | ||
| 859 | * pm8001_init - initialize scsi transport template | ||
| 860 | */ | ||
| 861 | static int __init pm8001_init(void) | ||
| 862 | { | ||
| 863 | int rc; | ||
| 864 | pm8001_id = 0; | ||
| 865 | pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); | ||
| 866 | if (!pm8001_stt) | ||
| 867 | return -ENOMEM; | ||
| 868 | rc = pci_register_driver(&pm8001_pci_driver); | ||
| 869 | if (rc) | ||
| 870 | goto err_out; | ||
| 871 | return 0; | ||
| 872 | err_out: | ||
| 873 | sas_release_transport(pm8001_stt); | ||
| 874 | return rc; | ||
| 875 | } | ||
| 876 | |||
/* Module exit: unregister the PCI driver, then release the transport. */
static void __exit pm8001_exit(void)
{
	pci_unregister_driver(&pm8001_pci_driver);
	sas_release_transport(pm8001_stt);
}
| 882 | |||
/* Module entry/exit glue and metadata. */
module_init(pm8001_init);
module_exit(pm8001_exit);

MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
| 891 | |||
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c new file mode 100644 index 000000000000..1f767a0e727a --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
| @@ -0,0 +1,1103 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #include "pm8001_sas.h" | ||
| 42 | |||
| 43 | /** | ||
| 44 | * pm8001_find_tag - from sas task to find out tag that belongs to this task | ||
| 45 | * @task: the task sent to the LLDD | ||
| 46 | * @tag: the found tag associated with the task | ||
| 47 | */ | ||
| 48 | static int pm8001_find_tag(struct sas_task *task, u32 *tag) | ||
| 49 | { | ||
| 50 | if (task->lldd_task) { | ||
| 51 | struct pm8001_ccb_info *ccb; | ||
| 52 | ccb = task->lldd_task; | ||
| 53 | *tag = ccb->ccb_tag; | ||
| 54 | return 1; | ||
| 55 | } | ||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | /** | ||
| 60 | * pm8001_tag_clear - clear the tags bitmap | ||
| 61 | * @pm8001_ha: our hba struct | ||
| 62 | * @tag: the found tag associated with the task | ||
| 63 | */ | ||
| 64 | static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) | ||
| 65 | { | ||
| 66 | void *bitmap = pm8001_ha->tags; | ||
| 67 | clear_bit(tag, bitmap); | ||
| 68 | } | ||
| 69 | |||
| 70 | static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) | ||
| 71 | { | ||
| 72 | pm8001_tag_clear(pm8001_ha, tag); | ||
| 73 | } | ||
| 74 | |||
| 75 | static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) | ||
| 76 | { | ||
| 77 | void *bitmap = pm8001_ha->tags; | ||
| 78 | set_bit(tag, bitmap); | ||
| 79 | } | ||
| 80 | |||
| 81 | /** | ||
| 82 | * pm8001_tag_alloc - allocate a empty tag for task used. | ||
| 83 | * @pm8001_ha: our hba struct | ||
| 84 | * @tag_out: the found empty tag . | ||
| 85 | */ | ||
| 86 | inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) | ||
| 87 | { | ||
| 88 | unsigned int index, tag; | ||
| 89 | void *bitmap = pm8001_ha->tags; | ||
| 90 | |||
| 91 | index = find_first_zero_bit(bitmap, pm8001_ha->tags_num); | ||
| 92 | tag = index; | ||
| 93 | if (tag >= pm8001_ha->tags_num) | ||
| 94 | return -SAS_QUEUE_FULL; | ||
| 95 | pm8001_tag_set(pm8001_ha, tag); | ||
| 96 | *tag_out = tag; | ||
| 97 | return 0; | ||
| 98 | } | ||
| 99 | |||
| 100 | void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) | ||
| 101 | { | ||
| 102 | int i; | ||
| 103 | for (i = 0; i < pm8001_ha->tags_num; ++i) | ||
| 104 | pm8001_tag_clear(pm8001_ha, i); | ||
| 105 | } | ||
| 106 | |||
| 107 | /** | ||
| 108 | * pm8001_mem_alloc - allocate memory for pm8001. | ||
| 109 | * @pdev: pci device. | ||
| 110 | * @virt_addr: the allocated virtual address | ||
| 111 | * @pphys_addr_hi: the physical address high byte address. | ||
| 112 | * @pphys_addr_lo: the physical address low byte address. | ||
| 113 | * @mem_size: memory size. | ||
| 114 | */ | ||
| 115 | int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, | ||
| 116 | dma_addr_t *pphys_addr, u32 *pphys_addr_hi, | ||
| 117 | u32 *pphys_addr_lo, u32 mem_size, u32 align) | ||
| 118 | { | ||
| 119 | caddr_t mem_virt_alloc; | ||
| 120 | dma_addr_t mem_dma_handle; | ||
| 121 | u64 phys_align; | ||
| 122 | u64 align_offset = 0; | ||
| 123 | if (align) | ||
| 124 | align_offset = (dma_addr_t)align - 1; | ||
| 125 | mem_virt_alloc = | ||
| 126 | pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); | ||
| 127 | if (!mem_virt_alloc) { | ||
| 128 | pm8001_printk("memory allocation error\n"); | ||
| 129 | return -1; | ||
| 130 | } | ||
| 131 | memset((void *)mem_virt_alloc, 0, mem_size+align); | ||
| 132 | *pphys_addr = mem_dma_handle; | ||
| 133 | phys_align = (*pphys_addr + align_offset) & ~align_offset; | ||
| 134 | *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; | ||
| 135 | *pphys_addr_hi = upper_32_bits(phys_align); | ||
| 136 | *pphys_addr_lo = lower_32_bits(phys_align); | ||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | /** | ||
| 140 | * pm8001_find_ha_by_dev - from domain device which come from sas layer to | ||
| 141 | * find out our hba struct. | ||
| 142 | * @dev: the domain device which from sas layer. | ||
| 143 | */ | ||
| 144 | static | ||
| 145 | struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) | ||
| 146 | { | ||
| 147 | struct sas_ha_struct *sha = dev->port->ha; | ||
| 148 | struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; | ||
| 149 | return pm8001_ha; | ||
| 150 | } | ||
| 151 | |||
| 152 | /** | ||
| 153 | * pm8001_phy_control - this function should be registered to | ||
| 154 | * sas_domain_function_template to provide libsas used, note: this is just | ||
| 155 | * control the HBA phy rather than other expander phy if you want control | ||
| 156 | * other phy, you should use SMP command. | ||
| 157 | * @sas_phy: which phy in HBA phys. | ||
| 158 | * @func: the operation. | ||
| 159 | * @funcdata: always NULL. | ||
| 160 | */ | ||
| 161 | int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
| 162 | void *funcdata) | ||
| 163 | { | ||
| 164 | int rc = 0, phy_id = sas_phy->id; | ||
| 165 | struct pm8001_hba_info *pm8001_ha = NULL; | ||
| 166 | struct sas_phy_linkrates *rates; | ||
| 167 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 168 | pm8001_ha = sas_phy->ha->lldd_ha; | ||
| 169 | pm8001_ha->phy[phy_id].enable_completion = &completion; | ||
| 170 | switch (func) { | ||
| 171 | case PHY_FUNC_SET_LINK_RATE: | ||
| 172 | rates = funcdata; | ||
| 173 | if (rates->minimum_linkrate) { | ||
| 174 | pm8001_ha->phy[phy_id].minimum_linkrate = | ||
| 175 | rates->minimum_linkrate; | ||
| 176 | } | ||
| 177 | if (rates->maximum_linkrate) { | ||
| 178 | pm8001_ha->phy[phy_id].maximum_linkrate = | ||
| 179 | rates->maximum_linkrate; | ||
| 180 | } | ||
| 181 | if (pm8001_ha->phy[phy_id].phy_state == 0) { | ||
| 182 | PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); | ||
| 183 | wait_for_completion(&completion); | ||
| 184 | } | ||
| 185 | PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, | ||
| 186 | PHY_LINK_RESET); | ||
| 187 | break; | ||
| 188 | case PHY_FUNC_HARD_RESET: | ||
| 189 | if (pm8001_ha->phy[phy_id].phy_state == 0) { | ||
| 190 | PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); | ||
| 191 | wait_for_completion(&completion); | ||
| 192 | } | ||
| 193 | PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, | ||
| 194 | PHY_HARD_RESET); | ||
| 195 | break; | ||
| 196 | case PHY_FUNC_LINK_RESET: | ||
| 197 | if (pm8001_ha->phy[phy_id].phy_state == 0) { | ||
| 198 | PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); | ||
| 199 | wait_for_completion(&completion); | ||
| 200 | } | ||
| 201 | PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, | ||
| 202 | PHY_LINK_RESET); | ||
| 203 | break; | ||
| 204 | case PHY_FUNC_RELEASE_SPINUP_HOLD: | ||
| 205 | PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, | ||
| 206 | PHY_LINK_RESET); | ||
| 207 | break; | ||
| 208 | case PHY_FUNC_DISABLE: | ||
| 209 | PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); | ||
| 210 | break; | ||
| 211 | default: | ||
| 212 | rc = -EOPNOTSUPP; | ||
| 213 | } | ||
| 214 | msleep(300); | ||
| 215 | return rc; | ||
| 216 | } | ||
| 217 | |||
| 218 | int pm8001_slave_alloc(struct scsi_device *scsi_dev) | ||
| 219 | { | ||
| 220 | struct domain_device *dev = sdev_to_domain_dev(scsi_dev); | ||
| 221 | if (dev_is_sata(dev)) { | ||
| 222 | /* We don't need to rescan targets | ||
| 223 | * if REPORT_LUNS request is failed | ||
| 224 | */ | ||
| 225 | if (scsi_dev->lun > 0) | ||
| 226 | return -ENXIO; | ||
| 227 | scsi_dev->tagged_supported = 1; | ||
| 228 | } | ||
| 229 | return sas_slave_alloc(scsi_dev); | ||
| 230 | } | ||
| 231 | |||
| 232 | /** | ||
| 233 | * pm8001_scan_start - we should enable all HBA phys by sending the phy_start | ||
| 234 | * command to HBA. | ||
| 235 | * @shost: the scsi host data. | ||
| 236 | */ | ||
| 237 | void pm8001_scan_start(struct Scsi_Host *shost) | ||
| 238 | { | ||
| 239 | int i; | ||
| 240 | struct pm8001_hba_info *pm8001_ha; | ||
| 241 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
| 242 | pm8001_ha = sha->lldd_ha; | ||
| 243 | PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); | ||
| 244 | for (i = 0; i < pm8001_ha->chip->n_phy; ++i) | ||
| 245 | PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); | ||
| 246 | } | ||
| 247 | |||
/**
 * pm8001_scan_finished - tell the midlayer whether the async scan is done.
 * @shost: the scsi host data.
 * @time: how long the scan has been running, in jiffies.
 *
 * Returns 0 to keep polling, 1 once discovery work has been flushed.
 */
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}
| 258 | |||
| 259 | /** | ||
| 260 | * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task | ||
| 261 | * @pm8001_ha: our hba card information | ||
| 262 | * @ccb: the ccb which attached to smp task | ||
| 263 | */ | ||
| 264 | static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, | ||
| 265 | struct pm8001_ccb_info *ccb) | ||
| 266 | { | ||
| 267 | return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); | ||
| 268 | } | ||
| 269 | |||
| 270 | u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) | ||
| 271 | { | ||
| 272 | struct ata_queued_cmd *qc = task->uldd_task; | ||
| 273 | if (qc) { | ||
| 274 | if (qc->tf.command == ATA_CMD_FPDMA_WRITE || | ||
| 275 | qc->tf.command == ATA_CMD_FPDMA_READ) { | ||
| 276 | *tag = qc->tag; | ||
| 277 | return 1; | ||
| 278 | } | ||
| 279 | } | ||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 284 | * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task | ||
| 285 | * @pm8001_ha: our hba card information | ||
| 286 | * @ccb: the ccb which attached to sata task | ||
| 287 | */ | ||
| 288 | static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, | ||
| 289 | struct pm8001_ccb_info *ccb) | ||
| 290 | { | ||
| 291 | return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); | ||
| 292 | } | ||
| 293 | |||
| 294 | /** | ||
| 295 | * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data | ||
| 296 | * @pm8001_ha: our hba card information | ||
| 297 | * @ccb: the ccb which attached to TM | ||
| 298 | * @tmf: the task management IU | ||
| 299 | */ | ||
| 300 | static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, | ||
| 301 | struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) | ||
| 302 | { | ||
| 303 | return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); | ||
| 304 | } | ||
| 305 | |||
| 306 | /** | ||
| 307 | * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task | ||
| 308 | * @pm8001_ha: our hba card information | ||
| 309 | * @ccb: the ccb which attached to ssp task | ||
| 310 | */ | ||
| 311 | static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, | ||
| 312 | struct pm8001_ccb_info *ccb) | ||
| 313 | { | ||
| 314 | return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); | ||
| 315 | } | ||
/**
 * pm8001_slave_configure - scsi midlayer slave_configure hook.
 * @sdev: the scsi device being configured.
 *
 * Delegates to sas_slave_configure(); when PM8001_DISABLE_NCQ is set,
 * additionally turns off NCQ for SATA devices and drops the queue depth
 * to 1.
 */
int pm8001_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc;

	rc = sas_slave_configure(sdev);
	if (rc)
		return rc;
	if (!dev_is_sata(dev))
		return 0;
#ifdef PM8001_DISABLE_NCQ
	{
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;

		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	}
#endif
	return 0;
}
| 332 | /** | ||
| 333 | * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. | ||
| 334 | * @task: the task to be execute. | ||
| 335 | * @num: if can_queue great than 1, the task can be queued up. for SMP task, | ||
| 336 | * we always execute one one time. | ||
| 337 | * @gfp_flags: gfp_flags. | ||
| 338 | * @is_tmf: if it is task management task. | ||
| 339 | * @tmf: the task management IU | ||
| 340 | */ | ||
| 341 | #define DEV_IS_GONE(pm8001_dev) \ | ||
| 342 | ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) | ||
| 343 | static int pm8001_task_exec(struct sas_task *task, const int num, | ||
| 344 | gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) | ||
| 345 | { | ||
| 346 | struct domain_device *dev = task->dev; | ||
| 347 | struct pm8001_hba_info *pm8001_ha; | ||
| 348 | struct pm8001_device *pm8001_dev; | ||
| 349 | struct sas_task *t = task; | ||
| 350 | struct pm8001_ccb_info *ccb; | ||
| 351 | u32 tag = 0xdeadbeef, rc, n_elem = 0; | ||
| 352 | u32 n = num; | ||
| 353 | unsigned long flags = 0; | ||
| 354 | |||
| 355 | if (!dev->port) { | ||
| 356 | struct task_status_struct *tsm = &t->task_status; | ||
| 357 | tsm->resp = SAS_TASK_UNDELIVERED; | ||
| 358 | tsm->stat = SAS_PHY_DOWN; | ||
| 359 | if (dev->dev_type != SATA_DEV) | ||
| 360 | t->task_done(t); | ||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | pm8001_ha = pm8001_find_ha_by_dev(task->dev); | ||
| 364 | PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); | ||
| 365 | spin_lock_irqsave(&pm8001_ha->lock, flags); | ||
| 366 | do { | ||
| 367 | dev = t->dev; | ||
| 368 | pm8001_dev = dev->lldd_dev; | ||
| 369 | if (DEV_IS_GONE(pm8001_dev)) { | ||
| 370 | if (pm8001_dev) { | ||
| 371 | PM8001_IO_DBG(pm8001_ha, | ||
| 372 | pm8001_printk("device %d not ready.\n", | ||
| 373 | pm8001_dev->device_id)); | ||
| 374 | } else { | ||
| 375 | PM8001_IO_DBG(pm8001_ha, | ||
| 376 | pm8001_printk("device %016llx not " | ||
| 377 | "ready.\n", SAS_ADDR(dev->sas_addr))); | ||
| 378 | } | ||
| 379 | rc = SAS_PHY_DOWN; | ||
| 380 | goto out_done; | ||
| 381 | } | ||
| 382 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 383 | if (rc) | ||
| 384 | goto err_out; | ||
| 385 | ccb = &pm8001_ha->ccb_info[tag]; | ||
| 386 | |||
| 387 | if (!sas_protocol_ata(t->task_proto)) { | ||
| 388 | if (t->num_scatter) { | ||
| 389 | n_elem = dma_map_sg(pm8001_ha->dev, | ||
| 390 | t->scatter, | ||
| 391 | t->num_scatter, | ||
| 392 | t->data_dir); | ||
| 393 | if (!n_elem) { | ||
| 394 | rc = -ENOMEM; | ||
| 395 | goto err_out_tag; | ||
| 396 | } | ||
| 397 | } | ||
| 398 | } else { | ||
| 399 | n_elem = t->num_scatter; | ||
| 400 | } | ||
| 401 | |||
| 402 | t->lldd_task = ccb; | ||
| 403 | ccb->n_elem = n_elem; | ||
| 404 | ccb->ccb_tag = tag; | ||
| 405 | ccb->task = t; | ||
| 406 | switch (t->task_proto) { | ||
| 407 | case SAS_PROTOCOL_SMP: | ||
| 408 | rc = pm8001_task_prep_smp(pm8001_ha, ccb); | ||
| 409 | break; | ||
| 410 | case SAS_PROTOCOL_SSP: | ||
| 411 | if (is_tmf) | ||
| 412 | rc = pm8001_task_prep_ssp_tm(pm8001_ha, | ||
| 413 | ccb, tmf); | ||
| 414 | else | ||
| 415 | rc = pm8001_task_prep_ssp(pm8001_ha, ccb); | ||
| 416 | break; | ||
| 417 | case SAS_PROTOCOL_SATA: | ||
| 418 | case SAS_PROTOCOL_STP: | ||
| 419 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | ||
| 420 | rc = pm8001_task_prep_ata(pm8001_ha, ccb); | ||
| 421 | break; | ||
| 422 | default: | ||
| 423 | dev_printk(KERN_ERR, pm8001_ha->dev, | ||
| 424 | "unknown sas_task proto: 0x%x\n", | ||
| 425 | t->task_proto); | ||
| 426 | rc = -EINVAL; | ||
| 427 | break; | ||
| 428 | } | ||
| 429 | |||
| 430 | if (rc) { | ||
| 431 | PM8001_IO_DBG(pm8001_ha, | ||
| 432 | pm8001_printk("rc is %x\n", rc)); | ||
| 433 | goto err_out_tag; | ||
| 434 | } | ||
| 435 | /* TODO: select normal or high priority */ | ||
| 436 | spin_lock(&t->task_state_lock); | ||
| 437 | t->task_state_flags |= SAS_TASK_AT_INITIATOR; | ||
| 438 | spin_unlock(&t->task_state_lock); | ||
| 439 | pm8001_dev->running_req++; | ||
| 440 | if (n > 1) | ||
| 441 | t = list_entry(t->list.next, struct sas_task, list); | ||
| 442 | } while (--n); | ||
| 443 | rc = 0; | ||
| 444 | goto out_done; | ||
| 445 | |||
| 446 | err_out_tag: | ||
| 447 | pm8001_tag_free(pm8001_ha, tag); | ||
| 448 | err_out: | ||
| 449 | dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); | ||
| 450 | if (!sas_protocol_ata(t->task_proto)) | ||
| 451 | if (n_elem) | ||
| 452 | dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, | ||
| 453 | t->data_dir); | ||
| 454 | out_done: | ||
| 455 | spin_unlock_irqrestore(&pm8001_ha->lock, flags); | ||
| 456 | return rc; | ||
| 457 | } | ||
| 458 | |||
| 459 | /** | ||
| 460 | * pm8001_queue_command - register for upper layer used, all IO commands sent | ||
| 461 | * to HBA are from this interface. | ||
| 462 | * @task: the task to be execute. | ||
| 463 | * @num: if can_queue great than 1, the task can be queued up. for SMP task, | ||
| 464 | * we always execute one one time | ||
| 465 | * @gfp_flags: gfp_flags | ||
| 466 | */ | ||
| 467 | int pm8001_queue_command(struct sas_task *task, const int num, | ||
| 468 | gfp_t gfp_flags) | ||
| 469 | { | ||
| 470 | return pm8001_task_exec(task, num, gfp_flags, 0, NULL); | ||
| 471 | } | ||
| 472 | |||
| 473 | void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx) | ||
| 474 | { | ||
| 475 | pm8001_tag_clear(pm8001_ha, ccb_idx); | ||
| 476 | } | ||
| 477 | |||
| 478 | /** | ||
| 479 | * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. | ||
| 480 | * @pm8001_ha: our hba card information | ||
| 481 | * @ccb: the ccb which attached to ssp task | ||
| 482 | * @task: the task to be free. | ||
| 483 | * @ccb_idx: ccb index. | ||
| 484 | */ | ||
| 485 | void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, | ||
| 486 | struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) | ||
| 487 | { | ||
| 488 | if (!ccb->task) | ||
| 489 | return; | ||
| 490 | if (!sas_protocol_ata(task->task_proto)) | ||
| 491 | if (ccb->n_elem) | ||
| 492 | dma_unmap_sg(pm8001_ha->dev, task->scatter, | ||
| 493 | task->num_scatter, task->data_dir); | ||
| 494 | |||
| 495 | switch (task->task_proto) { | ||
| 496 | case SAS_PROTOCOL_SMP: | ||
| 497 | dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, | ||
| 498 | PCI_DMA_FROMDEVICE); | ||
| 499 | dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, | ||
| 500 | PCI_DMA_TODEVICE); | ||
| 501 | break; | ||
| 502 | |||
| 503 | case SAS_PROTOCOL_SATA: | ||
| 504 | case SAS_PROTOCOL_STP: | ||
| 505 | case SAS_PROTOCOL_SSP: | ||
| 506 | default: | ||
| 507 | /* do nothing */ | ||
| 508 | break; | ||
| 509 | } | ||
| 510 | task->lldd_task = NULL; | ||
| 511 | ccb->task = NULL; | ||
| 512 | ccb->ccb_tag = 0xFFFFFFFF; | ||
| 513 | pm8001_ccb_free(pm8001_ha, ccb_idx); | ||
| 514 | } | ||
| 515 | |||
| 516 | /** | ||
| 517 | * pm8001_alloc_dev - find a empty pm8001_device | ||
| 518 | * @pm8001_ha: our hba card information | ||
| 519 | */ | ||
| 520 | struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) | ||
| 521 | { | ||
| 522 | u32 dev; | ||
| 523 | for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { | ||
| 524 | if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { | ||
| 525 | pm8001_ha->devices[dev].id = dev; | ||
| 526 | return &pm8001_ha->devices[dev]; | ||
| 527 | } | ||
| 528 | } | ||
| 529 | if (dev == PM8001_MAX_DEVICES) { | ||
| 530 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 531 | pm8001_printk("max support %d devices, ignore ..\n", | ||
| 532 | PM8001_MAX_DEVICES)); | ||
| 533 | } | ||
| 534 | return NULL; | ||
| 535 | } | ||
| 536 | |||
| 537 | static void pm8001_free_dev(struct pm8001_device *pm8001_dev) | ||
| 538 | { | ||
| 539 | u32 id = pm8001_dev->id; | ||
| 540 | memset(pm8001_dev, 0, sizeof(*pm8001_dev)); | ||
| 541 | pm8001_dev->id = id; | ||
| 542 | pm8001_dev->dev_type = NO_DEVICE; | ||
| 543 | pm8001_dev->device_id = PM8001_MAX_DEVICES; | ||
| 544 | pm8001_dev->sas_device = NULL; | ||
| 545 | } | ||
| 546 | |||
| 547 | /** | ||
| 548 | * pm8001_dev_found_notify - libsas notify a device is found. | ||
| 549 | * @dev: the device structure which sas layer used. | ||
| 550 | * | ||
| 551 | * when libsas find a sas domain device, it should tell the LLDD that | ||
| 552 | * device is found, and then LLDD register this device to HBA firmware | ||
| 553 | * by the command "OPC_INB_REG_DEV", after that the HBA will assign a | ||
| 554 | * device ID(according to device's sas address) and returned it to LLDD. From | ||
| 555 | * now on, we communicate with HBA FW with the device ID which HBA assigned | ||
| 556 | * rather than sas address. it is the neccessary step for our HBA but it is | ||
| 557 | * the optional for other HBA driver. | ||
| 558 | */ | ||
| 559 | static int pm8001_dev_found_notify(struct domain_device *dev) | ||
| 560 | { | ||
| 561 | unsigned long flags = 0; | ||
| 562 | int res = 0; | ||
| 563 | struct pm8001_hba_info *pm8001_ha = NULL; | ||
| 564 | struct domain_device *parent_dev = dev->parent; | ||
| 565 | struct pm8001_device *pm8001_device; | ||
| 566 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 567 | u32 flag = 0; | ||
| 568 | pm8001_ha = pm8001_find_ha_by_dev(dev); | ||
| 569 | spin_lock_irqsave(&pm8001_ha->lock, flags); | ||
| 570 | |||
| 571 | pm8001_device = pm8001_alloc_dev(pm8001_ha); | ||
| 572 | pm8001_device->sas_device = dev; | ||
| 573 | if (!pm8001_device) { | ||
| 574 | res = -1; | ||
| 575 | goto found_out; | ||
| 576 | } | ||
| 577 | dev->lldd_dev = pm8001_device; | ||
| 578 | pm8001_device->dev_type = dev->dev_type; | ||
| 579 | pm8001_device->dcompletion = &completion; | ||
| 580 | if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { | ||
| 581 | int phy_id; | ||
| 582 | struct ex_phy *phy; | ||
| 583 | for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; | ||
| 584 | phy_id++) { | ||
| 585 | phy = &parent_dev->ex_dev.ex_phy[phy_id]; | ||
| 586 | if (SAS_ADDR(phy->attached_sas_addr) | ||
| 587 | == SAS_ADDR(dev->sas_addr)) { | ||
| 588 | pm8001_device->attached_phy = phy_id; | ||
| 589 | break; | ||
| 590 | } | ||
| 591 | } | ||
| 592 | if (phy_id == parent_dev->ex_dev.num_phys) { | ||
| 593 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 594 | pm8001_printk("Error: no attached dev:%016llx" | ||
| 595 | " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), | ||
| 596 | SAS_ADDR(parent_dev->sas_addr))); | ||
| 597 | res = -1; | ||
| 598 | } | ||
| 599 | } else { | ||
| 600 | if (dev->dev_type == SATA_DEV) { | ||
| 601 | pm8001_device->attached_phy = | ||
| 602 | dev->rphy->identify.phy_identifier; | ||
| 603 | flag = 1; /* directly sata*/ | ||
| 604 | } | ||
| 605 | } /*register this device to HBA*/ | ||
| 606 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n")); | ||
| 607 | PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); | ||
| 608 | spin_unlock_irqrestore(&pm8001_ha->lock, flags); | ||
| 609 | wait_for_completion(&completion); | ||
| 610 | if (dev->dev_type == SAS_END_DEV) | ||
| 611 | msleep(50); | ||
| 612 | pm8001_ha->flags = PM8001F_RUN_TIME ; | ||
| 613 | return 0; | ||
| 614 | found_out: | ||
| 615 | spin_unlock_irqrestore(&pm8001_ha->lock, flags); | ||
| 616 | return res; | ||
| 617 | } | ||
| 618 | |||
/* pm8001_dev_found - libsas dev_found hook; see pm8001_dev_found_notify */
int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}
| 623 | |||
| 624 | /** | ||
| 625 | * pm8001_alloc_task - allocate a task structure for TMF | ||
| 626 | */ | ||
| 627 | static struct sas_task *pm8001_alloc_task(void) | ||
| 628 | { | ||
| 629 | struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); | ||
| 630 | if (task) { | ||
| 631 | INIT_LIST_HEAD(&task->list); | ||
| 632 | spin_lock_init(&task->task_state_lock); | ||
| 633 | task->task_state_flags = SAS_TASK_STATE_PENDING; | ||
| 634 | init_timer(&task->timer); | ||
| 635 | init_completion(&task->completion); | ||
| 636 | } | ||
| 637 | return task; | ||
| 638 | } | ||
| 639 | |||
| 640 | static void pm8001_free_task(struct sas_task *task) | ||
| 641 | { | ||
| 642 | if (task) { | ||
| 643 | BUG_ON(!list_empty(&task->list)); | ||
| 644 | kfree(task); | ||
| 645 | } | ||
| 646 | } | ||
| 647 | |||
/**
 * pm8001_task_done - completion callback for internally issued tasks.
 * @task: the completed task.
 *
 * If del_timer() returns 0 the timeout handler already fired (or is
 * firing) and will signal the completion itself, so bail out to avoid
 * completing twice.
 */
static void pm8001_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}
| 654 | |||
/**
 * pm8001_tmf_timedout - timer callback when an internal TMF task expires.
 * @data: the sas_task, cast to unsigned long by the timer API.
 *
 * Marks the task aborted and wakes the waiter. NOTE(review): the flag
 * update is done without task_state_lock here - verify whether a race
 * with the normal completion path matters in practice.
 */
static void pm8001_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->completion);
}
| 662 | |||
/* per-attempt timeout for internal task management commands, in seconds */
#define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute a task management command.
 * @dev: the target device.
 * @parameter: ssp task parameter (copied into task->ssp_task).
 * @para_len: length of @parameter in bytes.
 * @tmf: which task management function to issue.
 *
 * When errors or exceptions happen we may want to react, e.g. abort the
 * task that caused the exception; that is done by calling this function
 * via the normal task-execution interface. Retries up to 3 times.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, the residual byte count on
 * underrun, -EMSGSIZE on overrun, or a negative failure code.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	for (retry = 0; retry < 3; retry++) {
		task = pm8001_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		/* NOTE(review): no bound check of para_len against
		 * sizeof(task->ssp_task); callers must pass <= that size */
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		/* arm a per-attempt timeout before submitting */
		task->timer.data = (unsigned long)task;
		task->timer.function = pm8001_tmf_timedout;
		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
				"failed\n"));
			goto ex_err;
		}
		/* completed either by pm8001_task_done or the timeout */
		wait_for_completion(&task->completion);
		res = -TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task[%x]timeout.\n",
					tmf->tmf));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			* underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Blocked task error.\n"));
			res = -EMSGSIZE;
			break;
		} else {
			/* any other status: log, free, and retry */
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response:"
				"0x%x status 0x%x\n",
				SAS_ADDR(dev->sas_addr),
				task->task_status.resp,
				task->task_status.stat));
			pm8001_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	if (task != NULL)
		pm8001_free_task(task);
	return res;
}
| 754 | |||
| 755 | static int | ||
| 756 | pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, | ||
| 757 | struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag, | ||
| 758 | u32 task_tag) | ||
| 759 | { | ||
| 760 | int res, retry; | ||
| 761 | u32 ccb_tag; | ||
| 762 | struct pm8001_ccb_info *ccb; | ||
| 763 | struct sas_task *task = NULL; | ||
| 764 | |||
| 765 | for (retry = 0; retry < 3; retry++) { | ||
| 766 | task = pm8001_alloc_task(); | ||
| 767 | if (!task) | ||
| 768 | return -ENOMEM; | ||
| 769 | |||
| 770 | task->dev = dev; | ||
| 771 | task->task_proto = dev->tproto; | ||
| 772 | task->task_done = pm8001_task_done; | ||
| 773 | task->timer.data = (unsigned long)task; | ||
| 774 | task->timer.function = pm8001_tmf_timedout; | ||
| 775 | task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; | ||
| 776 | add_timer(&task->timer); | ||
| 777 | |||
| 778 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); | ||
| 779 | if (res) | ||
| 780 | return res; | ||
| 781 | ccb = &pm8001_ha->ccb_info[ccb_tag]; | ||
| 782 | ccb->device = pm8001_dev; | ||
| 783 | ccb->ccb_tag = ccb_tag; | ||
| 784 | ccb->task = task; | ||
| 785 | |||
| 786 | res = PM8001_CHIP_DISP->task_abort(pm8001_ha, | ||
| 787 | pm8001_dev, flag, task_tag, ccb_tag); | ||
| 788 | |||
| 789 | if (res) { | ||
| 790 | del_timer(&task->timer); | ||
| 791 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 792 | pm8001_printk("Executing internal task " | ||
| 793 | "failed\n")); | ||
| 794 | goto ex_err; | ||
| 795 | } | ||
| 796 | wait_for_completion(&task->completion); | ||
| 797 | res = TMF_RESP_FUNC_FAILED; | ||
| 798 | /* Even TMF timed out, return direct. */ | ||
| 799 | if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
| 800 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { | ||
| 801 | PM8001_FAIL_DBG(pm8001_ha, | ||
| 802 | pm8001_printk("TMF task timeout.\n")); | ||
| 803 | goto ex_err; | ||
| 804 | } | ||
| 805 | } | ||
| 806 | |||
| 807 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
| 808 | task->task_status.stat == SAM_GOOD) { | ||
| 809 | res = TMF_RESP_FUNC_COMPLETE; | ||
| 810 | break; | ||
| 811 | |||
| 812 | } else { | ||
| 813 | PM8001_EH_DBG(pm8001_ha, | ||
| 814 | pm8001_printk(" Task to dev %016llx response: " | ||
| 815 | "0x%x status 0x%x\n", | ||
| 816 | SAS_ADDR(dev->sas_addr), | ||
| 817 | task->task_status.resp, | ||
| 818 | task->task_status.stat)); | ||
| 819 | pm8001_free_task(task); | ||
| 820 | task = NULL; | ||
| 821 | } | ||
| 822 | } | ||
| 823 | ex_err: | ||
| 824 | BUG_ON(retry == 3 && task != NULL); | ||
| 825 | if (task != NULL) | ||
| 826 | pm8001_free_task(task); | ||
| 827 | return res; | ||
| 828 | } | ||
| 829 | |||
| 830 | /** | ||
| 831 | * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" | ||
| 832 | * @dev: the device structure which sas layer used. | ||
| 833 | */ | ||
| 834 | static void pm8001_dev_gone_notify(struct domain_device *dev) | ||
| 835 | { | ||
| 836 | unsigned long flags = 0; | ||
| 837 | u32 tag; | ||
| 838 | struct pm8001_hba_info *pm8001_ha; | ||
| 839 | struct pm8001_device *pm8001_dev = dev->lldd_dev; | ||
| 840 | u32 device_id = pm8001_dev->device_id; | ||
| 841 | pm8001_ha = pm8001_find_ha_by_dev(dev); | ||
| 842 | spin_lock_irqsave(&pm8001_ha->lock, flags); | ||
| 843 | pm8001_tag_alloc(pm8001_ha, &tag); | ||
| 844 | if (pm8001_dev) { | ||
| 845 | PM8001_DISC_DBG(pm8001_ha, | ||
| 846 | pm8001_printk("found dev[%d:%x] is gone.\n", | ||
| 847 | pm8001_dev->device_id, pm8001_dev->dev_type)); | ||
| 848 | if (pm8001_dev->running_req) { | ||
| 849 | spin_unlock_irqrestore(&pm8001_ha->lock, flags); | ||
| 850 | pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , | ||
| 851 | dev, 1, 0); | ||
| 852 | spin_lock_irqsave(&pm8001_ha->lock, flags); | ||
| 853 | } | ||
| 854 | PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); | ||
| 855 | pm8001_free_dev(pm8001_dev); | ||
| 856 | } else { | ||
| 857 | PM8001_DISC_DBG(pm8001_ha, | ||
| 858 | pm8001_printk("Found dev has gone.\n")); | ||
| 859 | } | ||
| 860 | dev->lldd_dev = NULL; | ||
| 861 | spin_unlock_irqrestore(&pm8001_ha->lock, flags); | ||
| 862 | } | ||
| 863 | |||
/**
 * pm8001_dev_gone - libsas entry point called when a domain device vanishes.
 * @dev: the device structure which sas layer used.
 *
 * Thin wrapper: all work is done in pm8001_dev_gone_notify().
 */
void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}
| 868 | |||
| 869 | static int pm8001_issue_ssp_tmf(struct domain_device *dev, | ||
| 870 | u8 *lun, struct pm8001_tmf_task *tmf) | ||
| 871 | { | ||
| 872 | struct sas_ssp_task ssp_task; | ||
| 873 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) | ||
| 874 | return TMF_RESP_FUNC_ESUPP; | ||
| 875 | |||
| 876 | strncpy((u8 *)&ssp_task.LUN, lun, 8); | ||
| 877 | return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), | ||
| 878 | tmf); | ||
| 879 | } | ||
| 880 | |||
/**
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1) , only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;
	/* -1 (not a TMF_RESP_* code) signals an unusable device to libsas */
	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_find_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		/* NOTE(review): rc from sas_phy_reset() and the internal
		 * abort is overwritten below; only the set_dev_state_req()
		 * result is reported — confirm this is intended. */
		rc = sas_phy_reset(phy, 1);
		/* presumably gives the link time to come back up after the
		 * reset — TODO confirm the 2s value against the hardware
		 * documentation */
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
			dev, 1, 0);
		pm8001_dev->setds_completion = &completion_setstate;
		/* 0x01: device state requested from firmware — presumably
		 * "operational"; verify against the SPC programming manual */
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		wait_for_completion(&completion_setstate);
	} else{
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
	return rc;
}
| 916 | |||
/* mandatory SAM-3, the task reset the specified LUN*/
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (dev_is_sata(dev)) {
		/* SATA has no LU RESET TMF: abort outstanding I/O, reset
		 * the local phy and ask firmware to put the device back in
		 * state 0x01 (presumably "operational" — confirm against
		 * the SPC programming manual) instead. */
		struct sas_phy *phy = sas_find_local_phy(dev);
		/* NOTE(review): each rc below overwrites the previous one;
		 * only the last result is reported to the caller. */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		msleep(2000);
	} else {
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If failed, fall-through I_T_Nexus reset */
	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
	return rc;
}
| 941 | |||
/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;	/* poison value; set by pm8001_find_tag() */
	int i = 0;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	/* QUERY TASK is only issued for SSP targets. */
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		/* Dump the CDB being queried for debugging. */
		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
		for (i = 0; i < 16; i++)
			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
		printk(KERN_INFO "]\n");
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is still in Lun \n"));
			/* fallthrough — NOTE(review): this also prints the
			 * "not in Lun" message for the SUCC case; confirm
			 * the fallthrough is intentional. */
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is not in Lun or failed,"
				" reset the phy \n"));
			break;
		}
	}
	pm8001_printk(":rc= %d\n", rc);
	return rc;
}
| 990 | |||
/* mandatory SAM-3, still need free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag = 0xdeadbeef;	/* poison value; set by pm8001_find_tag() */
	u32 device_id;
	struct domain_device *dev ;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct pm8001_ccb_info *ccb;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;
	/* A task that already completed needs no abort. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		dev = task->dev;
		ccb = task->lldd_task;
		pm8001_dev = dev->lldd_dev;
		pm8001_ha = pm8001_find_ha_by_dev(dev);
		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			printk(KERN_INFO "No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		device_id = pm8001_dev->device_id;
		PM8001_EH_DBG(pm8001_ha,
			pm8001_printk("abort io to deviceid= %d\n", device_id));
		/* SSP: issue an ABORT TASK TMF to the target, then also
		 * abort the command on the controller side.
		 * NOTE(review): the internal abort's return value is
		 * discarded; rc reflects only the TMF result. */
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		/* SATA/STP: no TMF available, controller-side abort only. */
		dev = task->dev;
		pm8001_dev = dev->lldd_dev;
		pm8001_ha = pm8001_find_ha_by_dev(dev);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			printk(KERN_INFO "No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		dev = task->dev;
		pm8001_dev = dev->lldd_dev;
		pm8001_ha = pm8001_find_ha_by_dev(dev);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			printk(KERN_INFO "No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
	out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_printk("rc= %d\n", rc);
	return rc;
}
| 1067 | |||
| 1068 | int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) | ||
| 1069 | { | ||
| 1070 | int rc = TMF_RESP_FUNC_FAILED; | ||
| 1071 | struct pm8001_tmf_task tmf_task; | ||
| 1072 | |||
| 1073 | tmf_task.tmf = TMF_ABORT_TASK_SET; | ||
| 1074 | rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); | ||
| 1075 | return rc; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | int pm8001_clear_aca(struct domain_device *dev, u8 *lun) | ||
| 1079 | { | ||
| 1080 | int rc = TMF_RESP_FUNC_FAILED; | ||
| 1081 | struct pm8001_tmf_task tmf_task; | ||
| 1082 | |||
| 1083 | tmf_task.tmf = TMF_CLEAR_ACA; | ||
| 1084 | rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); | ||
| 1085 | |||
| 1086 | return rc; | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) | ||
| 1090 | { | ||
| 1091 | int rc = TMF_RESP_FUNC_FAILED; | ||
| 1092 | struct pm8001_tmf_task tmf_task; | ||
| 1093 | struct pm8001_device *pm8001_dev = dev->lldd_dev; | ||
| 1094 | struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); | ||
| 1095 | |||
| 1096 | PM8001_EH_DBG(pm8001_ha, | ||
| 1097 | pm8001_printk("I_T_L_Q clear task set[%x]\n", | ||
| 1098 | pm8001_dev->device_id)); | ||
| 1099 | tmf_task.tmf = TMF_CLEAR_TASK_SET; | ||
| 1100 | rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); | ||
| 1101 | return rc; | ||
| 1102 | } | ||
| 1103 | |||
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h new file mode 100644 index 000000000000..30f2ede55a75 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.h | |||
| @@ -0,0 +1,481 @@ | |||
| 1 | /* | ||
| 2 | * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008-2009 USI Co., Ltd. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions | ||
| 9 | * are met: | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions, and the following disclaimer, | ||
| 12 | * without modification. | ||
| 13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 14 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 15 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 16 | * including a substantially similar Disclaimer requirement for further | ||
| 17 | * binary redistribution. | ||
| 18 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 19 | * of any contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * Alternatively, this software may be distributed under the terms of the | ||
| 23 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 24 | * Software Foundation. | ||
| 25 | * | ||
| 26 | * NO WARRANTY | ||
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 31 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 32 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 33 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 34 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 35 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 36 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 37 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 38 | * | ||
| 39 | */ | ||
| 40 | |||
| 41 | #ifndef _PM8001_SAS_H_ | ||
| 42 | #define _PM8001_SAS_H_ | ||
| 43 | |||
| 44 | #include <linux/kernel.h> | ||
| 45 | #include <linux/module.h> | ||
| 46 | #include <linux/spinlock.h> | ||
| 47 | #include <linux/delay.h> | ||
| 48 | #include <linux/types.h> | ||
| 49 | #include <linux/ctype.h> | ||
| 50 | #include <linux/dma-mapping.h> | ||
| 51 | #include <linux/pci.h> | ||
| 52 | #include <linux/interrupt.h> | ||
| 53 | #include <linux/smp_lock.h> | ||
| 54 | #include <scsi/libsas.h> | ||
| 55 | #include <scsi/scsi_tcq.h> | ||
| 56 | #include <scsi/sas_ata.h> | ||
| 57 | #include <asm/atomic.h> | ||
| 58 | #include "pm8001_defs.h" | ||
| 59 | |||
| 60 | #define DRV_NAME "pm8001" | ||
| 61 | #define DRV_VERSION "0.1.36" | ||
| 62 | #define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */ | ||
| 63 | #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ | ||
| 64 | #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ | ||
| 65 | #define PM8001_IO_LOGGING 0x08 /* I/O path logging */ | ||
| 66 | #define PM8001_EH_LOGGING 0x10 /* Error message logging */ | ||
| 67 | #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ | ||
| 68 | #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ | ||
| 69 | #define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ | ||
| 70 | __func__, __LINE__, ## arg) | ||
/*
 * Run CMD only when the matching logging-level bit is set for this HBA.
 *
 * Fixes vs. original: the expansion ended in "while (0);" — the trailing
 * semicolon defeats the do/while(0) idiom and breaks use in unbraced
 * if/else arms; the redundant inner do/while is gone and the macro
 * arguments are parenthesized.
 */
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)		\
do {							\
	if (unlikely((HBA)->logging_level & (LEVEL))) {	\
		CMD;					\
	}						\
} while (0)
| 78 | |||
| 79 | #define PM8001_EH_DBG(HBA, CMD) \ | ||
| 80 | PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD) | ||
| 81 | |||
| 82 | #define PM8001_INIT_DBG(HBA, CMD) \ | ||
| 83 | PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD) | ||
| 84 | |||
| 85 | #define PM8001_DISC_DBG(HBA, CMD) \ | ||
| 86 | PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD) | ||
| 87 | |||
| 88 | #define PM8001_IO_DBG(HBA, CMD) \ | ||
| 89 | PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD) | ||
| 90 | |||
| 91 | #define PM8001_FAIL_DBG(HBA, CMD) \ | ||
| 92 | PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD) | ||
| 93 | |||
| 94 | #define PM8001_IOCTL_DBG(HBA, CMD) \ | ||
| 95 | PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD) | ||
| 96 | |||
| 97 | #define PM8001_MSG_DBG(HBA, CMD) \ | ||
| 98 | PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD) | ||
| 99 | |||
| 100 | |||
| 101 | #define PM8001_USE_TASKLET | ||
| 102 | #define PM8001_USE_MSIX | ||
| 103 | |||
| 104 | |||
| 105 | #define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) | ||
| 106 | |||
| 107 | #define PM8001_NAME_LENGTH 32/* generic length of strings */ | ||
| 108 | extern struct list_head hba_list; | ||
| 109 | extern const struct pm8001_dispatch pm8001_8001_dispatch; | ||
| 110 | |||
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
struct pm8001_tmf_task;
/*
 * struct pm8001_dispatch - per-chip operation table.
 * Every hardware access in the driver goes through these callbacks so a
 * different controller generation can be supported by supplying another
 * dispatch table (see pm8001_8001_dispatch).
 */
struct pm8001_dispatch {
	char *name;	/* human-readable chip name */
	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
	/* note: the "interupt" spelling is part of the established name */
	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
	/* build a PRD (scatter/gather) list for the hardware */
	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
	/* per-protocol command builders */
	int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb);
	int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
	int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
	/* device (de)registration with controller firmware */
	int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u32 flag);
	int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
	int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
		u32 phy_id, u32 phy_op);
	int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
		u32 cmd_tag);
	int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
	/* NVM data and firmware-flash management requests */
	int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
	int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
	int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
		void *payload);
	int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_dev, u32 state);
	int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
		u32 state);
	int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
		u32 state);
	int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
};
| 158 | |||
/* Static description of one supported chip variant. */
struct pm8001_chip_info {
	u32 n_phy;	/* number of phys on this chip */
	const struct pm8001_dispatch *dispatch;	/* its operation table */
};
/* Shorthand for the current HBA's dispatch table (expects pm8001_ha). */
#define PM8001_CHIP_DISP	(pm8001_ha->chip->dispatch)

/* Driver-private wrapper around a libsas port. */
struct pm8001_port {
	struct asd_sas_port	sas_port;
};

/* Driver-private per-phy state. */
struct pm8001_phy {
	struct pm8001_hba_info	*pm8001_ha;	/* owning HBA */
	struct pm8001_port	*port;		/* port this phy belongs to */
	struct asd_sas_phy	sas_phy;	/* libsas phy */
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64			dev_sas_addr;
	u32			phy_type;
	struct completion	*enable_completion;	/* signalled on phy enable */
	u32			frame_rcvd_size;
	u8			frame_rcvd[32];	/* last identify/FIS frame received */
	u8			phy_attached;
	u8			phy_state;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
};
| 185 | |||
/* Driver-private per-device state, linked from domain_device->lldd_dev. */
struct pm8001_device {
	enum sas_dev_type	dev_type;
	struct domain_device	*sas_device;	/* back-pointer to libsas device */
	u32			attached_phy;
	u32			id;		/* index into pm8001_ha->devices */
	struct completion	*dcompletion;	/* device registration done */
	struct completion	*setds_completion;	/* set_dev_state done */
	u32			device_id;	/* firmware-assigned device id */
	u32			running_req;	/* outstanding request count */
};

/* Second half of a PRD entry: length plus extension/end flags. */
struct pm8001_prd_imt {
	__le32			len;
	__le32			e;
};

/* One hardware scatter/gather (PRD) entry. */
struct pm8001_prd {
	__le64			addr;		/* 64-bit buffer address */
	struct pm8001_prd_imt	im_len;		/* 64-bit length */
} __attribute__ ((packed));
/*
 * CCB(Command Control Block)
 */
struct pm8001_ccb_info {
	struct list_head	entry;
	struct sas_task		*task;		/* the task this CCB carries */
	u32			n_elem;		/* number of mapped SG elements */
	u32			ccb_tag;	/* tag identifying this CCB */
	dma_addr_t		ccb_dma_handle;
	struct pm8001_device	*device;
	struct pm8001_prd	buf_prd[PM8001_MAX_DMA_SG];
	struct fw_control_ex	*fw_control_context;	/* for fw flash/NVMD ops */
};
| 219 | |||
/* One DMA-coherent memory region shared with the controller. */
struct mpi_mem {
	void			*virt_ptr;
	dma_addr_t		phys_addr;
	u32			phys_addr_hi;	/* upper 32 bits of phys_addr */
	u32			phys_addr_lo;	/* lower 32 bits of phys_addr */
	u32			total_len;
	u32			num_elements;
	u32			element_size;
	u32			alignment;
};

struct mpi_mem_req {
	/* The number of element in the mpiMemory array */
	u32			count;
	/* The array of structures that define memory regions*/
	struct mpi_mem		region[USI_MAX_MEMCNT];
};

/*
 * Mirror of the controller's main configuration table (MPI).
 * Field layout presumably matches the SPC firmware specification —
 * confirm against the PMC-Sierra programming manual before changing.
 */
struct main_cfg_table {
	u32			signature;
	u32			interface_rev;
	u32			firmware_rev;
	u32			max_out_io;
	u32			max_sgl;
	u32			ctrl_cap_flag;
	u32			gst_offset;
	u32			inbound_queue_offset;
	u32			outbound_queue_offset;
	u32			inbound_q_nppd_hppd;
	u32			outbound_hw_event_pid0_3;
	u32			outbound_hw_event_pid4_7;
	u32			outbound_ncq_event_pid0_3;
	u32			outbound_ncq_event_pid4_7;
	u32			outbound_tgt_ITNexus_event_pid0_3;
	u32			outbound_tgt_ITNexus_event_pid4_7;
	u32			outbound_tgt_ssp_event_pid0_3;
	u32			outbound_tgt_ssp_event_pid4_7;
	u32			outbound_tgt_smp_event_pid0_3;
	u32			outbound_tgt_smp_event_pid4_7;
	u32			upper_event_log_addr;
	u32			lower_event_log_addr;
	u32			event_log_size;
	u32			event_log_option;
	u32			upper_iop_event_log_addr;
	u32			lower_iop_event_log_addr;
	u32			iop_event_log_size;
	u32			iop_event_log_option;
	u32			fatal_err_interrupt;
	u32			fatal_err_dump_offset0;
	u32			fatal_err_dump_length0;
	u32			fatal_err_dump_offset1;
	u32			fatal_err_dump_length1;
	u32			hda_mode_flag;
	u32			anolog_setup_table_offset;
};
/* Mirror of the controller's general status table. */
struct general_status_table {
	u32			gst_len_mpistate;
	u32			iq_freeze_state0;
	u32			iq_freeze_state1;
	u32			msgu_tcnt;
	u32			iop_tcnt;
	u32			reserved;
	u32			phy_state[8];
	u32			reserved1;
	u32			reserved2;
	u32			reserved3;
	u32			recover_err_info[8];
};
/* Host view of one host->controller (inbound) message queue. */
struct inbound_queue_table {
	u32			element_pri_size_cnt;
	u32			upper_base_addr;
	u32			lower_base_addr;
	u32			ci_upper_base_addr;
	u32			ci_lower_base_addr;
	u32			pi_pci_bar;
	u32			pi_offset;
	u32			total_length;
	void			*base_virt;	/* host virtual addr of ring */
	void			*ci_virt;	/* consumer index written by fw */
	u32			reserved;
	__le32			consumer_index;	/* updated by the controller */
	u32			producer_idx;	/* updated by the host */
};
/* Host view of one controller->host (outbound) message queue. */
struct outbound_queue_table {
	u32			element_size_cnt;
	u32			upper_base_addr;
	u32			lower_base_addr;
	void			*base_virt;	/* host virtual addr of ring */
	u32			pi_upper_base_addr;
	u32			pi_lower_base_addr;
	u32			ci_pci_bar;
	u32			ci_offset;
	u32			total_length;
	void			*pi_virt;	/* producer index written by fw */
	u32			interrup_vec_cnt_delay;
	u32			dinterrup_to_pci_offset;
	__le32			producer_index;	/* updated by the controller */
	u32			consumer_idx;	/* updated by the host */
};
/* One mapped PCI BAR region. */
struct pm8001_hba_memspace {
	void __iomem  		*memvirtaddr;
	u64			membase;
	u32			memsize;
};
/* Per-controller (HBA) state — one instance per PCI function. */
struct pm8001_hba_info {
	char			name[PM8001_NAME_LENGTH];
	struct list_head	list;	/* linked into global hba_list */
	unsigned long		flags;
	spinlock_t		lock;/* host-wide lock */
	struct pci_dev		*pdev;/* our device */
	struct device		*dev;
	struct pm8001_hba_memspace io_mem[6];	/* mapped PCI BARs */
	struct mpi_mem_req	memoryMap;	/* DMA regions shared with fw */
	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/
	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/
	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/
	void __iomem	*inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
	void __iomem	*outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
	struct main_cfg_table	main_cfg_tbl;
	struct general_status_table gs_tbl;
	struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM];
	struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
	u8			sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct	*sas;/* SCSI/SAS glue */
	struct Scsi_Host	*shost;
	u32			chip_id;
	const struct pm8001_chip_info	*chip;
	struct completion	*nvmd_completion;	/* NVMD request done */
	int			tags_num;
	unsigned long		*tags;	/* bitmap backing the CCB tag allocator */
	struct pm8001_phy	phy[PM8001_MAX_PHYS];
	struct pm8001_port	port[PM8001_MAX_PHYS];
	u32			id;
	u32			irq;
	struct pm8001_device	*devices;	/* device slot array */
	struct pm8001_ccb_info	*ccb_info;	/* CCB array */
#ifdef PM8001_USE_MSIX
	struct msix_entry	msix_entries[16];/*for msi-x interrupt*/
	int			number_of_intr;/*will be used in remove()*/
#endif
#ifdef PM8001_USE_TASKLET
	struct tasklet_struct	tasklet;
#endif
	struct list_head	wq_list;	/* pending pm8001_wq work items */
	u32			logging_level;	/* PM8001_*_LOGGING bitmask */
	u32			fw_status;
	const struct firmware	*fw_image;
};

/* Deferred-work item carrying an event from interrupt context. */
struct pm8001_wq {
	struct delayed_work work_q;
	struct pm8001_hba_info *pm8001_ha;
	void *data;
	int handler;	/* which event handler to invoke */
	struct list_head entry;
};

/* On-flash firmware image header (big-endian, packed wire format). */
struct pm8001_fw_image_header {
	u8 vender_id[8];
	u8 product_id;
	u8 hardware_rev;
	u8 dest_partition;
	u8 reserved;
	u8 fw_rev[4];
	__be32 image_length;
	__be32 image_crc;
	__be32 startup_entry;
} __attribute__((packed, aligned(4)));

/* define task management IU */
struct pm8001_tmf_task {
	u8	tmf;	/* TMF_* function code */
	u32	tag_of_task_to_be_managed;	/* only used by tag-directed TMFs */
};
/**
 * FW Flash Update status values
 */
#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT	0x00
#define FLASH_UPDATE_IN_PROGRESS	0x01
#define FLASH_UPDATE_HDR_ERR		0x02
#define FLASH_UPDATE_OFFSET_ERR		0x03
#define FLASH_UPDATE_CRC_ERR		0x04
#define FLASH_UPDATE_LENGTH_ERR		0x05
#define FLASH_UPDATE_HW_ERR		0x06
#define FLASH_UPDATE_DNLD_NOT_SUPPORTED		0x10
#define FLASH_UPDATE_DISABLED		0x11

/**
 * brief param structure for firmware flash update.
 */
struct fw_flash_updata_info {
	u32			cur_image_offset;
	u32			cur_image_len;
	u32			total_image_len;
	struct pm8001_prd	sgl;	/* DMA descriptor for this chunk */
};

/* User-visible firmware control request/response block. */
struct fw_control_info {
	u32			retcode;/*ret code (status)*/
	u32			phase;/*ret code phase*/
	u32			phaseCmplt;/*percent complete for the current
					update phase */
	u32			version;/*Hex encoded firmware version number*/
	u32			offset;/*Used for downloading firmware	*/
	u32			len; /*len of buffer*/
	u32			size;/* Used in OS VPD and Trace get size
					operations.*/
	u32			reserved;/* padding required for 64 bit
					alignment */
	/* NOTE(review): [1] is the old trailing-array idiom; a C99 flexible
	 * array member would be preferable but changes sizeof — keep as is. */
	u8			buffer[1];/* Start of buffer */
};
/* Driver-side context tracking an in-flight firmware control request. */
struct fw_control_ex {
	struct fw_control_info *fw_control;
	void			*buffer;/* keep buffer pointer to be
					freed when the response comes*/
	void			*virtAddr;/* keep virtual address of the data */
	void			*usrAddr;/* keep virtual address of the
					user data */
	dma_addr_t		phys_addr;
	u32			len; /* len of buffer  */
	void			*payload; /* pointer to IOCTL Payload */
	u8			inProgress;/*if 1 - the IOCTL request is in
					progress */
	void			*param1;
	void			*param2;
	void			*param3;
};
| 447 | |||
| 448 | /******************** function prototype *********************/ | ||
| 449 | int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); | ||
| 450 | void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); | ||
| 451 | u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); | ||
| 452 | void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx); | ||
| 453 | void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, | ||
| 454 | struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); | ||
| 455 | int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
| 456 | void *funcdata); | ||
| 457 | int pm8001_slave_alloc(struct scsi_device *scsi_dev); | ||
| 458 | int pm8001_slave_configure(struct scsi_device *sdev); | ||
| 459 | void pm8001_scan_start(struct Scsi_Host *shost); | ||
| 460 | int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); | ||
| 461 | int pm8001_queue_command(struct sas_task *task, const int num, | ||
| 462 | gfp_t gfp_flags); | ||
| 463 | int pm8001_abort_task(struct sas_task *task); | ||
| 464 | int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); | ||
| 465 | int pm8001_clear_aca(struct domain_device *dev, u8 *lun); | ||
| 466 | int pm8001_clear_task_set(struct domain_device *dev, u8 *lun); | ||
| 467 | int pm8001_dev_found(struct domain_device *dev); | ||
| 468 | void pm8001_dev_gone(struct domain_device *dev); | ||
| 469 | int pm8001_lu_reset(struct domain_device *dev, u8 *lun); | ||
| 470 | int pm8001_I_T_nexus_reset(struct domain_device *dev); | ||
| 471 | int pm8001_query_task(struct sas_task *task); | ||
| 472 | int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, | ||
| 473 | dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, | ||
| 474 | u32 mem_size, u32 align); | ||
| 475 | |||
| 476 | |||
| 477 | /* ctl shared API */ | ||
| 478 | extern struct device_attribute *pm8001_host_attrs[]; | ||
| 479 | |||
| 480 | #endif | ||
| 481 | |||
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 0a97bc9074bb..34c6b896a91b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -278,12 +278,17 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) | |||
| 278 | * pmcraid_change_queue_depth - Change the device's queue depth | 278 | * pmcraid_change_queue_depth - Change the device's queue depth |
| 279 | * @scsi_dev: scsi device struct | 279 | * @scsi_dev: scsi device struct |
| 280 | * @depth: depth to set | 280 | * @depth: depth to set |
| 281 | * @reason: calling context | ||
| 281 | * | 282 | * |
| 282 | * Return value | 283 | * Return value |
| 283 | * actual depth set | 284 | * actual depth set |
| 284 | */ | 285 | */ |
| 285 | static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) | 286 | static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, |
| 287 | int reason) | ||
| 286 | { | 288 | { |
| 289 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 290 | return -EOPNOTSUPP; | ||
| 291 | |||
| 287 | if (depth > PMCRAID_MAX_CMD_PER_LUN) | 292 | if (depth > PMCRAID_MAX_CMD_PER_LUN) |
| 288 | depth = PMCRAID_MAX_CMD_PER_LUN; | 293 | depth = PMCRAID_MAX_CMD_PER_LUN; |
| 289 | 294 | ||
| @@ -3342,7 +3347,7 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) | |||
| 3342 | * @direction : data transfer direction | 3347 | * @direction : data transfer direction |
| 3343 | * | 3348 | * |
| 3344 | * Return value | 3349 | * Return value |
| 3345 | * 0 on sucess, non-zero error code on failure | 3350 | * 0 on success, non-zero error code on failure |
| 3346 | */ | 3351 | */ |
| 3347 | static int pmcraid_build_passthrough_ioadls( | 3352 | static int pmcraid_build_passthrough_ioadls( |
| 3348 | struct pmcraid_cmd *cmd, | 3353 | struct pmcraid_cmd *cmd, |
| @@ -3401,7 +3406,7 @@ static int pmcraid_build_passthrough_ioadls( | |||
| 3401 | * @direction: data transfer direction | 3406 | * @direction: data transfer direction |
| 3402 | * | 3407 | * |
| 3403 | * Return value | 3408 | * Return value |
| 3404 | * 0 on sucess, non-zero error code on failure | 3409 | * 0 on success, non-zero error code on failure |
| 3405 | */ | 3410 | */ |
| 3406 | static void pmcraid_release_passthrough_ioadls( | 3411 | static void pmcraid_release_passthrough_ioadls( |
| 3407 | struct pmcraid_cmd *cmd, | 3412 | struct pmcraid_cmd *cmd, |
| @@ -3429,7 +3434,7 @@ static void pmcraid_release_passthrough_ioadls( | |||
| 3429 | * @arg: pointer to pmcraid_passthrough_buffer user buffer | 3434 | * @arg: pointer to pmcraid_passthrough_buffer user buffer |
| 3430 | * | 3435 | * |
| 3431 | * Return value | 3436 | * Return value |
| 3432 | * 0 on sucess, non-zero error code on failure | 3437 | * 0 on success, non-zero error code on failure |
| 3433 | */ | 3438 | */ |
| 3434 | static long pmcraid_ioctl_passthrough( | 3439 | static long pmcraid_ioctl_passthrough( |
| 3435 | struct pmcraid_instance *pinstance, | 3440 | struct pmcraid_instance *pinstance, |
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 3441b3f90827..2752b56cad56 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h | |||
| @@ -771,11 +771,11 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = { | |||
| 771 | {0x01180600, IOASC_LOG_LEVEL_MUST, | 771 | {0x01180600, IOASC_LOG_LEVEL_MUST, |
| 772 | "Recovered Error, soft media error, sector reassignment suggested"}, | 772 | "Recovered Error, soft media error, sector reassignment suggested"}, |
| 773 | {0x015D0000, IOASC_LOG_LEVEL_MUST, | 773 | {0x015D0000, IOASC_LOG_LEVEL_MUST, |
| 774 | "Recovered Error, failure prediction thresold exceeded"}, | 774 | "Recovered Error, failure prediction threshold exceeded"}, |
| 775 | {0x015D9200, IOASC_LOG_LEVEL_MUST, | 775 | {0x015D9200, IOASC_LOG_LEVEL_MUST, |
| 776 | "Recovered Error, soft Cache Card Battery error thresold"}, | 776 | "Recovered Error, soft Cache Card Battery error threshold"}, |
| 777 | {0x015D9200, IOASC_LOG_LEVEL_MUST, | 777 | {0x015D9200, IOASC_LOG_LEVEL_MUST, |
| 778 | "Recovered Error, soft Cache Card Battery error thresold"}, | 778 | "Recovered Error, soft Cache Card Battery error threshold"}, |
| 779 | {0x02048000, IOASC_LOG_LEVEL_MUST, | 779 | {0x02048000, IOASC_LOG_LEVEL_MUST, |
| 780 | "Not Ready, IOA Reset Required"}, | 780 | "Not Ready, IOA Reset Required"}, |
| 781 | {0x02408500, IOASC_LOG_LEVEL_MUST, | 781 | {0x02408500, IOASC_LOG_LEVEL_MUST, |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fbcb82a2f7f4..21e2bc4d7401 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -1654,7 +1654,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
| 1654 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | 1654 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); |
| 1655 | } | 1655 | } |
| 1656 | 1656 | ||
| 1657 | if (scsi_add_host(vha->host, &fc_vport->dev)) { | 1657 | if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, |
| 1658 | &ha->pdev->dev)) { | ||
| 1658 | DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", | 1659 | DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", |
| 1659 | vha->host_no, vha->vp_idx)); | 1660 | vha->host_no, vha->vp_idx)); |
| 1660 | goto vport_create_failed_2; | 1661 | goto vport_create_failed_2; |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index cca8e4ab0372..cb2eca4c26d8 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
| @@ -377,6 +377,24 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | |||
| 377 | return ptr + sizeof(struct qla2xxx_mq_chain); | 377 | return ptr + sizeof(struct qla2xxx_mq_chain); |
| 378 | } | 378 | } |
| 379 | 379 | ||
| 380 | static void | ||
| 381 | qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) | ||
| 382 | { | ||
| 383 | struct qla_hw_data *ha = vha->hw; | ||
| 384 | |||
| 385 | if (rval != QLA_SUCCESS) { | ||
| 386 | qla_printk(KERN_WARNING, ha, | ||
| 387 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 388 | ha->fw_dumped = 0; | ||
| 389 | } else { | ||
| 390 | qla_printk(KERN_INFO, ha, | ||
| 391 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 392 | vha->host_no, ha->fw_dump); | ||
| 393 | ha->fw_dumped = 1; | ||
| 394 | qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 380 | /** | 398 | /** |
| 381 | * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. | 399 | * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. |
| 382 | * @ha: HA context | 400 | * @ha: HA context |
| @@ -530,17 +548,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 530 | if (rval == QLA_SUCCESS) | 548 | if (rval == QLA_SUCCESS) |
| 531 | qla2xxx_copy_queues(ha, nxt); | 549 | qla2xxx_copy_queues(ha, nxt); |
| 532 | 550 | ||
| 533 | if (rval != QLA_SUCCESS) { | 551 | qla2xxx_dump_post_process(base_vha, rval); |
| 534 | qla_printk(KERN_WARNING, ha, | ||
| 535 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 536 | ha->fw_dumped = 0; | ||
| 537 | |||
| 538 | } else { | ||
| 539 | qla_printk(KERN_INFO, ha, | ||
| 540 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 541 | base_vha->host_no, ha->fw_dump); | ||
| 542 | ha->fw_dumped = 1; | ||
| 543 | } | ||
| 544 | 552 | ||
| 545 | qla2300_fw_dump_failed: | 553 | qla2300_fw_dump_failed: |
| 546 | if (!hardware_locked) | 554 | if (!hardware_locked) |
| @@ -737,17 +745,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 737 | if (rval == QLA_SUCCESS) | 745 | if (rval == QLA_SUCCESS) |
| 738 | qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); | 746 | qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); |
| 739 | 747 | ||
| 740 | if (rval != QLA_SUCCESS) { | 748 | qla2xxx_dump_post_process(base_vha, rval); |
| 741 | qla_printk(KERN_WARNING, ha, | ||
| 742 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 743 | ha->fw_dumped = 0; | ||
| 744 | |||
| 745 | } else { | ||
| 746 | qla_printk(KERN_INFO, ha, | ||
| 747 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 748 | base_vha->host_no, ha->fw_dump); | ||
| 749 | ha->fw_dumped = 1; | ||
| 750 | } | ||
| 751 | 749 | ||
| 752 | qla2100_fw_dump_failed: | 750 | qla2100_fw_dump_failed: |
| 753 | if (!hardware_locked) | 751 | if (!hardware_locked) |
| @@ -984,17 +982,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 984 | qla24xx_copy_eft(ha, nxt); | 982 | qla24xx_copy_eft(ha, nxt); |
| 985 | 983 | ||
| 986 | qla24xx_fw_dump_failed_0: | 984 | qla24xx_fw_dump_failed_0: |
| 987 | if (rval != QLA_SUCCESS) { | 985 | qla2xxx_dump_post_process(base_vha, rval); |
| 988 | qla_printk(KERN_WARNING, ha, | ||
| 989 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 990 | ha->fw_dumped = 0; | ||
| 991 | |||
| 992 | } else { | ||
| 993 | qla_printk(KERN_INFO, ha, | ||
| 994 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 995 | base_vha->host_no, ha->fw_dump); | ||
| 996 | ha->fw_dumped = 1; | ||
| 997 | } | ||
| 998 | 986 | ||
| 999 | qla24xx_fw_dump_failed: | 987 | qla24xx_fw_dump_failed: |
| 1000 | if (!hardware_locked) | 988 | if (!hardware_locked) |
| @@ -1305,17 +1293,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1305 | } | 1293 | } |
| 1306 | 1294 | ||
| 1307 | qla25xx_fw_dump_failed_0: | 1295 | qla25xx_fw_dump_failed_0: |
| 1308 | if (rval != QLA_SUCCESS) { | 1296 | qla2xxx_dump_post_process(base_vha, rval); |
| 1309 | qla_printk(KERN_WARNING, ha, | ||
| 1310 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 1311 | ha->fw_dumped = 0; | ||
| 1312 | |||
| 1313 | } else { | ||
| 1314 | qla_printk(KERN_INFO, ha, | ||
| 1315 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 1316 | base_vha->host_no, ha->fw_dump); | ||
| 1317 | ha->fw_dumped = 1; | ||
| 1318 | } | ||
| 1319 | 1297 | ||
| 1320 | qla25xx_fw_dump_failed: | 1298 | qla25xx_fw_dump_failed: |
| 1321 | if (!hardware_locked) | 1299 | if (!hardware_locked) |
| @@ -1628,17 +1606,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1628 | } | 1606 | } |
| 1629 | 1607 | ||
| 1630 | qla81xx_fw_dump_failed_0: | 1608 | qla81xx_fw_dump_failed_0: |
| 1631 | if (rval != QLA_SUCCESS) { | 1609 | qla2xxx_dump_post_process(base_vha, rval); |
| 1632 | qla_printk(KERN_WARNING, ha, | ||
| 1633 | "Failed to dump firmware (%x)!!!\n", rval); | ||
| 1634 | ha->fw_dumped = 0; | ||
| 1635 | |||
| 1636 | } else { | ||
| 1637 | qla_printk(KERN_INFO, ha, | ||
| 1638 | "Firmware dump saved to temp buffer (%ld/%p).\n", | ||
| 1639 | base_vha->host_no, ha->fw_dump); | ||
| 1640 | ha->fw_dumped = 1; | ||
| 1641 | } | ||
| 1642 | 1610 | ||
| 1643 | qla81xx_fw_dump_failed: | 1611 | qla81xx_fw_dump_failed: |
| 1644 | if (!hardware_locked) | 1612 | if (!hardware_locked) |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 215061861794..6b9bf23c7735 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -2123,6 +2123,7 @@ enum qla_work_type { | |||
| 2123 | QLA_EVT_ASYNC_LOGIN_DONE, | 2123 | QLA_EVT_ASYNC_LOGIN_DONE, |
| 2124 | QLA_EVT_ASYNC_LOGOUT, | 2124 | QLA_EVT_ASYNC_LOGOUT, |
| 2125 | QLA_EVT_ASYNC_LOGOUT_DONE, | 2125 | QLA_EVT_ASYNC_LOGOUT_DONE, |
| 2126 | QLA_EVT_UEVENT, | ||
| 2126 | }; | 2127 | }; |
| 2127 | 2128 | ||
| 2128 | 2129 | ||
| @@ -2146,6 +2147,10 @@ struct qla_work_evt { | |||
| 2146 | #define QLA_LOGIO_LOGIN_RETRIED BIT_0 | 2147 | #define QLA_LOGIO_LOGIN_RETRIED BIT_0 |
| 2147 | u16 data[2]; | 2148 | u16 data[2]; |
| 2148 | } logio; | 2149 | } logio; |
| 2150 | struct { | ||
| 2151 | u32 code; | ||
| 2152 | #define QLA_UEVENT_CODE_FW_DUMP 0 | ||
| 2153 | } uevent; | ||
| 2149 | } u; | 2154 | } u; |
| 2150 | }; | 2155 | }; |
| 2151 | 2156 | ||
| @@ -2435,11 +2440,11 @@ struct qla_hw_data { | |||
| 2435 | dma_addr_t edc_data_dma; | 2440 | dma_addr_t edc_data_dma; |
| 2436 | uint16_t edc_data_len; | 2441 | uint16_t edc_data_len; |
| 2437 | 2442 | ||
| 2438 | #define XGMAC_DATA_SIZE PAGE_SIZE | 2443 | #define XGMAC_DATA_SIZE 4096 |
| 2439 | void *xgmac_data; | 2444 | void *xgmac_data; |
| 2440 | dma_addr_t xgmac_data_dma; | 2445 | dma_addr_t xgmac_data_dma; |
| 2441 | 2446 | ||
| 2442 | #define DCBX_TLV_DATA_SIZE PAGE_SIZE | 2447 | #define DCBX_TLV_DATA_SIZE 4096 |
| 2443 | void *dcbx_tlv; | 2448 | void *dcbx_tlv; |
| 2444 | dma_addr_t dcbx_tlv_dma; | 2449 | dma_addr_t dcbx_tlv_dma; |
| 2445 | 2450 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f3d1d1afa95b..e21851358509 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -92,6 +92,7 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, | |||
| 92 | uint16_t *); | 92 | uint16_t *); |
| 93 | extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, | 93 | extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, |
| 94 | fc_port_t *, uint16_t *); | 94 | fc_port_t *, uint16_t *); |
| 95 | extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); | ||
| 95 | 96 | ||
| 96 | extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); | 97 | extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); |
| 97 | 98 | ||
| @@ -246,7 +247,7 @@ qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *); | |||
| 246 | 247 | ||
| 247 | extern int | 248 | extern int |
| 248 | qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, | 249 | qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, |
| 249 | uint16_t *, uint16_t *, uint16_t *); | 250 | uint16_t *, uint16_t *, uint16_t *, uint16_t *); |
| 250 | 251 | ||
| 251 | extern int | 252 | extern int |
| 252 | qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); | 253 | qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9e3eaac25596..b74924b279ef 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -277,7 +277,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
| 277 | vha->marker_needed = 0; | 277 | vha->marker_needed = 0; |
| 278 | ha->isp_abort_cnt = 0; | 278 | ha->isp_abort_cnt = 0; |
| 279 | ha->beacon_blink_led = 0; | 279 | ha->beacon_blink_led = 0; |
| 280 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); | ||
| 281 | 280 | ||
| 282 | set_bit(0, ha->req_qid_map); | 281 | set_bit(0, ha->req_qid_map); |
| 283 | set_bit(0, ha->rsp_qid_map); | 282 | set_bit(0, ha->rsp_qid_map); |
| @@ -1203,7 +1202,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) | |||
| 1203 | } | 1202 | } |
| 1204 | qla2x00_get_resource_cnts(vha, NULL, | 1203 | qla2x00_get_resource_cnts(vha, NULL, |
| 1205 | &ha->fw_xcb_count, NULL, NULL, | 1204 | &ha->fw_xcb_count, NULL, NULL, |
| 1206 | &ha->max_npiv_vports); | 1205 | &ha->max_npiv_vports, NULL); |
| 1207 | 1206 | ||
| 1208 | if (!fw_major_version && ql2xallocfwdump) | 1207 | if (!fw_major_version && ql2xallocfwdump) |
| 1209 | qla2x00_alloc_fw_dump(vha); | 1208 | qla2x00_alloc_fw_dump(vha); |
| @@ -3573,6 +3572,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
| 3573 | ha->isp_abort_cnt = 0; | 3572 | ha->isp_abort_cnt = 0; |
| 3574 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 3573 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
| 3575 | 3574 | ||
| 3575 | if (IS_QLA81XX(ha)) | ||
| 3576 | qla2x00_get_fw_version(vha, | ||
| 3577 | &ha->fw_major_version, | ||
| 3578 | &ha->fw_minor_version, | ||
| 3579 | &ha->fw_subminor_version, | ||
| 3580 | &ha->fw_attributes, &ha->fw_memory_size, | ||
| 3581 | ha->mpi_version, &ha->mpi_capabilities, | ||
| 3582 | ha->phy_version); | ||
| 3583 | |||
| 3576 | if (ha->fce) { | 3584 | if (ha->fce) { |
| 3577 | ha->flags.fce_enabled = 1; | 3585 | ha->flags.fce_enabled = 1; |
| 3578 | memset(ha->fce, 0, | 3586 | memset(ha->fce, 0, |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b20a7169aac2..804987397b77 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -313,10 +313,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) | |||
| 313 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; | 313 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; |
| 314 | char *link_speed; | 314 | char *link_speed; |
| 315 | uint16_t handle_cnt; | 315 | uint16_t handle_cnt; |
| 316 | uint16_t cnt; | 316 | uint16_t cnt, mbx; |
| 317 | uint32_t handles[5]; | 317 | uint32_t handles[5]; |
| 318 | struct qla_hw_data *ha = vha->hw; | 318 | struct qla_hw_data *ha = vha->hw; |
| 319 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 319 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
| 320 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; | ||
| 320 | uint32_t rscn_entry, host_pid; | 321 | uint32_t rscn_entry, host_pid; |
| 321 | uint8_t rscn_queue_index; | 322 | uint8_t rscn_queue_index; |
| 322 | unsigned long flags; | 323 | unsigned long flags; |
| @@ -395,9 +396,10 @@ skip_rio: | |||
| 395 | break; | 396 | break; |
| 396 | 397 | ||
| 397 | case MBA_SYSTEM_ERR: /* System Error */ | 398 | case MBA_SYSTEM_ERR: /* System Error */ |
| 399 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox7) : 0; | ||
| 398 | qla_printk(KERN_INFO, ha, | 400 | qla_printk(KERN_INFO, ha, |
| 399 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", | 401 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " |
| 400 | mb[1], mb[2], mb[3]); | 402 | "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); |
| 401 | 403 | ||
| 402 | ha->isp_ops->fw_dump(vha, 1); | 404 | ha->isp_ops->fw_dump(vha, 1); |
| 403 | 405 | ||
| @@ -419,9 +421,10 @@ skip_rio: | |||
| 419 | break; | 421 | break; |
| 420 | 422 | ||
| 421 | case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ | 423 | case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ |
| 422 | DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", | 424 | DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", |
| 423 | vha->host_no)); | 425 | vha->host_no, mb[1])); |
| 424 | qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); | 426 | qla_printk(KERN_WARNING, ha, |
| 427 | "ISP Request Transfer Error (%x).\n", mb[1]); | ||
| 425 | 428 | ||
| 426 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 429 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
| 427 | break; | 430 | break; |
| @@ -485,10 +488,13 @@ skip_rio: | |||
| 485 | break; | 488 | break; |
| 486 | 489 | ||
| 487 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 490 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
| 491 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; | ||
| 488 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " | 492 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " |
| 489 | "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3])); | 493 | "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], |
| 490 | qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", | 494 | mbx)); |
| 491 | mb[1], mb[2], mb[3]); | 495 | qla_printk(KERN_INFO, ha, |
| 496 | "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], | ||
| 497 | mbx); | ||
| 492 | 498 | ||
| 493 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 499 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
| 494 | atomic_set(&vha->loop_state, LOOP_DOWN); | 500 | atomic_set(&vha->loop_state, LOOP_DOWN); |
| @@ -1347,16 +1353,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
| 1347 | 1353 | ||
| 1348 | sense_len = rsp_info_len = resid_len = fw_resid_len = 0; | 1354 | sense_len = rsp_info_len = resid_len = fw_resid_len = 0; |
| 1349 | if (IS_FWI2_CAPABLE(ha)) { | 1355 | if (IS_FWI2_CAPABLE(ha)) { |
| 1350 | sense_len = le32_to_cpu(sts24->sense_len); | 1356 | if (scsi_status & SS_SENSE_LEN_VALID) |
| 1351 | rsp_info_len = le32_to_cpu(sts24->rsp_data_len); | 1357 | sense_len = le32_to_cpu(sts24->sense_len); |
| 1352 | resid_len = le32_to_cpu(sts24->rsp_residual_count); | 1358 | if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) |
| 1353 | fw_resid_len = le32_to_cpu(sts24->residual_len); | 1359 | rsp_info_len = le32_to_cpu(sts24->rsp_data_len); |
| 1360 | if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) | ||
| 1361 | resid_len = le32_to_cpu(sts24->rsp_residual_count); | ||
| 1362 | if (comp_status == CS_DATA_UNDERRUN) | ||
| 1363 | fw_resid_len = le32_to_cpu(sts24->residual_len); | ||
| 1354 | rsp_info = sts24->data; | 1364 | rsp_info = sts24->data; |
| 1355 | sense_data = sts24->data; | 1365 | sense_data = sts24->data; |
| 1356 | host_to_fcp_swap(sts24->data, sizeof(sts24->data)); | 1366 | host_to_fcp_swap(sts24->data, sizeof(sts24->data)); |
| 1357 | } else { | 1367 | } else { |
| 1358 | sense_len = le16_to_cpu(sts->req_sense_length); | 1368 | if (scsi_status & SS_SENSE_LEN_VALID) |
| 1359 | rsp_info_len = le16_to_cpu(sts->rsp_info_len); | 1369 | sense_len = le16_to_cpu(sts->req_sense_length); |
| 1370 | if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) | ||
| 1371 | rsp_info_len = le16_to_cpu(sts->rsp_info_len); | ||
| 1360 | resid_len = le32_to_cpu(sts->residual_length); | 1372 | resid_len = le32_to_cpu(sts->residual_length); |
| 1361 | rsp_info = sts->rsp_info; | 1373 | rsp_info = sts->rsp_info; |
| 1362 | sense_data = sts->req_sense_data; | 1374 | sense_data = sts->req_sense_data; |
| @@ -1443,38 +1455,62 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
| 1443 | break; | 1455 | break; |
| 1444 | 1456 | ||
| 1445 | case CS_DATA_UNDERRUN: | 1457 | case CS_DATA_UNDERRUN: |
| 1446 | resid = resid_len; | 1458 | DEBUG2(printk(KERN_INFO |
| 1459 | "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. " | ||
| 1460 | "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n", | ||
| 1461 | vha->host_no, cp->device->id, cp->device->lun, comp_status, | ||
| 1462 | scsi_status, resid_len, fw_resid_len, cp->cmnd[0], | ||
| 1463 | cp->underflow)); | ||
| 1464 | |||
| 1447 | /* Use F/W calculated residual length. */ | 1465 | /* Use F/W calculated residual length. */ |
| 1448 | if (IS_FWI2_CAPABLE(ha)) { | 1466 | resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; |
| 1449 | if (!(scsi_status & SS_RESIDUAL_UNDER)) { | 1467 | scsi_set_resid(cp, resid); |
| 1450 | lscsi_status = 0; | 1468 | if (scsi_status & SS_RESIDUAL_UNDER) { |
| 1451 | } else if (resid != fw_resid_len) { | 1469 | if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { |
| 1452 | scsi_status &= ~SS_RESIDUAL_UNDER; | 1470 | DEBUG2(printk( |
| 1453 | lscsi_status = 0; | 1471 | "scsi(%ld:%d:%d:%d) Dropped frame(s) " |
| 1472 | "detected (%x of %x bytes)...residual " | ||
| 1473 | "length mismatch...retrying command.\n", | ||
| 1474 | vha->host_no, cp->device->channel, | ||
| 1475 | cp->device->id, cp->device->lun, resid, | ||
| 1476 | scsi_bufflen(cp))); | ||
| 1477 | |||
| 1478 | cp->result = DID_ERROR << 16 | lscsi_status; | ||
| 1479 | break; | ||
| 1454 | } | 1480 | } |
| 1455 | resid = fw_resid_len; | ||
| 1456 | } | ||
| 1457 | 1481 | ||
| 1458 | if (scsi_status & SS_RESIDUAL_UNDER) { | 1482 | if (!lscsi_status && |
| 1459 | scsi_set_resid(cp, resid); | 1483 | ((unsigned)(scsi_bufflen(cp) - resid) < |
| 1460 | } else { | 1484 | cp->underflow)) { |
| 1461 | DEBUG2(printk(KERN_INFO | 1485 | qla_printk(KERN_INFO, ha, |
| 1462 | "scsi(%ld:%d:%d) UNDERRUN status detected " | 1486 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " |
| 1463 | "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " | 1487 | "detected (%x of %x bytes)...returning " |
| 1464 | "os_underflow=0x%x\n", vha->host_no, | 1488 | "error status.\n", vha->host_no, |
| 1465 | cp->device->id, cp->device->lun, comp_status, | 1489 | cp->device->channel, cp->device->id, |
| 1466 | scsi_status, resid_len, resid, cp->cmnd[0], | 1490 | cp->device->lun, resid, scsi_bufflen(cp)); |
| 1467 | cp->underflow)); | 1491 | |
| 1492 | cp->result = DID_ERROR << 16; | ||
| 1493 | break; | ||
| 1494 | } | ||
| 1495 | } else if (!lscsi_status) { | ||
| 1496 | DEBUG2(printk( | ||
| 1497 | "scsi(%ld:%d:%d:%d) Dropped frame(s) detected " | ||
| 1498 | "(%x of %x bytes)...firmware reported underrun..." | ||
| 1499 | "retrying command.\n", vha->host_no, | ||
| 1500 | cp->device->channel, cp->device->id, | ||
| 1501 | cp->device->lun, resid, scsi_bufflen(cp))); | ||
| 1468 | 1502 | ||
| 1503 | cp->result = DID_ERROR << 16; | ||
| 1504 | break; | ||
| 1469 | } | 1505 | } |
| 1470 | 1506 | ||
| 1507 | cp->result = DID_OK << 16 | lscsi_status; | ||
| 1508 | |||
| 1471 | /* | 1509 | /* |
| 1472 | * Check to see if SCSI Status is non zero. If so report SCSI | 1510 | * Check to see if SCSI Status is non zero. If so report SCSI |
| 1473 | * Status. | 1511 | * Status. |
| 1474 | */ | 1512 | */ |
| 1475 | if (lscsi_status != 0) { | 1513 | if (lscsi_status != 0) { |
| 1476 | cp->result = DID_OK << 16 | lscsi_status; | ||
| 1477 | |||
| 1478 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { | 1514 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { |
| 1479 | DEBUG2(printk(KERN_INFO | 1515 | DEBUG2(printk(KERN_INFO |
| 1480 | "scsi(%ld): QUEUE FULL status detected " | 1516 | "scsi(%ld): QUEUE FULL status detected " |
| @@ -1501,42 +1537,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
| 1501 | break; | 1537 | break; |
| 1502 | 1538 | ||
| 1503 | qla2x00_handle_sense(sp, sense_data, sense_len, rsp); | 1539 | qla2x00_handle_sense(sp, sense_data, sense_len, rsp); |
| 1504 | } else { | ||
| 1505 | /* | ||
| 1506 | * If RISC reports underrun and target does not report | ||
| 1507 | * it then we must have a lost frame, so tell upper | ||
| 1508 | * layer to retry it by reporting an error. | ||
| 1509 | */ | ||
| 1510 | if (!(scsi_status & SS_RESIDUAL_UNDER)) { | ||
| 1511 | DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " | ||
| 1512 | "frame(s) detected (%x of %x bytes)..." | ||
| 1513 | "retrying command.\n", | ||
| 1514 | vha->host_no, cp->device->channel, | ||
| 1515 | cp->device->id, cp->device->lun, resid, | ||
| 1516 | scsi_bufflen(cp))); | ||
| 1517 | |||
| 1518 | scsi_set_resid(cp, resid); | ||
| 1519 | cp->result = DID_ERROR << 16; | ||
| 1520 | break; | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | /* Handle mid-layer underflow */ | ||
| 1524 | if ((unsigned)(scsi_bufflen(cp) - resid) < | ||
| 1525 | cp->underflow) { | ||
| 1526 | qla_printk(KERN_INFO, ha, | ||
| 1527 | "scsi(%ld:%d:%d:%d): Mid-layer underflow " | ||
| 1528 | "detected (%x of %x bytes)...returning " | ||
| 1529 | "error status.\n", vha->host_no, | ||
| 1530 | cp->device->channel, cp->device->id, | ||
| 1531 | cp->device->lun, resid, | ||
| 1532 | scsi_bufflen(cp)); | ||
| 1533 | |||
| 1534 | cp->result = DID_ERROR << 16; | ||
| 1535 | break; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | /* Everybody online, looking good... */ | ||
| 1539 | cp->result = DID_OK << 16; | ||
| 1540 | } | 1540 | } |
| 1541 | break; | 1541 | break; |
| 1542 | 1542 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b6202fe118ac..05d595d9a7ef 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -2006,7 +2006,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, | |||
| 2006 | int | 2006 | int |
| 2007 | qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | 2007 | qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, |
| 2008 | uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, | 2008 | uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, |
| 2009 | uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports) | 2009 | uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs) |
| 2010 | { | 2010 | { |
| 2011 | int rval; | 2011 | int rval; |
| 2012 | mbx_cmd_t mc; | 2012 | mbx_cmd_t mc; |
| @@ -2017,6 +2017,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
| 2017 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; | 2017 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; |
| 2018 | mcp->out_mb = MBX_0; | 2018 | mcp->out_mb = MBX_0; |
| 2019 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 2019 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
| 2020 | if (IS_QLA81XX(vha->hw)) | ||
| 2021 | mcp->in_mb |= MBX_12; | ||
| 2020 | mcp->tov = MBX_TOV_SECONDS; | 2022 | mcp->tov = MBX_TOV_SECONDS; |
| 2021 | mcp->flags = 0; | 2023 | mcp->flags = 0; |
| 2022 | rval = qla2x00_mailbox_command(vha, mcp); | 2024 | rval = qla2x00_mailbox_command(vha, mcp); |
| @@ -2027,9 +2029,10 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
| 2027 | vha->host_no, mcp->mb[0])); | 2029 | vha->host_no, mcp->mb[0])); |
| 2028 | } else { | 2030 | } else { |
| 2029 | DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " | 2031 | DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " |
| 2030 | "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no, | 2032 | "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, |
| 2031 | mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], | 2033 | vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], |
| 2032 | mcp->mb[10], mcp->mb[11])); | 2034 | mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], |
| 2035 | mcp->mb[12])); | ||
| 2033 | 2036 | ||
| 2034 | if (cur_xchg_cnt) | 2037 | if (cur_xchg_cnt) |
| 2035 | *cur_xchg_cnt = mcp->mb[3]; | 2038 | *cur_xchg_cnt = mcp->mb[3]; |
| @@ -2041,6 +2044,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
| 2041 | *orig_iocb_cnt = mcp->mb[10]; | 2044 | *orig_iocb_cnt = mcp->mb[10]; |
| 2042 | if (vha->hw->flags.npiv_supported && max_npiv_vports) | 2045 | if (vha->hw->flags.npiv_supported && max_npiv_vports) |
| 2043 | *max_npiv_vports = mcp->mb[11]; | 2046 | *max_npiv_vports = mcp->mb[11]; |
| 2047 | if (IS_QLA81XX(vha->hw) && max_fcfs) | ||
| 2048 | *max_fcfs = mcp->mb[12]; | ||
| 2044 | } | 2049 | } |
| 2045 | 2050 | ||
| 2046 | return (rval); | 2051 | return (rval); |
| @@ -2313,6 +2318,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
| 2313 | { | 2318 | { |
| 2314 | int rval, rval2; | 2319 | int rval, rval2; |
| 2315 | struct tsk_mgmt_cmd *tsk; | 2320 | struct tsk_mgmt_cmd *tsk; |
| 2321 | struct sts_entry_24xx *sts; | ||
| 2316 | dma_addr_t tsk_dma; | 2322 | dma_addr_t tsk_dma; |
| 2317 | scsi_qla_host_t *vha; | 2323 | scsi_qla_host_t *vha; |
| 2318 | struct qla_hw_data *ha; | 2324 | struct qla_hw_data *ha; |
| @@ -2352,20 +2358,37 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
| 2352 | sizeof(tsk->p.tsk.lun)); | 2358 | sizeof(tsk->p.tsk.lun)); |
| 2353 | } | 2359 | } |
| 2354 | 2360 | ||
| 2361 | sts = &tsk->p.sts; | ||
| 2355 | rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); | 2362 | rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); |
| 2356 | if (rval != QLA_SUCCESS) { | 2363 | if (rval != QLA_SUCCESS) { |
| 2357 | DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " | 2364 | DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " |
| 2358 | "(%x).\n", __func__, vha->host_no, name, rval)); | 2365 | "(%x).\n", __func__, vha->host_no, name, rval)); |
| 2359 | } else if (tsk->p.sts.entry_status != 0) { | 2366 | } else if (sts->entry_status != 0) { |
| 2360 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | 2367 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " |
| 2361 | "-- error status (%x).\n", __func__, vha->host_no, | 2368 | "-- error status (%x).\n", __func__, vha->host_no, |
| 2362 | tsk->p.sts.entry_status)); | 2369 | sts->entry_status)); |
| 2363 | rval = QLA_FUNCTION_FAILED; | 2370 | rval = QLA_FUNCTION_FAILED; |
| 2364 | } else if (tsk->p.sts.comp_status != | 2371 | } else if (sts->comp_status != |
| 2365 | __constant_cpu_to_le16(CS_COMPLETE)) { | 2372 | __constant_cpu_to_le16(CS_COMPLETE)) { |
| 2366 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | 2373 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " |
| 2367 | "-- completion status (%x).\n", __func__, | 2374 | "-- completion status (%x).\n", __func__, |
| 2368 | vha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); | 2375 | vha->host_no, le16_to_cpu(sts->comp_status))); |
| 2376 | rval = QLA_FUNCTION_FAILED; | ||
| 2377 | } else if (!(le16_to_cpu(sts->scsi_status) & | ||
| 2378 | SS_RESPONSE_INFO_LEN_VALID)) { | ||
| 2379 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | ||
| 2380 | "-- no response info (%x).\n", __func__, vha->host_no, | ||
| 2381 | le16_to_cpu(sts->scsi_status))); | ||
| 2382 | rval = QLA_FUNCTION_FAILED; | ||
| 2383 | } else if (le32_to_cpu(sts->rsp_data_len) < 4) { | ||
| 2384 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | ||
| 2385 | "-- not enough response info (%d).\n", __func__, | ||
| 2386 | vha->host_no, le32_to_cpu(sts->rsp_data_len))); | ||
| 2387 | rval = QLA_FUNCTION_FAILED; | ||
| 2388 | } else if (sts->data[3]) { | ||
| 2389 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | ||
| 2390 | "-- response (%x).\n", __func__, | ||
| 2391 | vha->host_no, sts->data[3])); | ||
| 2369 | rval = QLA_FUNCTION_FAILED; | 2392 | rval = QLA_FUNCTION_FAILED; |
| 2370 | } | 2393 | } |
| 2371 | 2394 | ||
| @@ -2759,8 +2782,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 2759 | vp_idx, MSB(stat), | 2782 | vp_idx, MSB(stat), |
| 2760 | rptid_entry->port_id[2], rptid_entry->port_id[1], | 2783 | rptid_entry->port_id[2], rptid_entry->port_id[1], |
| 2761 | rptid_entry->port_id[0])); | 2784 | rptid_entry->port_id[0])); |
| 2762 | if (vp_idx == 0) | 2785 | |
| 2763 | return; | 2786 | vp = vha; |
| 2787 | if (vp_idx == 0 && (MSB(stat) != 1)) | ||
| 2788 | goto reg_needed; | ||
| 2764 | 2789 | ||
| 2765 | if (MSB(stat) == 1) { | 2790 | if (MSB(stat) == 1) { |
| 2766 | DEBUG2(printk("scsi(%ld): Could not acquire ID for " | 2791 | DEBUG2(printk("scsi(%ld): Could not acquire ID for " |
| @@ -2783,8 +2808,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 2783 | * response queue. Handle it in dpc context. | 2808 | * response queue. Handle it in dpc context. |
| 2784 | */ | 2809 | */ |
| 2785 | set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); | 2810 | set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); |
| 2786 | set_bit(VP_DPC_NEEDED, &vha->dpc_flags); | ||
| 2787 | 2811 | ||
| 2812 | reg_needed: | ||
| 2813 | set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); | ||
| 2814 | set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); | ||
| 2815 | set_bit(VP_DPC_NEEDED, &vha->dpc_flags); | ||
| 2788 | qla2xxx_wake_dpc(vha); | 2816 | qla2xxx_wake_dpc(vha); |
| 2789 | } | 2817 | } |
| 2790 | } | 2818 | } |
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index e07b3617f019..a47d34308a3a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -382,8 +382,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
| 382 | vha->mgmt_svr_loop_id = 10 + vha->vp_idx; | 382 | vha->mgmt_svr_loop_id = 10 + vha->vp_idx; |
| 383 | 383 | ||
| 384 | vha->dpc_flags = 0L; | 384 | vha->dpc_flags = 0L; |
| 385 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); | ||
| 386 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); | ||
| 387 | 385 | ||
| 388 | /* | 386 | /* |
| 389 | * To fix the issue of processing a parent's RSCN for the vport before | 387 | * To fix the issue of processing a parent's RSCN for the vport before |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b79fca7d461b..41669357b186 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| 12 | #include <linux/kthread.h> | 12 | #include <linux/kthread.h> |
| 13 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
| 14 | #include <linux/kobject.h> | ||
| 14 | 15 | ||
| 15 | #include <scsi/scsi_tcq.h> | 16 | #include <scsi/scsi_tcq.h> |
| 16 | #include <scsi/scsicam.h> | 17 | #include <scsi/scsicam.h> |
| @@ -137,7 +138,7 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *); | |||
| 137 | static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); | 138 | static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); |
| 138 | static int qla2xxx_eh_host_reset(struct scsi_cmnd *); | 139 | static int qla2xxx_eh_host_reset(struct scsi_cmnd *); |
| 139 | 140 | ||
| 140 | static int qla2x00_change_queue_depth(struct scsi_device *, int); | 141 | static int qla2x00_change_queue_depth(struct scsi_device *, int, int); |
| 141 | static int qla2x00_change_queue_type(struct scsi_device *, int); | 142 | static int qla2x00_change_queue_type(struct scsi_device *, int); |
| 142 | 143 | ||
| 143 | struct scsi_host_template qla2xxx_driver_template = { | 144 | struct scsi_host_template qla2xxx_driver_template = { |
| @@ -727,23 +728,6 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport) | |||
| 727 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 728 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 728 | } | 729 | } |
| 729 | 730 | ||
| 730 | static void | ||
| 731 | qla2x00_block_error_handler(struct scsi_cmnd *cmnd) | ||
| 732 | { | ||
| 733 | struct Scsi_Host *shost = cmnd->device->host; | ||
| 734 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
| 735 | unsigned long flags; | ||
| 736 | |||
| 737 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 738 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
| 739 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 740 | msleep(1000); | ||
| 741 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 742 | } | ||
| 743 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 744 | return; | ||
| 745 | } | ||
| 746 | |||
| 747 | /************************************************************************** | 731 | /************************************************************************** |
| 748 | * qla2xxx_eh_abort | 732 | * qla2xxx_eh_abort |
| 749 | * | 733 | * |
| @@ -773,7 +757,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 773 | struct req_que *req = vha->req; | 757 | struct req_que *req = vha->req; |
| 774 | srb_t *spt; | 758 | srb_t *spt; |
| 775 | 759 | ||
| 776 | qla2x00_block_error_handler(cmd); | 760 | fc_block_scsi_eh(cmd); |
| 777 | 761 | ||
| 778 | if (!CMD_SP(cmd)) | 762 | if (!CMD_SP(cmd)) |
| 779 | return SUCCESS; | 763 | return SUCCESS; |
| @@ -904,7 +888,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
| 904 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 888 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
| 905 | int err; | 889 | int err; |
| 906 | 890 | ||
| 907 | qla2x00_block_error_handler(cmd); | 891 | fc_block_scsi_eh(cmd); |
| 908 | 892 | ||
| 909 | if (!fcport) | 893 | if (!fcport) |
| 910 | return FAILED; | 894 | return FAILED; |
| @@ -984,7 +968,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
| 984 | unsigned long serial; | 968 | unsigned long serial; |
| 985 | srb_t *sp = (srb_t *) CMD_SP(cmd); | 969 | srb_t *sp = (srb_t *) CMD_SP(cmd); |
| 986 | 970 | ||
| 987 | qla2x00_block_error_handler(cmd); | 971 | fc_block_scsi_eh(cmd); |
| 988 | 972 | ||
| 989 | id = cmd->device->id; | 973 | id = cmd->device->id; |
| 990 | lun = cmd->device->lun; | 974 | lun = cmd->device->lun; |
| @@ -1047,7 +1031,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
| 1047 | srb_t *sp = (srb_t *) CMD_SP(cmd); | 1031 | srb_t *sp = (srb_t *) CMD_SP(cmd); |
| 1048 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 1032 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
| 1049 | 1033 | ||
| 1050 | qla2x00_block_error_handler(cmd); | 1034 | fc_block_scsi_eh(cmd); |
| 1051 | 1035 | ||
| 1052 | id = cmd->device->id; | 1036 | id = cmd->device->id; |
| 1053 | lun = cmd->device->lun; | 1037 | lun = cmd->device->lun; |
| @@ -1234,8 +1218,11 @@ qla2xxx_slave_destroy(struct scsi_device *sdev) | |||
| 1234 | } | 1218 | } |
| 1235 | 1219 | ||
| 1236 | static int | 1220 | static int |
| 1237 | qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) | 1221 | qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) |
| 1238 | { | 1222 | { |
| 1223 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
| 1224 | return -EOPNOTSUPP; | ||
| 1225 | |||
| 1239 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | 1226 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); |
| 1240 | return sdev->queue_depth; | 1227 | return sdev->queue_depth; |
| 1241 | } | 1228 | } |
| @@ -2653,6 +2640,37 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); | |||
| 2653 | qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); | 2640 | qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); |
| 2654 | qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); | 2641 | qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); |
| 2655 | 2642 | ||
| 2643 | int | ||
| 2644 | qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) | ||
| 2645 | { | ||
| 2646 | struct qla_work_evt *e; | ||
| 2647 | |||
| 2648 | e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); | ||
| 2649 | if (!e) | ||
| 2650 | return QLA_FUNCTION_FAILED; | ||
| 2651 | |||
| 2652 | e->u.uevent.code = code; | ||
| 2653 | return qla2x00_post_work(vha, e); | ||
| 2654 | } | ||
| 2655 | |||
| 2656 | static void | ||
| 2657 | qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) | ||
| 2658 | { | ||
| 2659 | char event_string[40]; | ||
| 2660 | char *envp[] = { event_string, NULL }; | ||
| 2661 | |||
| 2662 | switch (code) { | ||
| 2663 | case QLA_UEVENT_CODE_FW_DUMP: | ||
| 2664 | snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", | ||
| 2665 | vha->host_no); | ||
| 2666 | break; | ||
| 2667 | default: | ||
| 2668 | /* do nothing */ | ||
| 2669 | break; | ||
| 2670 | } | ||
| 2671 | kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); | ||
| 2672 | } | ||
| 2673 | |||
| 2656 | void | 2674 | void |
| 2657 | qla2x00_do_work(struct scsi_qla_host *vha) | 2675 | qla2x00_do_work(struct scsi_qla_host *vha) |
| 2658 | { | 2676 | { |
| @@ -2690,6 +2708,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
| 2690 | qla2x00_async_logout_done(vha, e->u.logio.fcport, | 2708 | qla2x00_async_logout_done(vha, e->u.logio.fcport, |
| 2691 | e->u.logio.data); | 2709 | e->u.logio.data); |
| 2692 | break; | 2710 | break; |
| 2711 | case QLA_EVT_UEVENT: | ||
| 2712 | qla2x00_uevent_emit(vha, e->u.uevent.code); | ||
| 2713 | break; | ||
| 2693 | } | 2714 | } |
| 2694 | if (e->flags & QLA_EVT_FLAG_FREE) | 2715 | if (e->flags & QLA_EVT_FLAG_FREE) |
| 2695 | kfree(e); | 2716 | kfree(e); |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index ac107a2c34a4..807e0dbc67fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.03.01-k6" | 10 | #define QLA2XXX_VERSION "8.03.01-k7" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
| 13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h index 9c053bbaa877..e3c74d1ee2db 100644 --- a/drivers/scsi/qlogicpti.h +++ b/drivers/scsi/qlogicpti.h | |||
| @@ -43,7 +43,7 @@ | |||
| 43 | * determined for each queue request anew. | 43 | * determined for each queue request anew. |
| 44 | */ | 44 | */ |
| 45 | #define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */ | 45 | #define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */ |
| 46 | #define QLOGICPTI_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0) | 46 | #define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0)) |
| 47 | 47 | ||
| 48 | /* mailbox command complete status codes */ | 48 | /* mailbox command complete status codes */ |
| 49 | #define MBOX_COMMAND_COMPLETE 0x4000 | 49 | #define MBOX_COMMAND_COMPLETE 0x4000 |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index dd098cad337b..a60da5555577 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -940,10 +940,16 @@ EXPORT_SYMBOL(scsi_adjust_queue_depth); | |||
| 940 | */ | 940 | */ |
| 941 | int scsi_track_queue_full(struct scsi_device *sdev, int depth) | 941 | int scsi_track_queue_full(struct scsi_device *sdev, int depth) |
| 942 | { | 942 | { |
| 943 | if ((jiffies >> 4) == sdev->last_queue_full_time) | 943 | |
| 944 | /* | ||
| 945 | * Don't let QUEUE_FULLs on the same | ||
| 946 | * jiffies count, they could all be from | ||
| 947 | * same event. | ||
| 948 | */ | ||
| 949 | if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) | ||
| 944 | return 0; | 950 | return 0; |
| 945 | 951 | ||
| 946 | sdev->last_queue_full_time = (jiffies >> 4); | 952 | sdev->last_queue_full_time = jiffies; |
| 947 | if (sdev->last_queue_full_depth != depth) { | 953 | if (sdev->last_queue_full_depth != depth) { |
| 948 | sdev->last_queue_full_count = 1; | 954 | sdev->last_queue_full_count = 1; |
| 949 | sdev->last_queue_full_depth = depth; | 955 | sdev->last_queue_full_depth = depth; |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index c4103bef41b5..0b575c871007 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -44,6 +44,8 @@ | |||
| 44 | 44 | ||
| 45 | #include <net/checksum.h> | 45 | #include <net/checksum.h> |
| 46 | 46 | ||
| 47 | #include <asm/unaligned.h> | ||
| 48 | |||
| 47 | #include <scsi/scsi.h> | 49 | #include <scsi/scsi.h> |
| 48 | #include <scsi/scsi_cmnd.h> | 50 | #include <scsi/scsi_cmnd.h> |
| 49 | #include <scsi/scsi_device.h> | 51 | #include <scsi/scsi_device.h> |
| @@ -105,6 +107,10 @@ static const char * scsi_debug_version_date = "20070104"; | |||
| 105 | #define DEF_ATO 1 | 107 | #define DEF_ATO 1 |
| 106 | #define DEF_PHYSBLK_EXP 0 | 108 | #define DEF_PHYSBLK_EXP 0 |
| 107 | #define DEF_LOWEST_ALIGNED 0 | 109 | #define DEF_LOWEST_ALIGNED 0 |
| 110 | #define DEF_UNMAP_MAX_BLOCKS 0 | ||
| 111 | #define DEF_UNMAP_MAX_DESC 0 | ||
| 112 | #define DEF_UNMAP_GRANULARITY 0 | ||
| 113 | #define DEF_UNMAP_ALIGNMENT 0 | ||
| 108 | 114 | ||
| 109 | /* bit mask values for scsi_debug_opts */ | 115 | /* bit mask values for scsi_debug_opts */ |
| 110 | #define SCSI_DEBUG_OPT_NOISE 1 | 116 | #define SCSI_DEBUG_OPT_NOISE 1 |
| @@ -162,6 +168,10 @@ static int scsi_debug_guard = DEF_GUARD; | |||
| 162 | static int scsi_debug_ato = DEF_ATO; | 168 | static int scsi_debug_ato = DEF_ATO; |
| 163 | static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; | 169 | static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; |
| 164 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; | 170 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; |
| 171 | static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; | ||
| 172 | static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; | ||
| 173 | static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; | ||
| 174 | static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; | ||
| 165 | 175 | ||
| 166 | static int scsi_debug_cmnd_count = 0; | 176 | static int scsi_debug_cmnd_count = 0; |
| 167 | 177 | ||
| @@ -223,7 +233,9 @@ static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; | |||
| 223 | 233 | ||
| 224 | static unsigned char * fake_storep; /* ramdisk storage */ | 234 | static unsigned char * fake_storep; /* ramdisk storage */ |
| 225 | static unsigned char *dif_storep; /* protection info */ | 235 | static unsigned char *dif_storep; /* protection info */ |
| 236 | static void *map_storep; /* provisioning map */ | ||
| 226 | 237 | ||
| 238 | static unsigned long map_size; | ||
| 227 | static int num_aborts = 0; | 239 | static int num_aborts = 0; |
| 228 | static int num_dev_resets = 0; | 240 | static int num_dev_resets = 0; |
| 229 | static int num_bus_resets = 0; | 241 | static int num_bus_resets = 0; |
| @@ -317,6 +329,7 @@ static void get_data_transfer_info(unsigned char *cmd, | |||
| 317 | (u32)cmd[28] << 24; | 329 | (u32)cmd[28] << 24; |
| 318 | break; | 330 | break; |
| 319 | 331 | ||
| 332 | case WRITE_SAME_16: | ||
| 320 | case WRITE_16: | 333 | case WRITE_16: |
| 321 | case READ_16: | 334 | case READ_16: |
| 322 | *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | | 335 | *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | |
| @@ -335,6 +348,7 @@ static void get_data_transfer_info(unsigned char *cmd, | |||
| 335 | *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | | 348 | *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | |
| 336 | (u32)cmd[6] << 24; | 349 | (u32)cmd[6] << 24; |
| 337 | break; | 350 | break; |
| 351 | case WRITE_SAME: | ||
| 338 | case WRITE_10: | 352 | case WRITE_10: |
| 339 | case READ_10: | 353 | case READ_10: |
| 340 | case XDWRITEREAD_10: | 354 | case XDWRITEREAD_10: |
| @@ -671,10 +685,12 @@ static int inquiry_evpd_89(unsigned char * arr) | |||
| 671 | } | 685 | } |
| 672 | 686 | ||
| 673 | 687 | ||
| 688 | /* Block limits VPD page (SBC-3) */ | ||
| 674 | static unsigned char vpdb0_data[] = { | 689 | static unsigned char vpdb0_data[] = { |
| 675 | /* from 4th byte */ 0,0,0,4, | 690 | /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, |
| 676 | 0,0,0x4,0, | 691 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, |
| 677 | 0,0,0,64, | 692 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, |
| 693 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, | ||
| 678 | }; | 694 | }; |
| 679 | 695 | ||
| 680 | static int inquiry_evpd_b0(unsigned char * arr) | 696 | static int inquiry_evpd_b0(unsigned char * arr) |
| @@ -691,14 +707,40 @@ static int inquiry_evpd_b0(unsigned char * arr) | |||
| 691 | arr[6] = (sdebug_store_sectors >> 8) & 0xff; | 707 | arr[6] = (sdebug_store_sectors >> 8) & 0xff; |
| 692 | arr[7] = sdebug_store_sectors & 0xff; | 708 | arr[7] = sdebug_store_sectors & 0xff; |
| 693 | } | 709 | } |
| 710 | |||
| 711 | if (scsi_debug_unmap_max_desc) { | ||
| 712 | unsigned int blocks; | ||
| 713 | |||
| 714 | if (scsi_debug_unmap_max_blocks) | ||
| 715 | blocks = scsi_debug_unmap_max_blocks; | ||
| 716 | else | ||
| 717 | blocks = 0xffffffff; | ||
| 718 | |||
| 719 | put_unaligned_be32(blocks, &arr[16]); | ||
| 720 | put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); | ||
| 721 | } | ||
| 722 | |||
| 723 | if (scsi_debug_unmap_alignment) { | ||
| 724 | put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); | ||
| 725 | arr[28] |= 0x80; /* UGAVALID */ | ||
| 726 | } | ||
| 727 | |||
| 728 | if (scsi_debug_unmap_granularity) { | ||
| 729 | put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); | ||
| 730 | return 0x3c; /* Mandatory page length for thin provisioning */ | ||
| 731 | } | ||
| 732 | |||
| 694 | return sizeof(vpdb0_data); | 733 | return sizeof(vpdb0_data); |
| 695 | } | 734 | } |
| 696 | 735 | ||
| 736 | /* Block device characteristics VPD page (SBC-3) */ | ||
| 697 | static int inquiry_evpd_b1(unsigned char *arr) | 737 | static int inquiry_evpd_b1(unsigned char *arr) |
| 698 | { | 738 | { |
| 699 | memset(arr, 0, 0x3c); | 739 | memset(arr, 0, 0x3c); |
| 700 | arr[0] = 0; | 740 | arr[0] = 0; |
| 701 | arr[1] = 1; | 741 | arr[1] = 1; /* non rotating medium (e.g. solid state) */ |
| 742 | arr[2] = 0; | ||
| 743 | arr[3] = 5; /* less than 1.8" */ | ||
| 702 | 744 | ||
| 703 | return 0x3c; | 745 | return 0x3c; |
| 704 | } | 746 | } |
| @@ -974,6 +1016,10 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
| 974 | arr[11] = scsi_debug_sector_size & 0xff; | 1016 | arr[11] = scsi_debug_sector_size & 0xff; |
| 975 | arr[13] = scsi_debug_physblk_exp & 0xf; | 1017 | arr[13] = scsi_debug_physblk_exp & 0xf; |
| 976 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; | 1018 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; |
| 1019 | |||
| 1020 | if (scsi_debug_unmap_granularity) | ||
| 1021 | arr[14] |= 0x80; /* TPE */ | ||
| 1022 | |||
| 977 | arr[15] = scsi_debug_lowest_aligned & 0xff; | 1023 | arr[15] = scsi_debug_lowest_aligned & 0xff; |
| 978 | 1024 | ||
| 979 | if (scsi_debug_dif) { | 1025 | if (scsi_debug_dif) { |
| @@ -1887,6 +1933,70 @@ out: | |||
| 1887 | return ret; | 1933 | return ret; |
| 1888 | } | 1934 | } |
| 1889 | 1935 | ||
| 1936 | static unsigned int map_state(sector_t lba, unsigned int *num) | ||
| 1937 | { | ||
| 1938 | unsigned int granularity, alignment, mapped; | ||
| 1939 | sector_t block, next, end; | ||
| 1940 | |||
| 1941 | granularity = scsi_debug_unmap_granularity; | ||
| 1942 | alignment = granularity - scsi_debug_unmap_alignment; | ||
| 1943 | block = lba + alignment; | ||
| 1944 | do_div(block, granularity); | ||
| 1945 | |||
| 1946 | mapped = test_bit(block, map_storep); | ||
| 1947 | |||
| 1948 | if (mapped) | ||
| 1949 | next = find_next_zero_bit(map_storep, map_size, block); | ||
| 1950 | else | ||
| 1951 | next = find_next_bit(map_storep, map_size, block); | ||
| 1952 | |||
| 1953 | end = next * granularity - scsi_debug_unmap_alignment; | ||
| 1954 | *num = end - lba; | ||
| 1955 | |||
| 1956 | return mapped; | ||
| 1957 | } | ||
| 1958 | |||
| 1959 | static void map_region(sector_t lba, unsigned int len) | ||
| 1960 | { | ||
| 1961 | unsigned int granularity, alignment; | ||
| 1962 | sector_t end = lba + len; | ||
| 1963 | |||
| 1964 | granularity = scsi_debug_unmap_granularity; | ||
| 1965 | alignment = granularity - scsi_debug_unmap_alignment; | ||
| 1966 | |||
| 1967 | while (lba < end) { | ||
| 1968 | sector_t block, rem; | ||
| 1969 | |||
| 1970 | block = lba + alignment; | ||
| 1971 | rem = do_div(block, granularity); | ||
| 1972 | |||
| 1973 | set_bit(block, map_storep); | ||
| 1974 | |||
| 1975 | lba += granularity - rem; | ||
| 1976 | } | ||
| 1977 | } | ||
| 1978 | |||
| 1979 | static void unmap_region(sector_t lba, unsigned int len) | ||
| 1980 | { | ||
| 1981 | unsigned int granularity, alignment; | ||
| 1982 | sector_t end = lba + len; | ||
| 1983 | |||
| 1984 | granularity = scsi_debug_unmap_granularity; | ||
| 1985 | alignment = granularity - scsi_debug_unmap_alignment; | ||
| 1986 | |||
| 1987 | while (lba < end) { | ||
| 1988 | sector_t block, rem; | ||
| 1989 | |||
| 1990 | block = lba + alignment; | ||
| 1991 | rem = do_div(block, granularity); | ||
| 1992 | |||
| 1993 | if (rem == 0 && lba + granularity <= end) | ||
| 1994 | clear_bit(block, map_storep); | ||
| 1995 | |||
| 1996 | lba += granularity - rem; | ||
| 1997 | } | ||
| 1998 | } | ||
| 1999 | |||
| 1890 | static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | 2000 | static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, |
| 1891 | unsigned int num, struct sdebug_dev_info *devip, | 2001 | unsigned int num, struct sdebug_dev_info *devip, |
| 1892 | u32 ei_lba) | 2002 | u32 ei_lba) |
| @@ -1910,6 +2020,8 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 1910 | 2020 | ||
| 1911 | write_lock_irqsave(&atomic_rw, iflags); | 2021 | write_lock_irqsave(&atomic_rw, iflags); |
| 1912 | ret = do_device_access(SCpnt, devip, lba, num, 1); | 2022 | ret = do_device_access(SCpnt, devip, lba, num, 1); |
| 2023 | if (scsi_debug_unmap_granularity) | ||
| 2024 | map_region(lba, num); | ||
| 1913 | write_unlock_irqrestore(&atomic_rw, iflags); | 2025 | write_unlock_irqrestore(&atomic_rw, iflags); |
| 1914 | if (-1 == ret) | 2026 | if (-1 == ret) |
| 1915 | return (DID_ERROR << 16); | 2027 | return (DID_ERROR << 16); |
| @@ -1917,9 +2029,143 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 1917 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 2029 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) |
| 1918 | printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " | 2030 | printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " |
| 1919 | " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); | 2031 | " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); |
| 2032 | |||
| 2033 | return 0; | ||
| 2034 | } | ||
| 2035 | |||
| 2036 | static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, | ||
| 2037 | unsigned int num, struct sdebug_dev_info *devip, | ||
| 2038 | u32 ei_lba, unsigned int unmap) | ||
| 2039 | { | ||
| 2040 | unsigned long iflags; | ||
| 2041 | unsigned long long i; | ||
| 2042 | int ret; | ||
| 2043 | |||
| 2044 | ret = check_device_access_params(devip, lba, num); | ||
| 2045 | if (ret) | ||
| 2046 | return ret; | ||
| 2047 | |||
| 2048 | write_lock_irqsave(&atomic_rw, iflags); | ||
| 2049 | |||
| 2050 | if (unmap && scsi_debug_unmap_granularity) { | ||
| 2051 | unmap_region(lba, num); | ||
| 2052 | goto out; | ||
| 2053 | } | ||
| 2054 | |||
| 2055 | /* Else fetch one logical block */ | ||
| 2056 | ret = fetch_to_dev_buffer(scmd, | ||
| 2057 | fake_storep + (lba * scsi_debug_sector_size), | ||
| 2058 | scsi_debug_sector_size); | ||
| 2059 | |||
| 2060 | if (-1 == ret) { | ||
| 2061 | write_unlock_irqrestore(&atomic_rw, iflags); | ||
| 2062 | return (DID_ERROR << 16); | ||
| 2063 | } else if ((ret < (num * scsi_debug_sector_size)) && | ||
| 2064 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | ||
| 2065 | printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, " | ||
| 2066 | " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); | ||
| 2067 | |||
| 2068 | /* Copy first sector to remaining blocks */ | ||
| 2069 | for (i = 1 ; i < num ; i++) | ||
| 2070 | memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), | ||
| 2071 | fake_storep + (lba * scsi_debug_sector_size), | ||
| 2072 | scsi_debug_sector_size); | ||
| 2073 | |||
| 2074 | if (scsi_debug_unmap_granularity) | ||
| 2075 | map_region(lba, num); | ||
| 2076 | out: | ||
| 2077 | write_unlock_irqrestore(&atomic_rw, iflags); | ||
| 2078 | |||
| 1920 | return 0; | 2079 | return 0; |
| 1921 | } | 2080 | } |
| 1922 | 2081 | ||
| 2082 | struct unmap_block_desc { | ||
| 2083 | __be64 lba; | ||
| 2084 | __be32 blocks; | ||
| 2085 | __be32 __reserved; | ||
| 2086 | }; | ||
| 2087 | |||
| 2088 | static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) | ||
| 2089 | { | ||
| 2090 | unsigned char *buf; | ||
| 2091 | struct unmap_block_desc *desc; | ||
| 2092 | unsigned int i, payload_len, descriptors; | ||
| 2093 | int ret; | ||
| 2094 | |||
| 2095 | ret = check_readiness(scmd, 1, devip); | ||
| 2096 | if (ret) | ||
| 2097 | return ret; | ||
| 2098 | |||
| 2099 | payload_len = get_unaligned_be16(&scmd->cmnd[7]); | ||
| 2100 | BUG_ON(scsi_bufflen(scmd) != payload_len); | ||
| 2101 | |||
| 2102 | descriptors = (payload_len - 8) / 16; | ||
| 2103 | |||
| 2104 | buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); | ||
| 2105 | if (!buf) | ||
| 2106 | return check_condition_result; | ||
| 2107 | |||
| 2108 | scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); | ||
| 2109 | |||
| 2110 | BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); | ||
| 2111 | BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); | ||
| 2112 | |||
| 2113 | desc = (void *)&buf[8]; | ||
| 2114 | |||
| 2115 | for (i = 0 ; i < descriptors ; i++) { | ||
| 2116 | unsigned long long lba = get_unaligned_be64(&desc[i].lba); | ||
| 2117 | unsigned int num = get_unaligned_be32(&desc[i].blocks); | ||
| 2118 | |||
| 2119 | ret = check_device_access_params(devip, lba, num); | ||
| 2120 | if (ret) | ||
| 2121 | goto out; | ||
| 2122 | |||
| 2123 | unmap_region(lba, num); | ||
| 2124 | } | ||
| 2125 | |||
| 2126 | ret = 0; | ||
| 2127 | |||
| 2128 | out: | ||
| 2129 | kfree(buf); | ||
| 2130 | |||
| 2131 | return ret; | ||
| 2132 | } | ||
| 2133 | |||
| 2134 | #define SDEBUG_GET_LBA_STATUS_LEN 32 | ||
| 2135 | |||
| 2136 | static int resp_get_lba_status(struct scsi_cmnd * scmd, | ||
| 2137 | struct sdebug_dev_info * devip) | ||
| 2138 | { | ||
| 2139 | unsigned long long lba; | ||
| 2140 | unsigned int alloc_len, mapped, num; | ||
| 2141 | unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; | ||
| 2142 | int ret; | ||
| 2143 | |||
| 2144 | ret = check_readiness(scmd, 1, devip); | ||
| 2145 | if (ret) | ||
| 2146 | return ret; | ||
| 2147 | |||
| 2148 | lba = get_unaligned_be64(&scmd->cmnd[2]); | ||
| 2149 | alloc_len = get_unaligned_be32(&scmd->cmnd[10]); | ||
| 2150 | |||
| 2151 | if (alloc_len < 24) | ||
| 2152 | return 0; | ||
| 2153 | |||
| 2154 | ret = check_device_access_params(devip, lba, 1); | ||
| 2155 | if (ret) | ||
| 2156 | return ret; | ||
| 2157 | |||
| 2158 | mapped = map_state(lba, &num); | ||
| 2159 | |||
| 2160 | memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); | ||
| 2161 | put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */ | ||
| 2162 | put_unaligned_be64(lba, &arr[8]); /* LBA */ | ||
| 2163 | put_unaligned_be32(num, &arr[16]); /* Number of blocks */ | ||
| 2164 | arr[20] = !mapped; /* mapped = 0, unmapped = 1 */ | ||
| 2165 | |||
| 2166 | return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN); | ||
| 2167 | } | ||
| 2168 | |||
| 1923 | #define SDEBUG_RLUN_ARR_SZ 256 | 2169 | #define SDEBUG_RLUN_ARR_SZ 256 |
| 1924 | 2170 | ||
| 1925 | static int resp_report_luns(struct scsi_cmnd * scp, | 2171 | static int resp_report_luns(struct scsi_cmnd * scp, |
| @@ -2430,6 +2676,10 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO); | |||
| 2430 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); | 2676 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); |
| 2431 | module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); | 2677 | module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); |
| 2432 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | 2678 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); |
| 2679 | module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); | ||
| 2680 | module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); | ||
| 2681 | module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); | ||
| 2682 | module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); | ||
| 2433 | 2683 | ||
| 2434 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); | 2684 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); |
| 2435 | MODULE_DESCRIPTION("SCSI debug adapter driver"); | 2685 | MODULE_DESCRIPTION("SCSI debug adapter driver"); |
| @@ -2458,6 +2708,10 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); | |||
| 2458 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); | 2708 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); |
| 2459 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | 2709 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); |
| 2460 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); | 2710 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); |
| 2711 | MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)"); | ||
| 2712 | MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)"); | ||
| 2713 | MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)"); | ||
| 2714 | MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); | ||
| 2461 | 2715 | ||
| 2462 | static char sdebug_info[256]; | 2716 | static char sdebug_info[256]; |
| 2463 | 2717 | ||
| @@ -2816,6 +3070,23 @@ static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf) | |||
| 2816 | } | 3070 | } |
| 2817 | DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); | 3071 | DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); |
| 2818 | 3072 | ||
| 3073 | static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) | ||
| 3074 | { | ||
| 3075 | ssize_t count; | ||
| 3076 | |||
| 3077 | if (scsi_debug_unmap_granularity == 0) | ||
| 3078 | return scnprintf(buf, PAGE_SIZE, "0-%u\n", | ||
| 3079 | sdebug_store_sectors); | ||
| 3080 | |||
| 3081 | count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size); | ||
| 3082 | |||
| 3083 | buf[count++] = '\n'; | ||
| 3084 | buf[count++] = 0; | ||
| 3085 | |||
| 3086 | return count; | ||
| 3087 | } | ||
| 3088 | DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); | ||
| 3089 | |||
| 2819 | 3090 | ||
| 2820 | /* Note: The following function creates attribute files in the | 3091 | /* Note: The following function creates attribute files in the |
| 2821 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these | 3092 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these |
| @@ -2847,11 +3118,13 @@ static int do_create_driverfs_files(void) | |||
| 2847 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); | 3118 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); |
| 2848 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); | 3119 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); |
| 2849 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); | 3120 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); |
| 3121 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map); | ||
| 2850 | return ret; | 3122 | return ret; |
| 2851 | } | 3123 | } |
| 2852 | 3124 | ||
| 2853 | static void do_remove_driverfs_files(void) | 3125 | static void do_remove_driverfs_files(void) |
| 2854 | { | 3126 | { |
| 3127 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map); | ||
| 2855 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); | 3128 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); |
| 2856 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); | 3129 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); |
| 2857 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); | 3130 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); |
| @@ -2989,6 +3262,36 @@ static int __init scsi_debug_init(void) | |||
| 2989 | memset(dif_storep, 0xff, dif_size); | 3262 | memset(dif_storep, 0xff, dif_size); |
| 2990 | } | 3263 | } |
| 2991 | 3264 | ||
| 3265 | if (scsi_debug_unmap_granularity) { | ||
| 3266 | unsigned int map_bytes; | ||
| 3267 | |||
| 3268 | if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { | ||
| 3269 | printk(KERN_ERR | ||
| 3270 | "%s: ERR: unmap_granularity < unmap_alignment\n", | ||
| 3271 | __func__); | ||
| 3272 | return -EINVAL; | ||
| 3273 | } | ||
| 3274 | |||
| 3275 | map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); | ||
| 3276 | map_bytes = map_size >> 3; | ||
| 3277 | map_storep = vmalloc(map_bytes); | ||
| 3278 | |||
| 3279 | printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", | ||
| 3280 | map_size); | ||
| 3281 | |||
| 3282 | if (map_storep == NULL) { | ||
| 3283 | printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n"); | ||
| 3284 | ret = -ENOMEM; | ||
| 3285 | goto free_vm; | ||
| 3286 | } | ||
| 3287 | |||
| 3288 | memset(map_storep, 0x0, map_bytes); | ||
| 3289 | |||
| 3290 | /* Map first 1KB for partition table */ | ||
| 3291 | if (scsi_debug_num_parts) | ||
| 3292 | map_region(0, 2); | ||
| 3293 | } | ||
| 3294 | |||
| 2992 | ret = device_register(&pseudo_primary); | 3295 | ret = device_register(&pseudo_primary); |
| 2993 | if (ret < 0) { | 3296 | if (ret < 0) { |
| 2994 | printk(KERN_WARNING "scsi_debug: device_register error: %d\n", | 3297 | printk(KERN_WARNING "scsi_debug: device_register error: %d\n", |
| @@ -3041,6 +3344,8 @@ bus_unreg: | |||
| 3041 | dev_unreg: | 3344 | dev_unreg: |
| 3042 | device_unregister(&pseudo_primary); | 3345 | device_unregister(&pseudo_primary); |
| 3043 | free_vm: | 3346 | free_vm: |
| 3347 | if (map_storep) | ||
| 3348 | vfree(map_storep); | ||
| 3044 | if (dif_storep) | 3349 | if (dif_storep) |
| 3045 | vfree(dif_storep); | 3350 | vfree(dif_storep); |
| 3046 | vfree(fake_storep); | 3351 | vfree(fake_storep); |
| @@ -3167,6 +3472,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3167 | int inj_dif = 0; | 3472 | int inj_dif = 0; |
| 3168 | int inj_dix = 0; | 3473 | int inj_dix = 0; |
| 3169 | int delay_override = 0; | 3474 | int delay_override = 0; |
| 3475 | int unmap = 0; | ||
| 3170 | 3476 | ||
| 3171 | scsi_set_resid(SCpnt, 0); | 3477 | scsi_set_resid(SCpnt, 0); |
| 3172 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { | 3478 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { |
| @@ -3272,13 +3578,21 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3272 | errsts = resp_readcap(SCpnt, devip); | 3578 | errsts = resp_readcap(SCpnt, devip); |
| 3273 | break; | 3579 | break; |
| 3274 | case SERVICE_ACTION_IN: | 3580 | case SERVICE_ACTION_IN: |
| 3275 | if (SAI_READ_CAPACITY_16 != cmd[1]) { | 3581 | if (cmd[1] == SAI_READ_CAPACITY_16) |
| 3582 | errsts = resp_readcap16(SCpnt, devip); | ||
| 3583 | else if (cmd[1] == SAI_GET_LBA_STATUS) { | ||
| 3584 | |||
| 3585 | if (scsi_debug_unmap_max_desc == 0) { | ||
| 3586 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
| 3587 | INVALID_COMMAND_OPCODE, 0); | ||
| 3588 | errsts = check_condition_result; | ||
| 3589 | } else | ||
| 3590 | errsts = resp_get_lba_status(SCpnt, devip); | ||
| 3591 | } else { | ||
| 3276 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 3592 | mk_sense_buffer(devip, ILLEGAL_REQUEST, |
| 3277 | INVALID_OPCODE, 0); | 3593 | INVALID_OPCODE, 0); |
| 3278 | errsts = check_condition_result; | 3594 | errsts = check_condition_result; |
| 3279 | break; | ||
| 3280 | } | 3595 | } |
| 3281 | errsts = resp_readcap16(SCpnt, devip); | ||
| 3282 | break; | 3596 | break; |
| 3283 | case MAINTENANCE_IN: | 3597 | case MAINTENANCE_IN: |
| 3284 | if (MI_REPORT_TARGET_PGS != cmd[1]) { | 3598 | if (MI_REPORT_TARGET_PGS != cmd[1]) { |
| @@ -3378,6 +3692,29 @@ write: | |||
| 3378 | errsts = illegal_condition_result; | 3692 | errsts = illegal_condition_result; |
| 3379 | } | 3693 | } |
| 3380 | break; | 3694 | break; |
| 3695 | case WRITE_SAME_16: | ||
| 3696 | if (cmd[1] & 0x8) | ||
| 3697 | unmap = 1; | ||
| 3698 | /* fall through */ | ||
| 3699 | case WRITE_SAME: | ||
| 3700 | errsts = check_readiness(SCpnt, 0, devip); | ||
| 3701 | if (errsts) | ||
| 3702 | break; | ||
| 3703 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); | ||
| 3704 | errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap); | ||
| 3705 | break; | ||
| 3706 | case UNMAP: | ||
| 3707 | errsts = check_readiness(SCpnt, 0, devip); | ||
| 3708 | if (errsts) | ||
| 3709 | break; | ||
| 3710 | |||
| 3711 | if (scsi_debug_unmap_max_desc == 0) { | ||
| 3712 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
| 3713 | INVALID_COMMAND_OPCODE, 0); | ||
| 3714 | errsts = check_condition_result; | ||
| 3715 | } else | ||
| 3716 | errsts = resp_unmap(SCpnt, devip); | ||
| 3717 | break; | ||
| 3381 | case MODE_SENSE: | 3718 | case MODE_SENSE: |
| 3382 | case MODE_SENSE_10: | 3719 | case MODE_SENSE_10: |
| 3383 | errsts = resp_mode_sense(SCpnt, target, devip); | 3720 | errsts = resp_mode_sense(SCpnt, target, devip); |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 93c2622cb969..37af178b2d17 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -168,11 +168,10 @@ static struct { | |||
| 168 | {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, | 168 | {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, |
| 169 | {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, | 169 | {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, |
| 170 | {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, | 170 | {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, |
| 171 | {"HITACHI", "DF400", "*", BLIST_SPARSELUN}, | 171 | {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, |
| 172 | {"HITACHI", "DF500", "*", BLIST_SPARSELUN}, | 172 | {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, |
| 173 | {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, | 173 | {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, |
| 174 | {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, | 174 | {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, |
| 175 | {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
| 176 | {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | 175 | {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, |
| 177 | {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | 176 | {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, |
| 178 | {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | 177 | {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, |
| @@ -454,7 +453,7 @@ int scsi_get_device_flags(struct scsi_device *sdev, | |||
| 454 | 453 | ||
| 455 | 454 | ||
| 456 | /** | 455 | /** |
| 457 | * get_device_flags_keyed - get device specific flags from the dynamic device list. | 456 | * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list |
| 458 | * @sdev: &scsi_device to get flags for | 457 | * @sdev: &scsi_device to get flags for |
| 459 | * @vendor: vendor name | 458 | * @vendor: vendor name |
| 460 | * @model: model name | 459 | * @model: model name |
| @@ -685,7 +684,7 @@ MODULE_PARM_DESC(default_dev_flags, | |||
| 685 | "scsi default device flag integer value"); | 684 | "scsi default device flag integer value"); |
| 686 | 685 | ||
| 687 | /** | 686 | /** |
| 688 | * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list. | 687 | * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list |
| 689 | **/ | 688 | **/ |
| 690 | void scsi_exit_devinfo(void) | 689 | void scsi_exit_devinfo(void) |
| 691 | { | 690 | { |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1b0060b791e8..08ed506e6059 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -331,6 +331,64 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) | |||
| 331 | } | 331 | } |
| 332 | } | 332 | } |
| 333 | 333 | ||
| 334 | static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) | ||
| 335 | { | ||
| 336 | struct scsi_host_template *sht = sdev->host->hostt; | ||
| 337 | struct scsi_device *tmp_sdev; | ||
| 338 | |||
| 339 | if (!sht->change_queue_depth || | ||
| 340 | sdev->queue_depth >= sdev->max_queue_depth) | ||
| 341 | return; | ||
| 342 | |||
| 343 | if (time_before(jiffies, | ||
| 344 | sdev->last_queue_ramp_up + sdev->queue_ramp_up_period)) | ||
| 345 | return; | ||
| 346 | |||
| 347 | if (time_before(jiffies, | ||
| 348 | sdev->last_queue_full_time + sdev->queue_ramp_up_period)) | ||
| 349 | return; | ||
| 350 | |||
| 351 | /* | ||
| 352 | * Walk all devices of a target and do | ||
| 353 | * ramp up on them. | ||
| 354 | */ | ||
| 355 | shost_for_each_device(tmp_sdev, sdev->host) { | ||
| 356 | if (tmp_sdev->channel != sdev->channel || | ||
| 357 | tmp_sdev->id != sdev->id || | ||
| 358 | tmp_sdev->queue_depth == sdev->max_queue_depth) | ||
| 359 | continue; | ||
| 360 | /* | ||
| 361 | * call back into LLD to increase queue_depth by one | ||
| 362 | * with ramp up reason code. | ||
| 363 | */ | ||
| 364 | sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1, | ||
| 365 | SCSI_QDEPTH_RAMP_UP); | ||
| 366 | sdev->last_queue_ramp_up = jiffies; | ||
| 367 | } | ||
| 368 | } | ||
| 369 | |||
| 370 | static void scsi_handle_queue_full(struct scsi_device *sdev) | ||
| 371 | { | ||
| 372 | struct scsi_host_template *sht = sdev->host->hostt; | ||
| 373 | struct scsi_device *tmp_sdev; | ||
| 374 | |||
| 375 | if (!sht->change_queue_depth) | ||
| 376 | return; | ||
| 377 | |||
| 378 | shost_for_each_device(tmp_sdev, sdev->host) { | ||
| 379 | if (tmp_sdev->channel != sdev->channel || | ||
| 380 | tmp_sdev->id != sdev->id) | ||
| 381 | continue; | ||
| 382 | /* | ||
| 383 | * We do not know the number of commands that were at | ||
| 384 | * the device when we got the queue full so we start | ||
| 385 | * from the highest possible value and work our way down. | ||
| 386 | */ | ||
| 387 | sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1, | ||
| 388 | SCSI_QDEPTH_QFULL); | ||
| 389 | } | ||
| 390 | } | ||
| 391 | |||
| 334 | /** | 392 | /** |
| 335 | * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. | 393 | * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. |
| 336 | * @scmd: SCSI cmd to examine. | 394 | * @scmd: SCSI cmd to examine. |
| @@ -371,6 +429,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) | |||
| 371 | */ | 429 | */ |
| 372 | switch (status_byte(scmd->result)) { | 430 | switch (status_byte(scmd->result)) { |
| 373 | case GOOD: | 431 | case GOOD: |
| 432 | scsi_handle_queue_ramp_up(scmd->device); | ||
| 374 | case COMMAND_TERMINATED: | 433 | case COMMAND_TERMINATED: |
| 375 | return SUCCESS; | 434 | return SUCCESS; |
| 376 | case CHECK_CONDITION: | 435 | case CHECK_CONDITION: |
| @@ -387,8 +446,10 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) | |||
| 387 | * let issuer deal with this, it could be just fine | 446 | * let issuer deal with this, it could be just fine |
| 388 | */ | 447 | */ |
| 389 | return SUCCESS; | 448 | return SUCCESS; |
| 390 | case BUSY: | ||
| 391 | case QUEUE_FULL: | 449 | case QUEUE_FULL: |
| 450 | scsi_handle_queue_full(scmd->device); | ||
| 451 | /* fall through */ | ||
| 452 | case BUSY: | ||
| 392 | default: | 453 | default: |
| 393 | return FAILED; | 454 | return FAILED; |
| 394 | } | 455 | } |
| @@ -1387,6 +1448,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
| 1387 | */ | 1448 | */ |
| 1388 | switch (status_byte(scmd->result)) { | 1449 | switch (status_byte(scmd->result)) { |
| 1389 | case QUEUE_FULL: | 1450 | case QUEUE_FULL: |
| 1451 | scsi_handle_queue_full(scmd->device); | ||
| 1390 | /* | 1452 | /* |
| 1391 | * the case of trying to send too many commands to a | 1453 | * the case of trying to send too many commands to a |
| 1392 | * tagged queueing device. | 1454 | * tagged queueing device. |
| @@ -1400,6 +1462,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
| 1400 | */ | 1462 | */ |
| 1401 | return ADD_TO_MLQUEUE; | 1463 | return ADD_TO_MLQUEUE; |
| 1402 | case GOOD: | 1464 | case GOOD: |
| 1465 | scsi_handle_queue_ramp_up(scmd->device); | ||
| 1403 | case COMMAND_TERMINATED: | 1466 | case COMMAND_TERMINATED: |
| 1404 | return SUCCESS; | 1467 | return SUCCESS; |
| 1405 | case TASK_ABORTED: | 1468 | case TASK_ABORTED: |
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index b98f763931c5..d9564fb04f62 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c | |||
| @@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, | |||
| 308 | case SG_SCSI_RESET_DEVICE: | 308 | case SG_SCSI_RESET_DEVICE: |
| 309 | val = SCSI_TRY_RESET_DEVICE; | 309 | val = SCSI_TRY_RESET_DEVICE; |
| 310 | break; | 310 | break; |
| 311 | case SG_SCSI_RESET_TARGET: | ||
| 312 | val = SCSI_TRY_RESET_TARGET; | ||
| 313 | break; | ||
| 311 | case SG_SCSI_RESET_BUS: | 314 | case SG_SCSI_RESET_BUS: |
| 312 | val = SCSI_TRY_RESET_BUS; | 315 | val = SCSI_TRY_RESET_BUS; |
| 313 | break; | 316 | break; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5987da857103..e495d3813948 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -898,7 +898,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 898 | scsi_print_sense("", cmd); | 898 | scsi_print_sense("", cmd); |
| 899 | scsi_print_command(cmd); | 899 | scsi_print_command(cmd); |
| 900 | } | 900 | } |
| 901 | if (blk_end_request_err(req, -EIO)) | 901 | if (blk_end_request_err(req, error)) |
| 902 | scsi_requeue_command(q, cmd); | 902 | scsi_requeue_command(q, cmd); |
| 903 | else | 903 | else |
| 904 | scsi_next_command(cmd); | 904 | scsi_next_command(cmd); |
| @@ -1359,9 +1359,9 @@ static int scsi_lld_busy(struct request_queue *q) | |||
| 1359 | static void scsi_kill_request(struct request *req, struct request_queue *q) | 1359 | static void scsi_kill_request(struct request *req, struct request_queue *q) |
| 1360 | { | 1360 | { |
| 1361 | struct scsi_cmnd *cmd = req->special; | 1361 | struct scsi_cmnd *cmd = req->special; |
| 1362 | struct scsi_device *sdev = cmd->device; | 1362 | struct scsi_device *sdev; |
| 1363 | struct scsi_target *starget = scsi_target(sdev); | 1363 | struct scsi_target *starget; |
| 1364 | struct Scsi_Host *shost = sdev->host; | 1364 | struct Scsi_Host *shost; |
| 1365 | 1365 | ||
| 1366 | blk_start_request(req); | 1366 | blk_start_request(req); |
| 1367 | 1367 | ||
| @@ -1371,6 +1371,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
| 1371 | BUG(); | 1371 | BUG(); |
| 1372 | } | 1372 | } |
| 1373 | 1373 | ||
| 1374 | sdev = cmd->device; | ||
| 1375 | starget = scsi_target(sdev); | ||
| 1376 | shost = sdev->host; | ||
| 1374 | scsi_init_cmd_errh(cmd); | 1377 | scsi_init_cmd_errh(cmd); |
| 1375 | cmd->result = DID_NO_CONNECT << 16; | 1378 | cmd->result = DID_NO_CONNECT << 16; |
| 1376 | atomic_inc(&cmd->device->iorequest_cnt); | 1379 | atomic_inc(&cmd->device->iorequest_cnt); |
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c index ac6855cd2657..dcd128583b89 100644 --- a/drivers/scsi/scsi_lib_dma.c +++ b/drivers/scsi/scsi_lib_dma.c | |||
| @@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd) | |||
| 23 | int nseg = 0; | 23 | int nseg = 0; |
| 24 | 24 | ||
| 25 | if (scsi_sg_count(cmd)) { | 25 | if (scsi_sg_count(cmd)) { |
| 26 | struct device *dev = cmd->device->host->shost_gendev.parent; | 26 | struct device *dev = cmd->device->host->dma_dev; |
| 27 | 27 | ||
| 28 | nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), | 28 | nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), |
| 29 | cmd->sc_data_direction); | 29 | cmd->sc_data_direction); |
| @@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map); | |||
| 41 | void scsi_dma_unmap(struct scsi_cmnd *cmd) | 41 | void scsi_dma_unmap(struct scsi_cmnd *cmd) |
| 42 | { | 42 | { |
| 43 | if (scsi_sg_count(cmd)) { | 43 | if (scsi_sg_count(cmd)) { |
| 44 | struct device *dev = cmd->device->host->shost_gendev.parent; | 44 | struct device *dev = cmd->device->host->dma_dev; |
| 45 | 45 | ||
| 46 | dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), | 46 | dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), |
| 47 | cmd->sc_data_direction); | 47 | cmd->sc_data_direction); |
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 723fdecd91bd..0fd6ae6911ad 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c | |||
| @@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg); | |||
| 613 | * @data_buf: pointer to vendor unique data buffer | 613 | * @data_buf: pointer to vendor unique data buffer |
| 614 | * | 614 | * |
| 615 | * Returns: | 615 | * Returns: |
| 616 | * 0 on succesful return | 616 | * 0 on successful return |
| 617 | * otherwise, failing error code | 617 | * otherwise, failing error code |
| 618 | * | 618 | * |
| 619 | * Notes: | 619 | * Notes: |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 47291bcff0d5..012f73a96880 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -251,6 +251,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
| 251 | sdev->model = scsi_null_device_strs; | 251 | sdev->model = scsi_null_device_strs; |
| 252 | sdev->rev = scsi_null_device_strs; | 252 | sdev->rev = scsi_null_device_strs; |
| 253 | sdev->host = shost; | 253 | sdev->host = shost; |
| 254 | sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; | ||
| 254 | sdev->id = starget->id; | 255 | sdev->id = starget->id; |
| 255 | sdev->lun = lun; | 256 | sdev->lun = lun; |
| 256 | sdev->channel = starget->channel; | 257 | sdev->channel = starget->channel; |
| @@ -941,6 +942,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 941 | } | 942 | } |
| 942 | } | 943 | } |
| 943 | 944 | ||
| 945 | sdev->max_queue_depth = sdev->queue_depth; | ||
| 946 | |||
| 944 | /* | 947 | /* |
| 945 | * Ok, the device is now all set up, we can | 948 | * Ok, the device is now all set up, we can |
| 946 | * register it and tell the rest of the kernel | 949 | * register it and tell the rest of the kernel |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 392d8db33905..5a065055e68a 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
| @@ -766,10 +766,13 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, | |||
| 766 | if (depth < 1) | 766 | if (depth < 1) |
| 767 | return -EINVAL; | 767 | return -EINVAL; |
| 768 | 768 | ||
| 769 | retval = sht->change_queue_depth(sdev, depth); | 769 | retval = sht->change_queue_depth(sdev, depth, |
| 770 | SCSI_QDEPTH_DEFAULT); | ||
| 770 | if (retval < 0) | 771 | if (retval < 0) |
| 771 | return retval; | 772 | return retval; |
| 772 | 773 | ||
| 774 | sdev->max_queue_depth = sdev->queue_depth; | ||
| 775 | |||
| 773 | return count; | 776 | return count; |
| 774 | } | 777 | } |
| 775 | 778 | ||
| @@ -778,6 +781,37 @@ static struct device_attribute sdev_attr_queue_depth_rw = | |||
| 778 | sdev_store_queue_depth_rw); | 781 | sdev_store_queue_depth_rw); |
| 779 | 782 | ||
| 780 | static ssize_t | 783 | static ssize_t |
| 784 | sdev_show_queue_ramp_up_period(struct device *dev, | ||
| 785 | struct device_attribute *attr, | ||
| 786 | char *buf) | ||
| 787 | { | ||
| 788 | struct scsi_device *sdev; | ||
| 789 | sdev = to_scsi_device(dev); | ||
| 790 | return snprintf(buf, 20, "%u\n", | ||
| 791 | jiffies_to_msecs(sdev->queue_ramp_up_period)); | ||
| 792 | } | ||
| 793 | |||
| 794 | static ssize_t | ||
| 795 | sdev_store_queue_ramp_up_period(struct device *dev, | ||
| 796 | struct device_attribute *attr, | ||
| 797 | const char *buf, size_t count) | ||
| 798 | { | ||
| 799 | struct scsi_device *sdev = to_scsi_device(dev); | ||
| 800 | unsigned long period; | ||
| 801 | |||
| 802 | if (strict_strtoul(buf, 10, &period)) | ||
| 803 | return -EINVAL; | ||
| 804 | |||
| 805 | sdev->queue_ramp_up_period = msecs_to_jiffies(period); | ||
| 806 | return period; | ||
| 807 | } | ||
| 808 | |||
| 809 | static struct device_attribute sdev_attr_queue_ramp_up_period = | ||
| 810 | __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, | ||
| 811 | sdev_show_queue_ramp_up_period, | ||
| 812 | sdev_store_queue_ramp_up_period); | ||
| 813 | |||
| 814 | static ssize_t | ||
| 781 | sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, | 815 | sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, |
| 782 | const char *buf, size_t count) | 816 | const char *buf, size_t count) |
| 783 | { | 817 | { |
| @@ -867,8 +901,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) | |||
| 867 | sdev->is_visible = 1; | 901 | sdev->is_visible = 1; |
| 868 | 902 | ||
| 869 | /* create queue files, which may be writable, depending on the host */ | 903 | /* create queue files, which may be writable, depending on the host */ |
| 870 | if (sdev->host->hostt->change_queue_depth) | 904 | if (sdev->host->hostt->change_queue_depth) { |
| 871 | error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); | 905 | error = device_create_file(&sdev->sdev_gendev, |
| 906 | &sdev_attr_queue_depth_rw); | ||
| 907 | error = device_create_file(&sdev->sdev_gendev, | ||
| 908 | &sdev_attr_queue_ramp_up_period); | ||
| 909 | } | ||
| 872 | else | 910 | else |
| 873 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); | 911 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); |
| 874 | if (error) | 912 | if (error) |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index c6f70dae9b2e..6531c91501be 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | */ | 27 | */ |
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/delay.h> | ||
| 30 | #include <scsi/scsi_device.h> | 31 | #include <scsi/scsi_device.h> |
| 31 | #include <scsi/scsi_host.h> | 32 | #include <scsi/scsi_host.h> |
| 32 | #include <scsi/scsi_transport.h> | 33 | #include <scsi/scsi_transport.h> |
| @@ -2384,6 +2385,7 @@ fc_rport_final_delete(struct work_struct *work) | |||
| 2384 | struct Scsi_Host *shost = rport_to_shost(rport); | 2385 | struct Scsi_Host *shost = rport_to_shost(rport); |
| 2385 | struct fc_internal *i = to_fc_internal(shost->transportt); | 2386 | struct fc_internal *i = to_fc_internal(shost->transportt); |
| 2386 | unsigned long flags; | 2387 | unsigned long flags; |
| 2388 | int do_callback = 0; | ||
| 2387 | 2389 | ||
| 2388 | /* | 2390 | /* |
| 2389 | * if a scan is pending, flush the SCSI Host work_q so that | 2391 | * if a scan is pending, flush the SCSI Host work_q so that |
| @@ -2422,8 +2424,15 @@ fc_rport_final_delete(struct work_struct *work) | |||
| 2422 | * Avoid this call if we already called it when we preserved the | 2424 | * Avoid this call if we already called it when we preserved the |
| 2423 | * rport for the binding. | 2425 | * rport for the binding. |
| 2424 | */ | 2426 | */ |
| 2427 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 2425 | if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && | 2428 | if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && |
| 2426 | (i->f->dev_loss_tmo_callbk)) | 2429 | (i->f->dev_loss_tmo_callbk)) { |
| 2430 | rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
| 2431 | do_callback = 1; | ||
| 2432 | } | ||
| 2433 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 2434 | |||
| 2435 | if (do_callback) | ||
| 2427 | i->f->dev_loss_tmo_callbk(rport); | 2436 | i->f->dev_loss_tmo_callbk(rport); |
| 2428 | 2437 | ||
| 2429 | fc_bsg_remove(rport->rqst_q); | 2438 | fc_bsg_remove(rport->rqst_q); |
| @@ -2970,6 +2979,7 @@ fc_timeout_deleted_rport(struct work_struct *work) | |||
| 2970 | struct fc_internal *i = to_fc_internal(shost->transportt); | 2979 | struct fc_internal *i = to_fc_internal(shost->transportt); |
| 2971 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | 2980 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); |
| 2972 | unsigned long flags; | 2981 | unsigned long flags; |
| 2982 | int do_callback = 0; | ||
| 2973 | 2983 | ||
| 2974 | spin_lock_irqsave(shost->host_lock, flags); | 2984 | spin_lock_irqsave(shost->host_lock, flags); |
| 2975 | 2985 | ||
| @@ -3035,7 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work) | |||
| 3035 | rport->roles = FC_PORT_ROLE_UNKNOWN; | 3045 | rport->roles = FC_PORT_ROLE_UNKNOWN; |
| 3036 | rport->port_state = FC_PORTSTATE_NOTPRESENT; | 3046 | rport->port_state = FC_PORTSTATE_NOTPRESENT; |
| 3037 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; | 3047 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; |
| 3038 | rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
| 3039 | 3048 | ||
| 3040 | /* | 3049 | /* |
| 3041 | * Pre-emptively kill I/O rather than waiting for the work queue | 3050 | * Pre-emptively kill I/O rather than waiting for the work queue |
| @@ -3045,32 +3054,40 @@ fc_timeout_deleted_rport(struct work_struct *work) | |||
| 3045 | spin_unlock_irqrestore(shost->host_lock, flags); | 3054 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 3046 | fc_terminate_rport_io(rport); | 3055 | fc_terminate_rport_io(rport); |
| 3047 | 3056 | ||
| 3048 | BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT); | 3057 | spin_lock_irqsave(shost->host_lock, flags); |
| 3058 | |||
| 3059 | if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ | ||
| 3049 | 3060 | ||
| 3050 | /* remove the identifiers that aren't used in the consisting binding */ | 3061 | /* remove the identifiers that aren't used in the consisting binding */ |
| 3051 | switch (fc_host->tgtid_bind_type) { | 3062 | switch (fc_host->tgtid_bind_type) { |
| 3052 | case FC_TGTID_BIND_BY_WWPN: | 3063 | case FC_TGTID_BIND_BY_WWPN: |
| 3053 | rport->node_name = -1; | 3064 | rport->node_name = -1; |
| 3054 | rport->port_id = -1; | 3065 | rport->port_id = -1; |
| 3055 | break; | 3066 | break; |
| 3056 | case FC_TGTID_BIND_BY_WWNN: | 3067 | case FC_TGTID_BIND_BY_WWNN: |
| 3057 | rport->port_name = -1; | 3068 | rport->port_name = -1; |
| 3058 | rport->port_id = -1; | 3069 | rport->port_id = -1; |
| 3059 | break; | 3070 | break; |
| 3060 | case FC_TGTID_BIND_BY_ID: | 3071 | case FC_TGTID_BIND_BY_ID: |
| 3061 | rport->node_name = -1; | 3072 | rport->node_name = -1; |
| 3062 | rport->port_name = -1; | 3073 | rport->port_name = -1; |
| 3063 | break; | 3074 | break; |
| 3064 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ | 3075 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ |
| 3065 | break; | 3076 | break; |
| 3077 | } | ||
| 3078 | |||
| 3079 | /* | ||
| 3080 | * As this only occurs if the remote port (scsi target) | ||
| 3081 | * went away and didn't come back - we'll remove | ||
| 3082 | * all attached scsi devices. | ||
| 3083 | */ | ||
| 3084 | rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
| 3085 | fc_queue_work(shost, &rport->stgt_delete_work); | ||
| 3086 | |||
| 3087 | do_callback = 1; | ||
| 3066 | } | 3088 | } |
| 3067 | 3089 | ||
| 3068 | /* | 3090 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 3069 | * As this only occurs if the remote port (scsi target) | ||
| 3070 | * went away and didn't come back - we'll remove | ||
| 3071 | * all attached scsi devices. | ||
| 3072 | */ | ||
| 3073 | fc_queue_work(shost, &rport->stgt_delete_work); | ||
| 3074 | 3091 | ||
| 3075 | /* | 3092 | /* |
| 3076 | * Notify the driver that the rport is now dead. The LLDD will | 3093 | * Notify the driver that the rport is now dead. The LLDD will |
| @@ -3078,7 +3095,7 @@ fc_timeout_deleted_rport(struct work_struct *work) | |||
| 3078 | * | 3095 | * |
| 3079 | * Note: we set the CALLBK_DONE flag above to correspond | 3096 | * Note: we set the CALLBK_DONE flag above to correspond |
| 3080 | */ | 3097 | */ |
| 3081 | if (i->f->dev_loss_tmo_callbk) | 3098 | if (do_callback && i->f->dev_loss_tmo_callbk) |
| 3082 | i->f->dev_loss_tmo_callbk(rport); | 3099 | i->f->dev_loss_tmo_callbk(rport); |
| 3083 | } | 3100 | } |
| 3084 | 3101 | ||
| @@ -3128,6 +3145,31 @@ fc_scsi_scan_rport(struct work_struct *work) | |||
| 3128 | spin_unlock_irqrestore(shost->host_lock, flags); | 3145 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 3129 | } | 3146 | } |
| 3130 | 3147 | ||
| 3148 | /** | ||
| 3149 | * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport | ||
| 3150 | * @cmnd: SCSI command that scsi_eh is trying to recover | ||
| 3151 | * | ||
| 3152 | * This routine can be called from a FC LLD scsi_eh callback. It | ||
| 3153 | * blocks the scsi_eh thread until the fc_rport leaves the | ||
| 3154 | * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh | ||
| 3155 | * failing recovery actions for blocked rports which would lead to | ||
| 3156 | * offlined SCSI devices. | ||
| 3157 | */ | ||
| 3158 | void fc_block_scsi_eh(struct scsi_cmnd *cmnd) | ||
| 3159 | { | ||
| 3160 | struct Scsi_Host *shost = cmnd->device->host; | ||
| 3161 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
| 3162 | unsigned long flags; | ||
| 3163 | |||
| 3164 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 3165 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
| 3166 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 3167 | msleep(1000); | ||
| 3168 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 3169 | } | ||
| 3170 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
| 3171 | } | ||
| 3172 | EXPORT_SYMBOL(fc_block_scsi_eh); | ||
| 3131 | 3173 | ||
| 3132 | /** | 3174 | /** |
| 3133 | * fc_vport_setup - allocates and creates a FC virtual port. | 3175 | * fc_vport_setup - allocates and creates a FC virtual port. |
| @@ -3769,8 +3811,9 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, | |||
| 3769 | return; | 3811 | return; |
| 3770 | 3812 | ||
| 3771 | while (!blk_queue_plugged(q)) { | 3813 | while (!blk_queue_plugged(q)) { |
| 3772 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED)) | 3814 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && |
| 3773 | break; | 3815 | !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) |
| 3816 | break; | ||
| 3774 | 3817 | ||
| 3775 | req = blk_fetch_request(q); | 3818 | req = blk_fetch_request(q); |
| 3776 | if (!req) | 3819 | if (!req) |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ad897df36615..ea3892e7e0f7 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | #include <scsi/scsi_transport_iscsi.h> | 30 | #include <scsi/scsi_transport_iscsi.h> |
| 31 | #include <scsi/iscsi_if.h> | 31 | #include <scsi/iscsi_if.h> |
| 32 | 32 | ||
| 33 | #define ISCSI_SESSION_ATTRS 21 | 33 | #define ISCSI_SESSION_ATTRS 22 |
| 34 | #define ISCSI_CONN_ATTRS 13 | 34 | #define ISCSI_CONN_ATTRS 13 |
| 35 | #define ISCSI_HOST_ATTRS 4 | 35 | #define ISCSI_HOST_ATTRS 4 |
| 36 | 36 | ||
| @@ -627,8 +627,10 @@ static void __iscsi_block_session(struct work_struct *work) | |||
| 627 | spin_unlock_irqrestore(&session->lock, flags); | 627 | spin_unlock_irqrestore(&session->lock, flags); |
| 628 | scsi_target_block(&session->dev); | 628 | scsi_target_block(&session->dev); |
| 629 | ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); | 629 | ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); |
| 630 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, | 630 | if (session->recovery_tmo >= 0) |
| 631 | session->recovery_tmo * HZ); | 631 | queue_delayed_work(iscsi_eh_timer_workq, |
| 632 | &session->recovery_work, | ||
| 633 | session->recovery_tmo * HZ); | ||
| 632 | } | 634 | } |
| 633 | 635 | ||
| 634 | void iscsi_block_session(struct iscsi_cls_session *session) | 636 | void iscsi_block_session(struct iscsi_cls_session *session) |
| @@ -1348,8 +1350,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) | |||
| 1348 | switch (ev->u.set_param.param) { | 1350 | switch (ev->u.set_param.param) { |
| 1349 | case ISCSI_PARAM_SESS_RECOVERY_TMO: | 1351 | case ISCSI_PARAM_SESS_RECOVERY_TMO: |
| 1350 | sscanf(data, "%d", &value); | 1352 | sscanf(data, "%d", &value); |
| 1351 | if (value != 0) | 1353 | session->recovery_tmo = value; |
| 1352 | session->recovery_tmo = value; | ||
| 1353 | break; | 1354 | break; |
| 1354 | default: | 1355 | default: |
| 1355 | err = transport->set_param(conn, ev->u.set_param.param, | 1356 | err = transport->set_param(conn, ev->u.set_param.param, |
| @@ -1759,6 +1760,7 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); | |||
| 1759 | iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); | 1760 | iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); |
| 1760 | iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); | 1761 | iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); |
| 1761 | iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); | 1762 | iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); |
| 1763 | iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); | ||
| 1762 | iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); | 1764 | iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); |
| 1763 | iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) | 1765 | iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) |
| 1764 | 1766 | ||
| @@ -2000,6 +2002,7 @@ iscsi_register_transport(struct iscsi_transport *tt) | |||
| 2000 | SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); | 2002 | SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); |
| 2001 | SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); | 2003 | SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); |
| 2002 | SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); | 2004 | SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); |
| 2005 | SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO); | ||
| 2003 | SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); | 2006 | SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); |
| 2004 | SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); | 2007 | SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); |
| 2005 | SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); | 2008 | SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index fd47cb1bee1b..f27e52d963d3 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
| @@ -666,7 +666,7 @@ EXPORT_SYMBOL(sas_phy_add); | |||
| 666 | * | 666 | * |
| 667 | * Note: | 667 | * Note: |
| 668 | * This function must only be called on a PHY that has not | 668 | * This function must only be called on a PHY that has not |
| 669 | * sucessfully been added using sas_phy_add(). | 669 | * successfully been added using sas_phy_add(). |
| 670 | */ | 670 | */ |
| 671 | void sas_phy_free(struct sas_phy *phy) | 671 | void sas_phy_free(struct sas_phy *phy) |
| 672 | { | 672 | { |
| @@ -896,7 +896,7 @@ EXPORT_SYMBOL(sas_port_add); | |||
| 896 | * | 896 | * |
| 897 | * Note: | 897 | * Note: |
| 898 | * This function must only be called on a PORT that has not | 898 | * This function must only be called on a PORT that has not |
| 899 | * sucessfully been added using sas_port_add(). | 899 | * successfully been added using sas_port_add(). |
| 900 | */ | 900 | */ |
| 901 | void sas_port_free(struct sas_port *port) | 901 | void sas_port_free(struct sas_port *port) |
| 902 | { | 902 | { |
| @@ -1476,7 +1476,7 @@ EXPORT_SYMBOL(sas_rphy_add); | |||
| 1476 | * | 1476 | * |
| 1477 | * Note: | 1477 | * Note: |
| 1478 | * This function must only be called on a remote | 1478 | * This function must only be called on a remote |
| 1479 | * PHY that has not sucessfully been added using | 1479 | * PHY that has not successfully been added using |
| 1480 | * sas_rphy_add() (or has been sas_rphy_remove()'d) | 1480 | * sas_rphy_add() (or has been sas_rphy_remove()'d) |
| 1481 | */ | 1481 | */ |
| 1482 | void sas_rphy_free(struct sas_rphy *rphy) | 1482 | void sas_rphy_free(struct sas_rphy *rphy) |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 12d58a7ed6bc..ad59abb47722 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -2280,7 +2280,8 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2280 | } else if (code == MT_ST_SET_CLN) { | 2280 | } else if (code == MT_ST_SET_CLN) { |
| 2281 | value = (options & ~MT_ST_OPTIONS) & 0xff; | 2281 | value = (options & ~MT_ST_OPTIONS) & 0xff; |
| 2282 | if (value != 0 && | 2282 | if (value != 0 && |
| 2283 | value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE) | 2283 | (value < EXTENDED_SENSE_START || |
| 2284 | value >= SCSI_SENSE_BUFFERSIZE)) | ||
| 2284 | return (-EINVAL); | 2285 | return (-EINVAL); |
| 2285 | STp->cln_mode = value; | 2286 | STp->cln_mode = value; |
| 2286 | STp->cln_sense_mask = (options >> 8) & 0xff; | 2287 | STp->cln_sense_mask = (options >> 8) & 0xff; |
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 09fa8861fc58..3058bb1aff95 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
| @@ -36,11 +36,11 @@ | |||
| 36 | #include <scsi/scsi_eh.h> | 36 | #include <scsi/scsi_eh.h> |
| 37 | 37 | ||
| 38 | #define DRV_NAME "stex" | 38 | #define DRV_NAME "stex" |
| 39 | #define ST_DRIVER_VERSION "4.6.0000.3" | 39 | #define ST_DRIVER_VERSION "4.6.0000.4" |
| 40 | #define ST_VER_MAJOR 4 | 40 | #define ST_VER_MAJOR 4 |
| 41 | #define ST_VER_MINOR 6 | 41 | #define ST_VER_MINOR 6 |
| 42 | #define ST_OEM 0 | 42 | #define ST_OEM 0 |
| 43 | #define ST_BUILD_VER 3 | 43 | #define ST_BUILD_VER 4 |
| 44 | 44 | ||
| 45 | enum { | 45 | enum { |
| 46 | /* MU register offset */ | 46 | /* MU register offset */ |
| @@ -64,24 +64,24 @@ enum { | |||
| 64 | YH2I_REQ_HI = 0xc4, | 64 | YH2I_REQ_HI = 0xc4, |
| 65 | 65 | ||
| 66 | /* MU register value */ | 66 | /* MU register value */ |
| 67 | MU_INBOUND_DOORBELL_HANDSHAKE = 1, | 67 | MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), |
| 68 | MU_INBOUND_DOORBELL_REQHEADCHANGED = 2, | 68 | MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), |
| 69 | MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4, | 69 | MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), |
| 70 | MU_INBOUND_DOORBELL_HMUSTOPPED = 8, | 70 | MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), |
| 71 | MU_INBOUND_DOORBELL_RESET = 16, | 71 | MU_INBOUND_DOORBELL_RESET = (1 << 4), |
| 72 | 72 | ||
| 73 | MU_OUTBOUND_DOORBELL_HANDSHAKE = 1, | 73 | MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0), |
| 74 | MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2, | 74 | MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), |
| 75 | MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4, | 75 | MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), |
| 76 | MU_OUTBOUND_DOORBELL_BUSCHANGE = 8, | 76 | MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), |
| 77 | MU_OUTBOUND_DOORBELL_HASEVENT = 16, | 77 | MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), |
| 78 | MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), | ||
| 78 | 79 | ||
| 79 | /* MU status code */ | 80 | /* MU status code */ |
| 80 | MU_STATE_STARTING = 1, | 81 | MU_STATE_STARTING = 1, |
| 81 | MU_STATE_FMU_READY_FOR_HANDSHAKE = 2, | 82 | MU_STATE_STARTED = 2, |
| 82 | MU_STATE_SEND_HANDSHAKE_FRAME = 3, | 83 | MU_STATE_RESETTING = 3, |
| 83 | MU_STATE_STARTED = 4, | 84 | MU_STATE_FAILED = 4, |
| 84 | MU_STATE_RESETTING = 5, | ||
| 85 | 85 | ||
| 86 | MU_MAX_DELAY = 120, | 86 | MU_MAX_DELAY = 120, |
| 87 | MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, | 87 | MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, |
| @@ -111,6 +111,8 @@ enum { | |||
| 111 | 111 | ||
| 112 | SS_H2I_INT_RESET = 0x100, | 112 | SS_H2I_INT_RESET = 0x100, |
| 113 | 113 | ||
| 114 | SS_I2H_REQUEST_RESET = 0x2000, | ||
| 115 | |||
| 114 | SS_MU_OPERATIONAL = 0x80000000, | 116 | SS_MU_OPERATIONAL = 0x80000000, |
| 115 | 117 | ||
| 116 | STEX_CDB_LENGTH = 16, | 118 | STEX_CDB_LENGTH = 16, |
| @@ -160,6 +162,7 @@ enum { | |||
| 160 | INQUIRY_EVPD = 0x01, | 162 | INQUIRY_EVPD = 0x01, |
| 161 | 163 | ||
| 162 | ST_ADDITIONAL_MEM = 0x200000, | 164 | ST_ADDITIONAL_MEM = 0x200000, |
| 165 | ST_ADDITIONAL_MEM_MIN = 0x80000, | ||
| 163 | }; | 166 | }; |
| 164 | 167 | ||
| 165 | struct st_sgitem { | 168 | struct st_sgitem { |
| @@ -311,6 +314,10 @@ struct st_hba { | |||
| 311 | struct st_ccb *wait_ccb; | 314 | struct st_ccb *wait_ccb; |
| 312 | __le32 *scratch; | 315 | __le32 *scratch; |
| 313 | 316 | ||
| 317 | char work_q_name[20]; | ||
| 318 | struct workqueue_struct *work_q; | ||
| 319 | struct work_struct reset_work; | ||
| 320 | wait_queue_head_t reset_waitq; | ||
| 314 | unsigned int mu_status; | 321 | unsigned int mu_status; |
| 315 | unsigned int cardtype; | 322 | unsigned int cardtype; |
| 316 | int msi_enabled; | 323 | int msi_enabled; |
| @@ -577,6 +584,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
| 577 | lun = cmd->device->lun; | 584 | lun = cmd->device->lun; |
| 578 | hba = (struct st_hba *) &host->hostdata[0]; | 585 | hba = (struct st_hba *) &host->hostdata[0]; |
| 579 | 586 | ||
| 587 | if (unlikely(hba->mu_status == MU_STATE_RESETTING)) | ||
| 588 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 589 | |||
| 580 | switch (cmd->cmnd[0]) { | 590 | switch (cmd->cmnd[0]) { |
| 581 | case MODE_SENSE_10: | 591 | case MODE_SENSE_10: |
| 582 | { | 592 | { |
| @@ -841,7 +851,6 @@ static irqreturn_t stex_intr(int irq, void *__hba) | |||
| 841 | void __iomem *base = hba->mmio_base; | 851 | void __iomem *base = hba->mmio_base; |
| 842 | u32 data; | 852 | u32 data; |
| 843 | unsigned long flags; | 853 | unsigned long flags; |
| 844 | int handled = 0; | ||
| 845 | 854 | ||
| 846 | spin_lock_irqsave(hba->host->host_lock, flags); | 855 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 847 | 856 | ||
| @@ -852,12 +861,16 @@ static irqreturn_t stex_intr(int irq, void *__hba) | |||
| 852 | writel(data, base + ODBL); | 861 | writel(data, base + ODBL); |
| 853 | readl(base + ODBL); /* flush */ | 862 | readl(base + ODBL); /* flush */ |
| 854 | stex_mu_intr(hba, data); | 863 | stex_mu_intr(hba, data); |
| 855 | handled = 1; | 864 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 865 | if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && | ||
| 866 | hba->cardtype == st_shasta)) | ||
| 867 | queue_work(hba->work_q, &hba->reset_work); | ||
| 868 | return IRQ_HANDLED; | ||
| 856 | } | 869 | } |
| 857 | 870 | ||
| 858 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 871 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 859 | 872 | ||
| 860 | return IRQ_RETVAL(handled); | 873 | return IRQ_NONE; |
| 861 | } | 874 | } |
| 862 | 875 | ||
| 863 | static void stex_ss_mu_intr(struct st_hba *hba) | 876 | static void stex_ss_mu_intr(struct st_hba *hba) |
| @@ -939,7 +952,6 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba) | |||
| 939 | void __iomem *base = hba->mmio_base; | 952 | void __iomem *base = hba->mmio_base; |
| 940 | u32 data; | 953 | u32 data; |
| 941 | unsigned long flags; | 954 | unsigned long flags; |
| 942 | int handled = 0; | ||
| 943 | 955 | ||
| 944 | spin_lock_irqsave(hba->host->host_lock, flags); | 956 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 945 | 957 | ||
| @@ -948,12 +960,15 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba) | |||
| 948 | /* clear the interrupt */ | 960 | /* clear the interrupt */ |
| 949 | writel(data, base + YI2H_INT_C); | 961 | writel(data, base + YI2H_INT_C); |
| 950 | stex_ss_mu_intr(hba); | 962 | stex_ss_mu_intr(hba); |
| 951 | handled = 1; | 963 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 964 | if (unlikely(data & SS_I2H_REQUEST_RESET)) | ||
| 965 | queue_work(hba->work_q, &hba->reset_work); | ||
| 966 | return IRQ_HANDLED; | ||
| 952 | } | 967 | } |
| 953 | 968 | ||
| 954 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 969 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 955 | 970 | ||
| 956 | return IRQ_RETVAL(handled); | 971 | return IRQ_NONE; |
| 957 | } | 972 | } |
| 958 | 973 | ||
| 959 | static int stex_common_handshake(struct st_hba *hba) | 974 | static int stex_common_handshake(struct st_hba *hba) |
| @@ -1001,7 +1016,7 @@ static int stex_common_handshake(struct st_hba *hba) | |||
| 1001 | h->partner_type = HMU_PARTNER_TYPE; | 1016 | h->partner_type = HMU_PARTNER_TYPE; |
| 1002 | if (hba->extra_offset) { | 1017 | if (hba->extra_offset) { |
| 1003 | h->extra_offset = cpu_to_le32(hba->extra_offset); | 1018 | h->extra_offset = cpu_to_le32(hba->extra_offset); |
| 1004 | h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM); | 1019 | h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); |
| 1005 | } else | 1020 | } else |
| 1006 | h->extra_offset = h->extra_size = 0; | 1021 | h->extra_offset = h->extra_size = 0; |
| 1007 | 1022 | ||
| @@ -1046,7 +1061,7 @@ static int stex_ss_handshake(struct st_hba *hba) | |||
| 1046 | struct st_msg_header *msg_h; | 1061 | struct st_msg_header *msg_h; |
| 1047 | struct handshake_frame *h; | 1062 | struct handshake_frame *h; |
| 1048 | __le32 *scratch; | 1063 | __le32 *scratch; |
| 1049 | u32 data; | 1064 | u32 data, scratch_size; |
| 1050 | unsigned long before; | 1065 | unsigned long before; |
| 1051 | int ret = 0; | 1066 | int ret = 0; |
| 1052 | 1067 | ||
| @@ -1074,13 +1089,16 @@ static int stex_ss_handshake(struct st_hba *hba) | |||
| 1074 | stex_gettime(&h->hosttime); | 1089 | stex_gettime(&h->hosttime); |
| 1075 | h->partner_type = HMU_PARTNER_TYPE; | 1090 | h->partner_type = HMU_PARTNER_TYPE; |
| 1076 | h->extra_offset = h->extra_size = 0; | 1091 | h->extra_offset = h->extra_size = 0; |
| 1077 | h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32)); | 1092 | scratch_size = (hba->sts_count+1)*sizeof(u32); |
| 1093 | h->scratch_size = cpu_to_le32(scratch_size); | ||
| 1078 | 1094 | ||
| 1079 | data = readl(base + YINT_EN); | 1095 | data = readl(base + YINT_EN); |
| 1080 | data &= ~4; | 1096 | data &= ~4; |
| 1081 | writel(data, base + YINT_EN); | 1097 | writel(data, base + YINT_EN); |
| 1082 | writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); | 1098 | writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); |
| 1099 | readl(base + YH2I_REQ_HI); | ||
| 1083 | writel(hba->dma_handle, base + YH2I_REQ); | 1100 | writel(hba->dma_handle, base + YH2I_REQ); |
| 1101 | readl(base + YH2I_REQ); /* flush */ | ||
| 1084 | 1102 | ||
| 1085 | scratch = hba->scratch; | 1103 | scratch = hba->scratch; |
| 1086 | before = jiffies; | 1104 | before = jiffies; |
| @@ -1096,7 +1114,7 @@ static int stex_ss_handshake(struct st_hba *hba) | |||
| 1096 | msleep(1); | 1114 | msleep(1); |
| 1097 | } | 1115 | } |
| 1098 | 1116 | ||
| 1099 | *scratch = 0; | 1117 | memset(scratch, 0, scratch_size); |
| 1100 | msg_h->flag = 0; | 1118 | msg_h->flag = 0; |
| 1101 | return ret; | 1119 | return ret; |
| 1102 | } | 1120 | } |
| @@ -1105,19 +1123,24 @@ static int stex_handshake(struct st_hba *hba) | |||
| 1105 | { | 1123 | { |
| 1106 | int err; | 1124 | int err; |
| 1107 | unsigned long flags; | 1125 | unsigned long flags; |
| 1126 | unsigned int mu_status; | ||
| 1108 | 1127 | ||
| 1109 | err = (hba->cardtype == st_yel) ? | 1128 | err = (hba->cardtype == st_yel) ? |
| 1110 | stex_ss_handshake(hba) : stex_common_handshake(hba); | 1129 | stex_ss_handshake(hba) : stex_common_handshake(hba); |
| 1130 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 1131 | mu_status = hba->mu_status; | ||
| 1111 | if (err == 0) { | 1132 | if (err == 0) { |
| 1112 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 1113 | hba->req_head = 0; | 1133 | hba->req_head = 0; |
| 1114 | hba->req_tail = 0; | 1134 | hba->req_tail = 0; |
| 1115 | hba->status_head = 0; | 1135 | hba->status_head = 0; |
| 1116 | hba->status_tail = 0; | 1136 | hba->status_tail = 0; |
| 1117 | hba->out_req_cnt = 0; | 1137 | hba->out_req_cnt = 0; |
| 1118 | hba->mu_status = MU_STATE_STARTED; | 1138 | hba->mu_status = MU_STATE_STARTED; |
| 1119 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1139 | } else |
| 1120 | } | 1140 | hba->mu_status = MU_STATE_FAILED; |
| 1141 | if (mu_status == MU_STATE_RESETTING) | ||
| 1142 | wake_up_all(&hba->reset_waitq); | ||
| 1143 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1121 | return err; | 1144 | return err; |
| 1122 | } | 1145 | } |
| 1123 | 1146 | ||
| @@ -1137,17 +1160,11 @@ static int stex_abort(struct scsi_cmnd *cmd) | |||
| 1137 | 1160 | ||
| 1138 | base = hba->mmio_base; | 1161 | base = hba->mmio_base; |
| 1139 | spin_lock_irqsave(host->host_lock, flags); | 1162 | spin_lock_irqsave(host->host_lock, flags); |
| 1140 | if (tag < host->can_queue && hba->ccb[tag].cmd == cmd) | 1163 | if (tag < host->can_queue && |
| 1164 | hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) | ||
| 1141 | hba->wait_ccb = &hba->ccb[tag]; | 1165 | hba->wait_ccb = &hba->ccb[tag]; |
| 1142 | else { | 1166 | else |
| 1143 | for (tag = 0; tag < host->can_queue; tag++) | 1167 | goto out; |
| 1144 | if (hba->ccb[tag].cmd == cmd) { | ||
| 1145 | hba->wait_ccb = &hba->ccb[tag]; | ||
| 1146 | break; | ||
| 1147 | } | ||
| 1148 | if (tag >= host->can_queue) | ||
| 1149 | goto out; | ||
| 1150 | } | ||
| 1151 | 1168 | ||
| 1152 | if (hba->cardtype == st_yel) { | 1169 | if (hba->cardtype == st_yel) { |
| 1153 | data = readl(base + YI2H_INT); | 1170 | data = readl(base + YI2H_INT); |
| @@ -1221,6 +1238,37 @@ static void stex_hard_reset(struct st_hba *hba) | |||
| 1221 | hba->pdev->saved_config_space[i]); | 1238 | hba->pdev->saved_config_space[i]); |
| 1222 | } | 1239 | } |
| 1223 | 1240 | ||
| 1241 | static int stex_yos_reset(struct st_hba *hba) | ||
| 1242 | { | ||
| 1243 | void __iomem *base; | ||
| 1244 | unsigned long flags, before; | ||
| 1245 | int ret = 0; | ||
| 1246 | |||
| 1247 | base = hba->mmio_base; | ||
| 1248 | writel(MU_INBOUND_DOORBELL_RESET, base + IDBL); | ||
| 1249 | readl(base + IDBL); /* flush */ | ||
| 1250 | before = jiffies; | ||
| 1251 | while (hba->out_req_cnt > 0) { | ||
| 1252 | if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { | ||
| 1253 | printk(KERN_WARNING DRV_NAME | ||
| 1254 | "(%s): reset timeout\n", pci_name(hba->pdev)); | ||
| 1255 | ret = -1; | ||
| 1256 | break; | ||
| 1257 | } | ||
| 1258 | msleep(1); | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 1262 | if (ret == -1) | ||
| 1263 | hba->mu_status = MU_STATE_FAILED; | ||
| 1264 | else | ||
| 1265 | hba->mu_status = MU_STATE_STARTED; | ||
| 1266 | wake_up_all(&hba->reset_waitq); | ||
| 1267 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1268 | |||
| 1269 | return ret; | ||
| 1270 | } | ||
| 1271 | |||
| 1224 | static void stex_ss_reset(struct st_hba *hba) | 1272 | static void stex_ss_reset(struct st_hba *hba) |
| 1225 | { | 1273 | { |
| 1226 | writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); | 1274 | writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); |
| @@ -1228,66 +1276,86 @@ static void stex_ss_reset(struct st_hba *hba) | |||
| 1228 | ssleep(5); | 1276 | ssleep(5); |
| 1229 | } | 1277 | } |
| 1230 | 1278 | ||
| 1231 | static int stex_reset(struct scsi_cmnd *cmd) | 1279 | static int stex_do_reset(struct st_hba *hba) |
| 1232 | { | 1280 | { |
| 1233 | struct st_hba *hba; | 1281 | struct st_ccb *ccb; |
| 1234 | void __iomem *base; | 1282 | unsigned long flags; |
| 1235 | unsigned long flags, before; | 1283 | unsigned int mu_status = MU_STATE_RESETTING; |
| 1284 | u16 tag; | ||
| 1236 | 1285 | ||
| 1237 | hba = (struct st_hba *) &cmd->device->host->hostdata[0]; | 1286 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 1287 | if (hba->mu_status == MU_STATE_STARTING) { | ||
| 1288 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1289 | printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", | ||
| 1290 | pci_name(hba->pdev)); | ||
| 1291 | return 0; | ||
| 1292 | } | ||
| 1293 | while (hba->mu_status == MU_STATE_RESETTING) { | ||
| 1294 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1295 | wait_event_timeout(hba->reset_waitq, | ||
| 1296 | hba->mu_status != MU_STATE_RESETTING, | ||
| 1297 | MU_MAX_DELAY * HZ); | ||
| 1298 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 1299 | mu_status = hba->mu_status; | ||
| 1300 | } | ||
| 1238 | 1301 | ||
| 1239 | printk(KERN_INFO DRV_NAME | 1302 | if (mu_status != MU_STATE_RESETTING) { |
| 1240 | "(%s): resetting host\n", pci_name(hba->pdev)); | 1303 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 1241 | scsi_print_command(cmd); | 1304 | return (mu_status == MU_STATE_STARTED) ? 0 : -1; |
| 1305 | } | ||
| 1242 | 1306 | ||
| 1243 | hba->mu_status = MU_STATE_RESETTING; | 1307 | hba->mu_status = MU_STATE_RESETTING; |
| 1308 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1309 | |||
| 1310 | if (hba->cardtype == st_yosemite) | ||
| 1311 | return stex_yos_reset(hba); | ||
| 1244 | 1312 | ||
| 1245 | if (hba->cardtype == st_shasta) | 1313 | if (hba->cardtype == st_shasta) |
| 1246 | stex_hard_reset(hba); | 1314 | stex_hard_reset(hba); |
| 1247 | else if (hba->cardtype == st_yel) | 1315 | else if (hba->cardtype == st_yel) |
| 1248 | stex_ss_reset(hba); | 1316 | stex_ss_reset(hba); |
| 1249 | 1317 | ||
| 1250 | if (hba->cardtype != st_yosemite) { | 1318 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 1251 | if (stex_handshake(hba)) { | 1319 | for (tag = 0; tag < hba->host->can_queue; tag++) { |
| 1252 | printk(KERN_WARNING DRV_NAME | 1320 | ccb = &hba->ccb[tag]; |
| 1253 | "(%s): resetting: handshake failed\n", | 1321 | if (ccb->req == NULL) |
| 1254 | pci_name(hba->pdev)); | 1322 | continue; |
| 1255 | return FAILED; | 1323 | ccb->req = NULL; |
| 1324 | if (ccb->cmd) { | ||
| 1325 | scsi_dma_unmap(ccb->cmd); | ||
| 1326 | ccb->cmd->result = DID_RESET << 16; | ||
| 1327 | ccb->cmd->scsi_done(ccb->cmd); | ||
| 1328 | ccb->cmd = NULL; | ||
| 1256 | } | 1329 | } |
| 1257 | return SUCCESS; | ||
| 1258 | } | 1330 | } |
| 1331 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1259 | 1332 | ||
| 1260 | /* st_yosemite */ | 1333 | if (stex_handshake(hba) == 0) |
| 1261 | writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL); | 1334 | return 0; |
| 1262 | readl(hba->mmio_base + IDBL); /* flush */ | ||
| 1263 | before = jiffies; | ||
| 1264 | while (hba->out_req_cnt > 0) { | ||
| 1265 | if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { | ||
| 1266 | printk(KERN_WARNING DRV_NAME | ||
| 1267 | "(%s): reset timeout\n", pci_name(hba->pdev)); | ||
| 1268 | return FAILED; | ||
| 1269 | } | ||
| 1270 | msleep(1); | ||
| 1271 | } | ||
| 1272 | 1335 | ||
| 1273 | base = hba->mmio_base; | 1336 | printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", |
| 1274 | writel(0, base + IMR0); | 1337 | pci_name(hba->pdev)); |
| 1275 | readl(base + IMR0); | 1338 | return -1; |
| 1276 | writel(0, base + OMR0); | 1339 | } |
| 1277 | readl(base + OMR0); | 1340 | |
| 1278 | writel(0, base + IMR1); | 1341 | static int stex_reset(struct scsi_cmnd *cmd) |
| 1279 | readl(base + IMR1); | 1342 | { |
| 1280 | writel(0, base + OMR1); | 1343 | struct st_hba *hba; |
| 1281 | readl(base + OMR1); /* flush */ | 1344 | |
| 1282 | spin_lock_irqsave(hba->host->host_lock, flags); | 1345 | hba = (struct st_hba *) &cmd->device->host->hostdata[0]; |
| 1283 | hba->req_head = 0; | 1346 | |
| 1284 | hba->req_tail = 0; | 1347 | printk(KERN_INFO DRV_NAME |
| 1285 | hba->status_head = 0; | 1348 | "(%s): resetting host\n", pci_name(hba->pdev)); |
| 1286 | hba->status_tail = 0; | 1349 | scsi_print_command(cmd); |
| 1287 | hba->out_req_cnt = 0; | 1350 | |
| 1288 | hba->mu_status = MU_STATE_STARTED; | 1351 | return stex_do_reset(hba) ? FAILED : SUCCESS; |
| 1289 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1352 | } |
| 1290 | return SUCCESS; | 1353 | |
| 1354 | static void stex_reset_work(struct work_struct *work) | ||
| 1355 | { | ||
| 1356 | struct st_hba *hba = container_of(work, struct st_hba, reset_work); | ||
| 1357 | |||
| 1358 | stex_do_reset(hba); | ||
| 1291 | } | 1359 | } |
| 1292 | 1360 | ||
| 1293 | static int stex_biosparam(struct scsi_device *sdev, | 1361 | static int stex_biosparam(struct scsi_device *sdev, |
| @@ -1420,8 +1488,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev) | |||
| 1420 | { | 1488 | { |
| 1421 | int ret; | 1489 | int ret; |
| 1422 | 1490 | ||
| 1423 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) | 1491 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) |
| 1424 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) | 1492 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) |
| 1425 | return 0; | 1493 | return 0; |
| 1426 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 1494 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 1427 | if (!ret) | 1495 | if (!ret) |
| @@ -1528,10 +1596,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1528 | hba->dma_mem = dma_alloc_coherent(&pdev->dev, | 1596 | hba->dma_mem = dma_alloc_coherent(&pdev->dev, |
| 1529 | hba->dma_size, &hba->dma_handle, GFP_KERNEL); | 1597 | hba->dma_size, &hba->dma_handle, GFP_KERNEL); |
| 1530 | if (!hba->dma_mem) { | 1598 | if (!hba->dma_mem) { |
| 1531 | err = -ENOMEM; | 1599 | /* Retry minimum coherent mapping for st_seq and st_vsc */ |
| 1532 | printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", | 1600 | if (hba->cardtype == st_seq || |
| 1533 | pci_name(pdev)); | 1601 | (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { |
| 1534 | goto out_iounmap; | 1602 | printk(KERN_WARNING DRV_NAME |
| 1603 | "(%s): allocating min buffer for controller\n", | ||
| 1604 | pci_name(pdev)); | ||
| 1605 | hba->dma_size = hba->extra_offset | ||
| 1606 | + ST_ADDITIONAL_MEM_MIN; | ||
| 1607 | hba->dma_mem = dma_alloc_coherent(&pdev->dev, | ||
| 1608 | hba->dma_size, &hba->dma_handle, GFP_KERNEL); | ||
| 1609 | } | ||
| 1610 | |||
| 1611 | if (!hba->dma_mem) { | ||
| 1612 | err = -ENOMEM; | ||
| 1613 | printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", | ||
| 1614 | pci_name(pdev)); | ||
| 1615 | goto out_iounmap; | ||
| 1616 | } | ||
| 1535 | } | 1617 | } |
| 1536 | 1618 | ||
| 1537 | hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); | 1619 | hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); |
| @@ -1568,12 +1650,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1568 | 1650 | ||
| 1569 | hba->host = host; | 1651 | hba->host = host; |
| 1570 | hba->pdev = pdev; | 1652 | hba->pdev = pdev; |
| 1653 | init_waitqueue_head(&hba->reset_waitq); | ||
| 1654 | |||
| 1655 | snprintf(hba->work_q_name, sizeof(hba->work_q_name), | ||
| 1656 | "stex_wq_%d", host->host_no); | ||
| 1657 | hba->work_q = create_singlethread_workqueue(hba->work_q_name); | ||
| 1658 | if (!hba->work_q) { | ||
| 1659 | printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", | ||
| 1660 | pci_name(pdev)); | ||
| 1661 | err = -ENOMEM; | ||
| 1662 | goto out_ccb_free; | ||
| 1663 | } | ||
| 1664 | INIT_WORK(&hba->reset_work, stex_reset_work); | ||
| 1571 | 1665 | ||
| 1572 | err = stex_request_irq(hba); | 1666 | err = stex_request_irq(hba); |
| 1573 | if (err) { | 1667 | if (err) { |
| 1574 | printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", | 1668 | printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", |
| 1575 | pci_name(pdev)); | 1669 | pci_name(pdev)); |
| 1576 | goto out_ccb_free; | 1670 | goto out_free_wq; |
| 1577 | } | 1671 | } |
| 1578 | 1672 | ||
| 1579 | err = stex_handshake(hba); | 1673 | err = stex_handshake(hba); |
| @@ -1602,6 +1696,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1602 | 1696 | ||
| 1603 | out_free_irq: | 1697 | out_free_irq: |
| 1604 | stex_free_irq(hba); | 1698 | stex_free_irq(hba); |
| 1699 | out_free_wq: | ||
| 1700 | destroy_workqueue(hba->work_q); | ||
| 1605 | out_ccb_free: | 1701 | out_ccb_free: |
| 1606 | kfree(hba->ccb); | 1702 | kfree(hba->ccb); |
| 1607 | out_pci_free: | 1703 | out_pci_free: |
| @@ -1669,6 +1765,8 @@ static void stex_hba_free(struct st_hba *hba) | |||
| 1669 | { | 1765 | { |
| 1670 | stex_free_irq(hba); | 1766 | stex_free_irq(hba); |
| 1671 | 1767 | ||
| 1768 | destroy_workqueue(hba->work_q); | ||
| 1769 | |||
| 1672 | iounmap(hba->mmio_base); | 1770 | iounmap(hba->mmio_base); |
| 1673 | 1771 | ||
| 1674 | pci_release_regions(hba->pdev); | 1772 | pci_release_regions(hba->pdev); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 45374d66d26a..8b955b534a36 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
| @@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) | |||
| 984 | } | 984 | } |
| 985 | } | 985 | } |
| 986 | 986 | ||
| 987 | static int skip_spaces(char *ptr, int len) | 987 | static int sym_skip_spaces(char *ptr, int len) |
| 988 | { | 988 | { |
| 989 | int cnt, c; | 989 | int cnt, c; |
| 990 | 990 | ||
| @@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb) | |||
| 1012 | } | 1012 | } |
| 1013 | 1013 | ||
| 1014 | #define SKIP_SPACES(ptr, len) \ | 1014 | #define SKIP_SPACES(ptr, len) \ |
| 1015 | if ((arg_len = skip_spaces(ptr, len)) < 1) \ | 1015 | if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \ |
| 1016 | return -EINVAL; \ | 1016 | return -EINVAL; \ |
| 1017 | ptr += arg_len; len -= arg_len; | 1017 | ptr += arg_len; len -= arg_len; |
| 1018 | 1018 | ||
| @@ -1864,7 +1864,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) | |||
| 1864 | * | 1864 | * |
| 1865 | * This routine is similar to sym_set_workarounds(), except | 1865 | * This routine is similar to sym_set_workarounds(), except |
| 1866 | * that, at this point, we already know that the device was | 1866 | * that, at this point, we already know that the device was |
| 1867 | * succesfully intialized at least once before, and so most | 1867 | * successfully intialized at least once before, and so most |
| 1868 | * of the steps taken there are un-needed here. | 1868 | * of the steps taken there are un-needed here. |
| 1869 | */ | 1869 | */ |
| 1870 | static void sym2_reset_workarounds(struct pci_dev *pdev) | 1870 | static void sym2_reset_workarounds(struct pci_dev *pdev) |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 297deb817a5d..a7bc8b7b09ac 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
| @@ -2692,7 +2692,7 @@ static void sym_int_ma (struct sym_hcb *np) | |||
| 2692 | * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids | 2692 | * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids |
| 2693 | * bloat for such a should_not_happen situation). | 2693 | * bloat for such a should_not_happen situation). |
| 2694 | * In all other situation, we reset the BUS. | 2694 | * In all other situation, we reset the BUS. |
| 2695 | * Are these assumptions reasonnable ? (Wait and see ...) | 2695 | * Are these assumptions reasonable ? (Wait and see ...) |
| 2696 | */ | 2696 | */ |
| 2697 | unexpected_phase: | 2697 | unexpected_phase: |
| 2698 | dsp -= 8; | 2698 | dsp -= 8; |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 053e63c86822..5a80cbac3f92 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h | |||
| @@ -54,7 +54,7 @@ | |||
| 54 | * | 54 | * |
| 55 | * SYM_OPT_LIMIT_COMMAND_REORDERING | 55 | * SYM_OPT_LIMIT_COMMAND_REORDERING |
| 56 | * When this option is set, the driver tries to limit tagged | 56 | * When this option is set, the driver tries to limit tagged |
| 57 | * command reordering to some reasonnable value. | 57 | * command reordering to some reasonable value. |
| 58 | * (set for Linux) | 58 | * (set for Linux) |
| 59 | */ | 59 | */ |
| 60 | #if 0 | 60 | #if 0 |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c new file mode 100644 index 000000000000..d2604c813a20 --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.c | |||
| @@ -0,0 +1,1407 @@ | |||
| 1 | /* | ||
| 2 | * Linux driver for VMware's para-virtualized SCSI HBA. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; version 2 of the License and no later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
| 14 | * details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | * | ||
| 20 | * Maintained by: Alok N Kataria <akataria@vmware.com> | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | #include <linux/interrupt.h> | ||
| 27 | #include <linux/workqueue.h> | ||
| 28 | #include <linux/pci.h> | ||
| 29 | |||
| 30 | #include <scsi/scsi.h> | ||
| 31 | #include <scsi/scsi_host.h> | ||
| 32 | #include <scsi/scsi_cmnd.h> | ||
| 33 | #include <scsi/scsi_device.h> | ||
| 34 | |||
| 35 | #include "vmw_pvscsi.h" | ||
| 36 | |||
| 37 | #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" | ||
| 38 | |||
| 39 | MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); | ||
| 40 | MODULE_AUTHOR("VMware, Inc."); | ||
| 41 | MODULE_LICENSE("GPL"); | ||
| 42 | MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); | ||
| 43 | |||
| 44 | #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 | ||
| 45 | #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 | ||
| 46 | #define PVSCSI_DEFAULT_QUEUE_DEPTH 64 | ||
| 47 | #define SGL_SIZE PAGE_SIZE | ||
| 48 | |||
| 49 | struct pvscsi_sg_list { | ||
| 50 | struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; | ||
| 51 | }; | ||
| 52 | |||
| 53 | struct pvscsi_ctx { | ||
| 54 | /* | ||
| 55 | * The index of the context in cmd_map serves as the context ID for a | ||
| 56 | * 1-to-1 mapping completions back to requests. | ||
| 57 | */ | ||
| 58 | struct scsi_cmnd *cmd; | ||
| 59 | struct pvscsi_sg_list *sgl; | ||
| 60 | struct list_head list; | ||
| 61 | dma_addr_t dataPA; | ||
| 62 | dma_addr_t sensePA; | ||
| 63 | dma_addr_t sglPA; | ||
| 64 | }; | ||
| 65 | |||
| 66 | struct pvscsi_adapter { | ||
| 67 | char *mmioBase; | ||
| 68 | unsigned int irq; | ||
| 69 | u8 rev; | ||
| 70 | bool use_msi; | ||
| 71 | bool use_msix; | ||
| 72 | bool use_msg; | ||
| 73 | |||
| 74 | spinlock_t hw_lock; | ||
| 75 | |||
| 76 | struct workqueue_struct *workqueue; | ||
| 77 | struct work_struct work; | ||
| 78 | |||
| 79 | struct PVSCSIRingReqDesc *req_ring; | ||
| 80 | unsigned req_pages; | ||
| 81 | unsigned req_depth; | ||
| 82 | dma_addr_t reqRingPA; | ||
| 83 | |||
| 84 | struct PVSCSIRingCmpDesc *cmp_ring; | ||
| 85 | unsigned cmp_pages; | ||
| 86 | dma_addr_t cmpRingPA; | ||
| 87 | |||
| 88 | struct PVSCSIRingMsgDesc *msg_ring; | ||
| 89 | unsigned msg_pages; | ||
| 90 | dma_addr_t msgRingPA; | ||
| 91 | |||
| 92 | struct PVSCSIRingsState *rings_state; | ||
| 93 | dma_addr_t ringStatePA; | ||
| 94 | |||
| 95 | struct pci_dev *dev; | ||
| 96 | struct Scsi_Host *host; | ||
| 97 | |||
| 98 | struct list_head cmd_pool; | ||
| 99 | struct pvscsi_ctx *cmd_map; | ||
| 100 | }; | ||
| 101 | |||
| 102 | |||
| 103 | /* Command line parameters */ | ||
| 104 | static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; | ||
| 105 | static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; | ||
| 106 | static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; | ||
| 107 | static bool pvscsi_disable_msi; | ||
| 108 | static bool pvscsi_disable_msix; | ||
| 109 | static bool pvscsi_use_msg = true; | ||
| 110 | |||
| 111 | #define PVSCSI_RW (S_IRUSR | S_IWUSR) | ||
| 112 | |||
| 113 | module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); | ||
| 114 | MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" | ||
| 115 | __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); | ||
| 116 | |||
| 117 | module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); | ||
| 118 | MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" | ||
| 119 | __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); | ||
| 120 | |||
| 121 | module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); | ||
| 122 | MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" | ||
| 123 | __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); | ||
| 124 | |||
| 125 | module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); | ||
| 126 | MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); | ||
| 127 | |||
| 128 | module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); | ||
| 129 | MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); | ||
| 130 | |||
| 131 | module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); | ||
| 132 | MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); | ||
| 133 | |||
| 134 | static const struct pci_device_id pvscsi_pci_tbl[] = { | ||
| 135 | { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, | ||
| 136 | { 0 } | ||
| 137 | }; | ||
| 138 | |||
| 139 | MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); | ||
| 140 | |||
| 141 | static struct device * | ||
| 142 | pvscsi_dev(const struct pvscsi_adapter *adapter) | ||
| 143 | { | ||
| 144 | return &(adapter->dev->dev); | ||
| 145 | } | ||
| 146 | |||
| 147 | static struct pvscsi_ctx * | ||
| 148 | pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) | ||
| 149 | { | ||
| 150 | struct pvscsi_ctx *ctx, *end; | ||
| 151 | |||
| 152 | end = &adapter->cmd_map[adapter->req_depth]; | ||
| 153 | for (ctx = adapter->cmd_map; ctx < end; ctx++) | ||
| 154 | if (ctx->cmd == cmd) | ||
| 155 | return ctx; | ||
| 156 | |||
| 157 | return NULL; | ||
| 158 | } | ||
| 159 | |||
| 160 | static struct pvscsi_ctx * | ||
| 161 | pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) | ||
| 162 | { | ||
| 163 | struct pvscsi_ctx *ctx; | ||
| 164 | |||
| 165 | if (list_empty(&adapter->cmd_pool)) | ||
| 166 | return NULL; | ||
| 167 | |||
| 168 | ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); | ||
| 169 | ctx->cmd = cmd; | ||
| 170 | list_del(&ctx->list); | ||
| 171 | |||
| 172 | return ctx; | ||
| 173 | } | ||
| 174 | |||
| 175 | static void pvscsi_release_context(struct pvscsi_adapter *adapter, | ||
| 176 | struct pvscsi_ctx *ctx) | ||
| 177 | { | ||
| 178 | ctx->cmd = NULL; | ||
| 179 | list_add(&ctx->list, &adapter->cmd_pool); | ||
| 180 | } | ||
| 181 | |||
| 182 | /* | ||
| 183 | * Map a pvscsi_ctx struct to a context ID field value; we map to a simple | ||
| 184 | * non-zero integer. ctx always points to an entry in cmd_map array, hence | ||
| 185 | * the return value is always >=1. | ||
| 186 | */ | ||
| 187 | static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, | ||
| 188 | const struct pvscsi_ctx *ctx) | ||
| 189 | { | ||
| 190 | return ctx - adapter->cmd_map + 1; | ||
| 191 | } | ||
| 192 | |||
| 193 | static struct pvscsi_ctx * | ||
| 194 | pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) | ||
| 195 | { | ||
| 196 | return &adapter->cmd_map[context - 1]; | ||
| 197 | } | ||
| 198 | |||
| 199 | static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, | ||
| 200 | u32 offset, u32 val) | ||
| 201 | { | ||
| 202 | writel(val, adapter->mmioBase + offset); | ||
| 203 | } | ||
| 204 | |||
| 205 | static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) | ||
| 206 | { | ||
| 207 | return readl(adapter->mmioBase + offset); | ||
| 208 | } | ||
| 209 | |||
| 210 | static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) | ||
| 211 | { | ||
| 212 | return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); | ||
| 213 | } | ||
| 214 | |||
| 215 | static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, | ||
| 216 | u32 val) | ||
| 217 | { | ||
| 218 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); | ||
| 219 | } | ||
| 220 | |||
| 221 | static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) | ||
| 222 | { | ||
| 223 | u32 intr_bits; | ||
| 224 | |||
| 225 | intr_bits = PVSCSI_INTR_CMPL_MASK; | ||
| 226 | if (adapter->use_msg) | ||
| 227 | intr_bits |= PVSCSI_INTR_MSG_MASK; | ||
| 228 | |||
| 229 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); | ||
| 230 | } | ||
| 231 | |||
| 232 | static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) | ||
| 233 | { | ||
| 234 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); | ||
| 235 | } | ||
| 236 | |||
| 237 | static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, | ||
| 238 | u32 cmd, const void *desc, size_t len) | ||
| 239 | { | ||
| 240 | const u32 *ptr = desc; | ||
| 241 | size_t i; | ||
| 242 | |||
| 243 | len /= sizeof(*ptr); | ||
| 244 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); | ||
| 245 | for (i = 0; i < len; i++) | ||
| 246 | pvscsi_reg_write(adapter, | ||
| 247 | PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, | ||
| 251 | const struct pvscsi_ctx *ctx) | ||
| 252 | { | ||
| 253 | struct PVSCSICmdDescAbortCmd cmd = { 0 }; | ||
| 254 | |||
| 255 | cmd.target = ctx->cmd->device->id; | ||
| 256 | cmd.context = pvscsi_map_context(adapter, ctx); | ||
| 257 | |||
| 258 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); | ||
| 259 | } | ||
| 260 | |||
| 261 | static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) | ||
| 262 | { | ||
| 263 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); | ||
| 264 | } | ||
| 265 | |||
| 266 | static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) | ||
| 267 | { | ||
| 268 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); | ||
| 269 | } | ||
| 270 | |||
| 271 | static int scsi_is_rw(unsigned char op) | ||
| 272 | { | ||
| 273 | return op == READ_6 || op == WRITE_6 || | ||
| 274 | op == READ_10 || op == WRITE_10 || | ||
| 275 | op == READ_12 || op == WRITE_12 || | ||
| 276 | op == READ_16 || op == WRITE_16; | ||
| 277 | } | ||
| 278 | |||
| 279 | static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, | ||
| 280 | unsigned char op) | ||
| 281 | { | ||
| 282 | if (scsi_is_rw(op)) | ||
| 283 | pvscsi_kick_rw_io(adapter); | ||
| 284 | else | ||
| 285 | pvscsi_process_request_ring(adapter); | ||
| 286 | } | ||
| 287 | |||
| 288 | static void ll_adapter_reset(const struct pvscsi_adapter *adapter) | ||
| 289 | { | ||
| 290 | dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); | ||
| 291 | |||
| 292 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); | ||
| 293 | } | ||
| 294 | |||
| 295 | static void ll_bus_reset(const struct pvscsi_adapter *adapter) | ||
| 296 | { | ||
| 297 | dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter); | ||
| 298 | |||
| 299 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); | ||
| 300 | } | ||
| 301 | |||
| 302 | static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) | ||
| 303 | { | ||
| 304 | struct PVSCSICmdDescResetDevice cmd = { 0 }; | ||
| 305 | |||
| 306 | dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target); | ||
| 307 | |||
| 308 | cmd.target = target; | ||
| 309 | |||
| 310 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, | ||
| 311 | &cmd, sizeof(cmd)); | ||
| 312 | } | ||
| 313 | |||
| 314 | static void pvscsi_create_sg(struct pvscsi_ctx *ctx, | ||
| 315 | struct scatterlist *sg, unsigned count) | ||
| 316 | { | ||
| 317 | unsigned i; | ||
| 318 | struct PVSCSISGElement *sge; | ||
| 319 | |||
| 320 | BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); | ||
| 321 | |||
| 322 | sge = &ctx->sgl->sge[0]; | ||
| 323 | for (i = 0; i < count; i++, sg++) { | ||
| 324 | sge[i].addr = sg_dma_address(sg); | ||
| 325 | sge[i].length = sg_dma_len(sg); | ||
| 326 | sge[i].flags = 0; | ||
| 327 | } | ||
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * Map all data buffers for a command into PCI space and | ||
| 332 | * setup the scatter/gather list if needed. | ||
| 333 | */ | ||
| 334 | static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, | ||
| 335 | struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, | ||
| 336 | struct PVSCSIRingReqDesc *e) | ||
| 337 | { | ||
| 338 | unsigned count; | ||
| 339 | unsigned bufflen = scsi_bufflen(cmd); | ||
| 340 | struct scatterlist *sg; | ||
| 341 | |||
| 342 | e->dataLen = bufflen; | ||
| 343 | e->dataAddr = 0; | ||
| 344 | if (bufflen == 0) | ||
| 345 | return; | ||
| 346 | |||
| 347 | sg = scsi_sglist(cmd); | ||
| 348 | count = scsi_sg_count(cmd); | ||
| 349 | if (count != 0) { | ||
| 350 | int segs = scsi_dma_map(cmd); | ||
| 351 | if (segs > 1) { | ||
| 352 | pvscsi_create_sg(ctx, sg, segs); | ||
| 353 | |||
| 354 | e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; | ||
| 355 | ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, | ||
| 356 | SGL_SIZE, PCI_DMA_TODEVICE); | ||
| 357 | e->dataAddr = ctx->sglPA; | ||
| 358 | } else | ||
| 359 | e->dataAddr = sg_dma_address(sg); | ||
| 360 | } else { | ||
| 361 | /* | ||
| 362 | * In case there is no S/G list, scsi_sglist points | ||
| 363 | * directly to the buffer. | ||
| 364 | */ | ||
| 365 | ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, | ||
| 366 | cmd->sc_data_direction); | ||
| 367 | e->dataAddr = ctx->dataPA; | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 371 | static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, | ||
| 372 | struct pvscsi_ctx *ctx) | ||
| 373 | { | ||
| 374 | struct scsi_cmnd *cmd; | ||
| 375 | unsigned bufflen; | ||
| 376 | |||
| 377 | cmd = ctx->cmd; | ||
| 378 | bufflen = scsi_bufflen(cmd); | ||
| 379 | |||
| 380 | if (bufflen != 0) { | ||
| 381 | unsigned count = scsi_sg_count(cmd); | ||
| 382 | |||
| 383 | if (count != 0) { | ||
| 384 | scsi_dma_unmap(cmd); | ||
| 385 | if (ctx->sglPA) { | ||
| 386 | pci_unmap_single(adapter->dev, ctx->sglPA, | ||
| 387 | SGL_SIZE, PCI_DMA_TODEVICE); | ||
| 388 | ctx->sglPA = 0; | ||
| 389 | } | ||
| 390 | } else | ||
| 391 | pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, | ||
| 392 | cmd->sc_data_direction); | ||
| 393 | } | ||
| 394 | if (cmd->sense_buffer) | ||
| 395 | pci_unmap_single(adapter->dev, ctx->sensePA, | ||
| 396 | SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); | ||
| 397 | } | ||
| 398 | |||
| 399 | static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter) | ||
| 400 | { | ||
| 401 | adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, | ||
| 402 | &adapter->ringStatePA); | ||
| 403 | if (!adapter->rings_state) | ||
| 404 | return -ENOMEM; | ||
| 405 | |||
| 406 | adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, | ||
| 407 | pvscsi_ring_pages); | ||
| 408 | adapter->req_depth = adapter->req_pages | ||
| 409 | * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; | ||
| 410 | adapter->req_ring = pci_alloc_consistent(adapter->dev, | ||
| 411 | adapter->req_pages * PAGE_SIZE, | ||
| 412 | &adapter->reqRingPA); | ||
| 413 | if (!adapter->req_ring) | ||
| 414 | return -ENOMEM; | ||
| 415 | |||
| 416 | adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, | ||
| 417 | pvscsi_ring_pages); | ||
| 418 | adapter->cmp_ring = pci_alloc_consistent(adapter->dev, | ||
| 419 | adapter->cmp_pages * PAGE_SIZE, | ||
| 420 | &adapter->cmpRingPA); | ||
| 421 | if (!adapter->cmp_ring) | ||
| 422 | return -ENOMEM; | ||
| 423 | |||
| 424 | BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); | ||
| 425 | BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); | ||
| 426 | BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); | ||
| 427 | |||
| 428 | if (!adapter->use_msg) | ||
| 429 | return 0; | ||
| 430 | |||
| 431 | adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, | ||
| 432 | pvscsi_msg_ring_pages); | ||
| 433 | adapter->msg_ring = pci_alloc_consistent(adapter->dev, | ||
| 434 | adapter->msg_pages * PAGE_SIZE, | ||
| 435 | &adapter->msgRingPA); | ||
| 436 | if (!adapter->msg_ring) | ||
| 437 | return -ENOMEM; | ||
| 438 | BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); | ||
| 439 | |||
| 440 | return 0; | ||
| 441 | } | ||
| 442 | |||
| 443 | static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) | ||
| 444 | { | ||
| 445 | struct PVSCSICmdDescSetupRings cmd = { 0 }; | ||
| 446 | dma_addr_t base; | ||
| 447 | unsigned i; | ||
| 448 | |||
| 449 | cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; | ||
| 450 | cmd.reqRingNumPages = adapter->req_pages; | ||
| 451 | cmd.cmpRingNumPages = adapter->cmp_pages; | ||
| 452 | |||
| 453 | base = adapter->reqRingPA; | ||
| 454 | for (i = 0; i < adapter->req_pages; i++) { | ||
| 455 | cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; | ||
| 456 | base += PAGE_SIZE; | ||
| 457 | } | ||
| 458 | |||
| 459 | base = adapter->cmpRingPA; | ||
| 460 | for (i = 0; i < adapter->cmp_pages; i++) { | ||
| 461 | cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; | ||
| 462 | base += PAGE_SIZE; | ||
| 463 | } | ||
| 464 | |||
| 465 | memset(adapter->rings_state, 0, PAGE_SIZE); | ||
| 466 | memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); | ||
| 467 | memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); | ||
| 468 | |||
| 469 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, | ||
| 470 | &cmd, sizeof(cmd)); | ||
| 471 | |||
| 472 | if (adapter->use_msg) { | ||
| 473 | struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; | ||
| 474 | |||
| 475 | cmd_msg.numPages = adapter->msg_pages; | ||
| 476 | |||
| 477 | base = adapter->msgRingPA; | ||
| 478 | for (i = 0; i < adapter->msg_pages; i++) { | ||
| 479 | cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; | ||
| 480 | base += PAGE_SIZE; | ||
| 481 | } | ||
| 482 | memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); | ||
| 483 | |||
| 484 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, | ||
| 485 | &cmd_msg, sizeof(cmd_msg)); | ||
| 486 | } | ||
| 487 | } | ||
| 488 | |||
| 489 | /* | ||
| 490 | * Pull a completion descriptor off and pass the completion back | ||
| 491 | * to the SCSI mid layer. | ||
| 492 | */ | ||
| 493 | static void pvscsi_complete_request(struct pvscsi_adapter *adapter, | ||
| 494 | const struct PVSCSIRingCmpDesc *e) | ||
| 495 | { | ||
| 496 | struct pvscsi_ctx *ctx; | ||
| 497 | struct scsi_cmnd *cmd; | ||
| 498 | u32 btstat = e->hostStatus; | ||
| 499 | u32 sdstat = e->scsiStatus; | ||
| 500 | |||
| 501 | ctx = pvscsi_get_context(adapter, e->context); | ||
| 502 | cmd = ctx->cmd; | ||
| 503 | pvscsi_unmap_buffers(adapter, ctx); | ||
| 504 | pvscsi_release_context(adapter, ctx); | ||
| 505 | cmd->result = 0; | ||
| 506 | |||
| 507 | if (sdstat != SAM_STAT_GOOD && | ||
| 508 | (btstat == BTSTAT_SUCCESS || | ||
| 509 | btstat == BTSTAT_LINKED_COMMAND_COMPLETED || | ||
| 510 | btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { | ||
| 511 | cmd->result = (DID_OK << 16) | sdstat; | ||
| 512 | if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) | ||
| 513 | cmd->result |= (DRIVER_SENSE << 24); | ||
| 514 | } else | ||
| 515 | switch (btstat) { | ||
| 516 | case BTSTAT_SUCCESS: | ||
| 517 | case BTSTAT_LINKED_COMMAND_COMPLETED: | ||
| 518 | case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: | ||
| 519 | /* If everything went fine, let's move on.. */ | ||
| 520 | cmd->result = (DID_OK << 16); | ||
| 521 | break; | ||
| 522 | |||
| 523 | case BTSTAT_DATARUN: | ||
| 524 | case BTSTAT_DATA_UNDERRUN: | ||
| 525 | /* Report residual data in underruns */ | ||
| 526 | scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); | ||
| 527 | cmd->result = (DID_ERROR << 16); | ||
| 528 | break; | ||
| 529 | |||
| 530 | case BTSTAT_SELTIMEO: | ||
| 531 | /* Our emulation returns this for non-connected devs */ | ||
| 532 | cmd->result = (DID_BAD_TARGET << 16); | ||
| 533 | break; | ||
| 534 | |||
| 535 | case BTSTAT_LUNMISMATCH: | ||
| 536 | case BTSTAT_TAGREJECT: | ||
| 537 | case BTSTAT_BADMSG: | ||
| 538 | cmd->result = (DRIVER_INVALID << 24); | ||
| 539 | /* fall through */ | ||
| 540 | |||
| 541 | case BTSTAT_HAHARDWARE: | ||
| 542 | case BTSTAT_INVPHASE: | ||
| 543 | case BTSTAT_HATIMEOUT: | ||
| 544 | case BTSTAT_NORESPONSE: | ||
| 545 | case BTSTAT_DISCONNECT: | ||
| 546 | case BTSTAT_HASOFTWARE: | ||
| 547 | case BTSTAT_BUSFREE: | ||
| 548 | case BTSTAT_SENSFAILED: | ||
| 549 | cmd->result |= (DID_ERROR << 16); | ||
| 550 | break; | ||
| 551 | |||
| 552 | case BTSTAT_SENTRST: | ||
| 553 | case BTSTAT_RECVRST: | ||
| 554 | case BTSTAT_BUSRESET: | ||
| 555 | cmd->result = (DID_RESET << 16); | ||
| 556 | break; | ||
| 557 | |||
| 558 | case BTSTAT_ABORTQUEUE: | ||
| 559 | cmd->result = (DID_ABORT << 16); | ||
| 560 | break; | ||
| 561 | |||
| 562 | case BTSTAT_SCSIPARITY: | ||
| 563 | cmd->result = (DID_PARITY << 16); | ||
| 564 | break; | ||
| 565 | |||
| 566 | default: | ||
| 567 | cmd->result = (DID_ERROR << 16); | ||
| 568 | scmd_printk(KERN_DEBUG, cmd, | ||
| 569 | "Unknown completion status: 0x%x\n", | ||
| 570 | btstat); | ||
| 571 | } | ||
| 572 | |||
| 573 | dev_dbg(&cmd->device->sdev_gendev, | ||
| 574 | "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", | ||
| 575 | cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); | ||
| 576 | |||
| 577 | cmd->scsi_done(cmd); | ||
| 578 | } | ||
| 579 | |||
| 580 | /* | ||
| 581 | * barrier usage : Since the PVSCSI device is emulated, there could be cases | ||
| 582 | * where we may want to serialize some accesses between the driver and the | ||
| 583 | * emulation layer. We use compiler barriers instead of the more expensive | ||
| 584 | * memory barriers because PVSCSI is only supported on X86 which has strong | ||
| 585 | * memory access ordering. | ||
| 586 | */ | ||
| 587 | static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) | ||
| 588 | { | ||
| 589 | struct PVSCSIRingsState *s = adapter->rings_state; | ||
| 590 | struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; | ||
| 591 | u32 cmp_entries = s->cmpNumEntriesLog2; | ||
| 592 | |||
| 593 | while (s->cmpConsIdx != s->cmpProdIdx) { | ||
| 594 | struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & | ||
| 595 | MASK(cmp_entries)); | ||
| 596 | /* | ||
| 597 | * This barrier() ensures that *e is not dereferenced while | ||
| 598 | * the device emulation still writes data into the slot. | ||
| 599 | * Since the device emulation advances s->cmpProdIdx only after | ||
| 600 | * updating the slot we want to check it first. | ||
| 601 | */ | ||
| 602 | barrier(); | ||
| 603 | pvscsi_complete_request(adapter, e); | ||
| 604 | /* | ||
| 605 | * This barrier() ensures that compiler doesn't reorder write | ||
| 606 | * to s->cmpConsIdx before the read of (*e) inside | ||
| 607 | * pvscsi_complete_request. Otherwise, device emulation may | ||
| 608 | * overwrite *e before we had a chance to read it. | ||
| 609 | */ | ||
| 610 | barrier(); | ||
| 611 | s->cmpConsIdx++; | ||
| 612 | } | ||
| 613 | } | ||
| 614 | |||
| 615 | /* | ||
| 616 | * Translate a Linux SCSI request into a request ring entry. | ||
| 617 | */ | ||
| 618 | static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | ||
| 619 | struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) | ||
| 620 | { | ||
| 621 | struct PVSCSIRingsState *s; | ||
| 622 | struct PVSCSIRingReqDesc *e; | ||
| 623 | struct scsi_device *sdev; | ||
| 624 | u32 req_entries; | ||
| 625 | |||
| 626 | s = adapter->rings_state; | ||
| 627 | sdev = cmd->device; | ||
| 628 | req_entries = s->reqNumEntriesLog2; | ||
| 629 | |||
| 630 | /* | ||
| 631 | * If this condition holds, we might have room on the request ring, but | ||
| 632 | * we might not have room on the completion ring for the response. | ||
| 633 | * However, we have already ruled out this possibility - we would not | ||
| 634 | * have successfully allocated a context if it were true, since we only | ||
| 635 | * have one context per request entry. Check for it anyway, since it | ||
| 636 | * would be a serious bug. | ||
| 637 | */ | ||
| 638 | if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { | ||
| 639 | scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " | ||
| 640 | "ring full: reqProdIdx=%d cmpConsIdx=%d\n", | ||
| 641 | s->reqProdIdx, s->cmpConsIdx); | ||
| 642 | return -1; | ||
| 643 | } | ||
| 644 | |||
| 645 | e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); | ||
| 646 | |||
| 647 | e->bus = sdev->channel; | ||
| 648 | e->target = sdev->id; | ||
| 649 | memset(e->lun, 0, sizeof(e->lun)); | ||
| 650 | e->lun[1] = sdev->lun; | ||
| 651 | |||
| 652 | if (cmd->sense_buffer) { | ||
| 653 | ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, | ||
| 654 | SCSI_SENSE_BUFFERSIZE, | ||
| 655 | PCI_DMA_FROMDEVICE); | ||
| 656 | e->senseAddr = ctx->sensePA; | ||
| 657 | e->senseLen = SCSI_SENSE_BUFFERSIZE; | ||
| 658 | } else { | ||
| 659 | e->senseLen = 0; | ||
| 660 | e->senseAddr = 0; | ||
| 661 | } | ||
| 662 | e->cdbLen = cmd->cmd_len; | ||
| 663 | e->vcpuHint = smp_processor_id(); | ||
| 664 | memcpy(e->cdb, cmd->cmnd, e->cdbLen); | ||
| 665 | |||
| 666 | e->tag = SIMPLE_QUEUE_TAG; | ||
| 667 | if (sdev->tagged_supported && | ||
| 668 | (cmd->tag == HEAD_OF_QUEUE_TAG || | ||
| 669 | cmd->tag == ORDERED_QUEUE_TAG)) | ||
| 670 | e->tag = cmd->tag; | ||
| 671 | |||
| 672 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
| 673 | e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; | ||
| 674 | else if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
| 675 | e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; | ||
| 676 | else if (cmd->sc_data_direction == DMA_NONE) | ||
| 677 | e->flags = PVSCSI_FLAG_CMD_DIR_NONE; | ||
| 678 | else | ||
| 679 | e->flags = 0; | ||
| 680 | |||
| 681 | pvscsi_map_buffers(adapter, ctx, cmd, e); | ||
| 682 | |||
| 683 | e->context = pvscsi_map_context(adapter, ctx); | ||
| 684 | |||
| 685 | barrier(); | ||
| 686 | |||
| 687 | s->reqProdIdx++; | ||
| 688 | |||
| 689 | return 0; | ||
| 690 | } | ||
| 691 | |||
| 692 | static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
| 693 | { | ||
| 694 | struct Scsi_Host *host = cmd->device->host; | ||
| 695 | struct pvscsi_adapter *adapter = shost_priv(host); | ||
| 696 | struct pvscsi_ctx *ctx; | ||
| 697 | unsigned long flags; | ||
| 698 | |||
| 699 | spin_lock_irqsave(&adapter->hw_lock, flags); | ||
| 700 | |||
| 701 | ctx = pvscsi_acquire_context(adapter, cmd); | ||
| 702 | if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { | ||
| 703 | if (ctx) | ||
| 704 | pvscsi_release_context(adapter, ctx); | ||
| 705 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 706 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 707 | } | ||
| 708 | |||
| 709 | cmd->scsi_done = done; | ||
| 710 | |||
| 711 | dev_dbg(&cmd->device->sdev_gendev, | ||
| 712 | "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); | ||
| 713 | |||
| 714 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 715 | |||
| 716 | pvscsi_kick_io(adapter, cmd->cmnd[0]); | ||
| 717 | |||
| 718 | return 0; | ||
| 719 | } | ||
| 720 | |||
| 721 | static int pvscsi_abort(struct scsi_cmnd *cmd) | ||
| 722 | { | ||
| 723 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); | ||
| 724 | struct pvscsi_ctx *ctx; | ||
| 725 | unsigned long flags; | ||
| 726 | |||
| 727 | scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", | ||
| 728 | adapter->host->host_no, cmd); | ||
| 729 | |||
| 730 | spin_lock_irqsave(&adapter->hw_lock, flags); | ||
| 731 | |||
| 732 | /* | ||
| 733 | * Poll the completion ring first - we might be trying to abort | ||
| 734 | * a command that is waiting to be dispatched in the completion ring. | ||
| 735 | */ | ||
| 736 | pvscsi_process_completion_ring(adapter); | ||
| 737 | |||
| 738 | /* | ||
| 739 | * If there is no context for the command, it either already succeeded | ||
| 740 | * or else was never properly issued. Not our problem. | ||
| 741 | */ | ||
| 742 | ctx = pvscsi_find_context(adapter, cmd); | ||
| 743 | if (!ctx) { | ||
| 744 | scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); | ||
| 745 | goto out; | ||
| 746 | } | ||
| 747 | |||
| 748 | pvscsi_abort_cmd(adapter, ctx); | ||
| 749 | |||
| 750 | pvscsi_process_completion_ring(adapter); | ||
| 751 | |||
| 752 | out: | ||
| 753 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 754 | return SUCCESS; | ||
| 755 | } | ||
| 756 | |||
| 757 | /* | ||
| 758 | * Abort all outstanding requests. This is only safe to use if the completion | ||
| 759 | * ring will never be walked again or the device has been reset, because it | ||
| 760 | * destroys the 1-1 mapping between context field passed to emulation and our | ||
| 761 | * request structure. | ||
| 762 | */ | ||
| 763 | static void pvscsi_reset_all(struct pvscsi_adapter *adapter) | ||
| 764 | { | ||
| 765 | unsigned i; | ||
| 766 | |||
| 767 | for (i = 0; i < adapter->req_depth; i++) { | ||
| 768 | struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; | ||
| 769 | struct scsi_cmnd *cmd = ctx->cmd; | ||
| 770 | if (cmd) { | ||
| 771 | scmd_printk(KERN_ERR, cmd, | ||
| 772 | "Forced reset on cmd %p\n", cmd); | ||
| 773 | pvscsi_unmap_buffers(adapter, ctx); | ||
| 774 | pvscsi_release_context(adapter, ctx); | ||
| 775 | cmd->result = (DID_RESET << 16); | ||
| 776 | cmd->scsi_done(cmd); | ||
| 777 | } | ||
| 778 | } | ||
| 779 | } | ||
| 780 | |||
/*
 * eh_host_reset_handler: tear down and rebuild the whole adapter ring
 * structure while holding hw_lock, completing every in-flight command
 * with DID_RESET.
 */
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		/*
		 * Clear use_msg under the lock so the ISR stops queueing
		 * msg work, then drop the lock before flushing: the flush
		 * can sleep and the queued handler must be able to run.
		 */
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stalling new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions. Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset. The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	/* Fail whatever is still in flight, then rebuild the rings. */
	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}
| 834 | |||
| 835 | static int pvscsi_bus_reset(struct scsi_cmnd *cmd) | ||
| 836 | { | ||
| 837 | struct Scsi_Host *host = cmd->device->host; | ||
| 838 | struct pvscsi_adapter *adapter = shost_priv(host); | ||
| 839 | unsigned long flags; | ||
| 840 | |||
| 841 | scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); | ||
| 842 | |||
| 843 | /* | ||
| 844 | * We don't want to queue new requests for this bus after | ||
| 845 | * flushing all pending requests to emulation, since new | ||
| 846 | * requests could then sneak in during this bus reset phase, | ||
| 847 | * so take the lock now. | ||
| 848 | */ | ||
| 849 | spin_lock_irqsave(&adapter->hw_lock, flags); | ||
| 850 | |||
| 851 | pvscsi_process_request_ring(adapter); | ||
| 852 | ll_bus_reset(adapter); | ||
| 853 | pvscsi_process_completion_ring(adapter); | ||
| 854 | |||
| 855 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 856 | |||
| 857 | return SUCCESS; | ||
| 858 | } | ||
| 859 | |||
| 860 | static int pvscsi_device_reset(struct scsi_cmnd *cmd) | ||
| 861 | { | ||
| 862 | struct Scsi_Host *host = cmd->device->host; | ||
| 863 | struct pvscsi_adapter *adapter = shost_priv(host); | ||
| 864 | unsigned long flags; | ||
| 865 | |||
| 866 | scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", | ||
| 867 | host->host_no, cmd->device->id); | ||
| 868 | |||
| 869 | /* | ||
| 870 | * We don't want to queue new requests for this device after flushing | ||
| 871 | * all pending requests to emulation, since new requests could then | ||
| 872 | * sneak in during this device reset phase, so take the lock now. | ||
| 873 | */ | ||
| 874 | spin_lock_irqsave(&adapter->hw_lock, flags); | ||
| 875 | |||
| 876 | pvscsi_process_request_ring(adapter); | ||
| 877 | ll_device_reset(adapter, cmd->device->id); | ||
| 878 | pvscsi_process_completion_ring(adapter); | ||
| 879 | |||
| 880 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 881 | |||
| 882 | return SUCCESS; | ||
| 883 | } | ||
| 884 | |||
| 885 | static struct scsi_host_template pvscsi_template; | ||
| 886 | |||
| 887 | static const char *pvscsi_info(struct Scsi_Host *host) | ||
| 888 | { | ||
| 889 | struct pvscsi_adapter *adapter = shost_priv(host); | ||
| 890 | static char buf[256]; | ||
| 891 | |||
| 892 | sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " | ||
| 893 | "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, | ||
| 894 | adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, | ||
| 895 | pvscsi_template.cmd_per_lun); | ||
| 896 | |||
| 897 | return buf; | ||
| 898 | } | ||
| 899 | |||
/*
 * SCSI host template. can_queue and cmd_per_lun are filled in at probe
 * time from the module parameters (see pvscsi_probe()).
 */
static struct scsi_host_template pvscsi_template = {
	.module = THIS_MODULE,
	.name = "VMware PVSCSI Host Adapter",
	.proc_name = "vmw_pvscsi",
	.info = pvscsi_info,
	.queuecommand = pvscsi_queue,
	.this_id = -1,
	.sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary = UINT_MAX,
	.max_sectors = 0xffff,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = pvscsi_abort,
	.eh_device_reset_handler = pvscsi_device_reset,
	.eh_bus_reset_handler = pvscsi_bus_reset,
	.eh_host_reset_handler = pvscsi_host_reset,
};
| 916 | |||
/*
 * Handle one msg-ring event: hot add/remove of a device reported by the
 * emulation. Runs from the msg workqueue, i.e. in process context, so
 * scsi_add_device()/scsi_remove_device() may sleep.
 */
static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	/* Catch at compile time if new msg types are added without
	 * extending the handling below. */
	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		/* lun[1]: second byte of the 8-byte SCSI LUN field;
		 * presumably single-level LUN addressing -- TODO confirm
		 * against the device interface spec. */
		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		/* Pin the host so it cannot go away while we scan. */
		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}
| 974 | |||
| 975 | static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) | ||
| 976 | { | ||
| 977 | struct PVSCSIRingsState *s = adapter->rings_state; | ||
| 978 | |||
| 979 | return s->msgProdIdx != s->msgConsIdx; | ||
| 980 | } | ||
| 981 | |||
/*
 * Drain all pending msg-ring entries, dispatching each to
 * pvscsi_process_msg().
 */
static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		/* The ring size is a power of two; mask the consumer index
		 * down to a slot number. */
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		/* Compiler barriers: read the descriptor only after seeing
		 * the producer index, and publish the consumer-index bump
		 * only after we are done with the descriptor. */
		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}
| 998 | |||
| 999 | static void pvscsi_msg_workqueue_handler(struct work_struct *data) | ||
| 1000 | { | ||
| 1001 | struct pvscsi_adapter *adapter; | ||
| 1002 | |||
| 1003 | adapter = container_of(data, struct pvscsi_adapter, work); | ||
| 1004 | |||
| 1005 | pvscsi_process_msg_ring(adapter); | ||
| 1006 | } | ||
| 1007 | |||
/*
 * Probe for msg-ring support and create the workqueue that services it.
 *
 * Returns 1 if the device supports the msg ring and the workqueue was
 * created (value stored in adapter->use_msg), 0 otherwise.
 */
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	/* Per the device interface, writing SETUP_MSG_RING and then
	 * reading COMMAND_STATUS == -1 means the command (and therefore
	 * the msg ring) is not supported by this virtual hardware. */
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}
| 1033 | |||
| 1034 | static irqreturn_t pvscsi_isr(int irq, void *devp) | ||
| 1035 | { | ||
| 1036 | struct pvscsi_adapter *adapter = devp; | ||
| 1037 | int handled; | ||
| 1038 | |||
| 1039 | if (adapter->use_msi || adapter->use_msix) | ||
| 1040 | handled = true; | ||
| 1041 | else { | ||
| 1042 | u32 val = pvscsi_read_intr_status(adapter); | ||
| 1043 | handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; | ||
| 1044 | if (handled) | ||
| 1045 | pvscsi_write_intr_status(devp, val); | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | if (handled) { | ||
| 1049 | unsigned long flags; | ||
| 1050 | |||
| 1051 | spin_lock_irqsave(&adapter->hw_lock, flags); | ||
| 1052 | |||
| 1053 | pvscsi_process_completion_ring(adapter); | ||
| 1054 | if (adapter->use_msg && pvscsi_msg_pending(adapter)) | ||
| 1055 | queue_work(adapter->workqueue, &adapter->work); | ||
| 1056 | |||
| 1057 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | ||
| 1058 | } | ||
| 1059 | |||
| 1060 | return IRQ_RETVAL(handled); | ||
| 1061 | } | ||
| 1062 | |||
| 1063 | static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) | ||
| 1064 | { | ||
| 1065 | struct pvscsi_ctx *ctx = adapter->cmd_map; | ||
| 1066 | unsigned i; | ||
| 1067 | |||
| 1068 | for (i = 0; i < adapter->req_depth; ++i, ++ctx) | ||
| 1069 | free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); | ||
| 1070 | } | ||
| 1071 | |||
/*
 * Try to enable MSI-X with the single completion vector the device
 * exposes. On success, stores the allocated Linux IRQ number in *irq
 * and returns 0; otherwise returns the pci_enable_msix() error.
 */
static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
{
	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
	int ret;

	ret = pci_enable_msix(adapter->dev, &entry, 1);
	if (ret)
		return ret;

	*irq = entry.vector;

	return 0;
}
| 1085 | |||
| 1086 | static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) | ||
| 1087 | { | ||
| 1088 | if (adapter->irq) { | ||
| 1089 | free_irq(adapter->irq, adapter); | ||
| 1090 | adapter->irq = 0; | ||
| 1091 | } | ||
| 1092 | if (adapter->use_msi) { | ||
| 1093 | pci_disable_msi(adapter->dev); | ||
| 1094 | adapter->use_msi = 0; | ||
| 1095 | } else if (adapter->use_msix) { | ||
| 1096 | pci_disable_msix(adapter->dev); | ||
| 1097 | adapter->use_msix = 0; | ||
| 1098 | } | ||
| 1099 | } | ||
| 1100 | |||
/*
 * Free everything pvscsi_probe() acquired. Each step is guarded by a
 * NULL check, so this is safe to call on a partially-initialized
 * adapter from probe's error paths.
 * NOTE(review): pci_release_regions() is unconditional -- probe only
 * reaches this after pci_request_regions() succeeded, which holds for
 * all current callers; confirm if a new caller is added.
 */
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
	pvscsi_shutdown_intr(adapter);

	if (adapter->workqueue)
		destroy_workqueue(adapter->workqueue);

	if (adapter->mmioBase)
		pci_iounmap(adapter->dev, adapter->mmioBase);

	pci_release_regions(adapter->dev);

	if (adapter->cmd_map) {
		pvscsi_free_sgls(adapter);
		kfree(adapter->cmd_map);
	}

	if (adapter->rings_state)
		pci_free_consistent(adapter->dev, PAGE_SIZE,
				    adapter->rings_state, adapter->ringStatePA);

	if (adapter->req_ring)
		pci_free_consistent(adapter->dev,
				    adapter->req_pages * PAGE_SIZE,
				    adapter->req_ring, adapter->reqRingPA);

	if (adapter->cmp_ring)
		pci_free_consistent(adapter->dev,
				    adapter->cmp_pages * PAGE_SIZE,
				    adapter->cmp_ring, adapter->cmpRingPA);

	if (adapter->msg_ring)
		pci_free_consistent(adapter->dev,
				    adapter->msg_pages * PAGE_SIZE,
				    adapter->msg_ring, adapter->msgRingPA);
}
| 1137 | |||
| 1138 | /* | ||
| 1139 | * Allocate scatter gather lists. | ||
| 1140 | * | ||
| 1141 | * These are statically allocated. Trying to be clever was not worth it. | ||
| 1142 | * | ||
| 1143 | * Dynamic allocation can fail, and we can't go deeep into the memory | ||
| 1144 | * allocator, since we're a SCSI driver, and trying too hard to allocate | ||
| 1145 | * memory might generate disk I/O. We also don't want to fail disk I/O | ||
| 1146 | * in that case because we can't get an allocation - the I/O could be | ||
| 1147 | * trying to swap out data to free memory. Since that is pathological, | ||
| 1148 | * just use a statically allocated scatter list. | ||
| 1149 | * | ||
| 1150 | */ | ||
| 1151 | static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter) | ||
| 1152 | { | ||
| 1153 | struct pvscsi_ctx *ctx; | ||
| 1154 | int i; | ||
| 1155 | |||
| 1156 | ctx = adapter->cmd_map; | ||
| 1157 | BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); | ||
| 1158 | |||
| 1159 | for (i = 0; i < adapter->req_depth; ++i, ++ctx) { | ||
| 1160 | ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, | ||
| 1161 | get_order(SGL_SIZE)); | ||
| 1162 | ctx->sglPA = 0; | ||
| 1163 | BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); | ||
| 1164 | if (!ctx->sgl) { | ||
| 1165 | for (; i >= 0; --i, --ctx) { | ||
| 1166 | free_pages((unsigned long)ctx->sgl, | ||
| 1167 | get_order(SGL_SIZE)); | ||
| 1168 | ctx->sgl = NULL; | ||
| 1169 | } | ||
| 1170 | return -ENOMEM; | ||
| 1171 | } | ||
| 1172 | } | ||
| 1173 | |||
| 1174 | return 0; | ||
| 1175 | } | ||
| 1176 | |||
/*
 * PCI probe: bring up one PVSCSI adapter.
 *
 * Order matters: DMA mask -> host alloc -> BAR/MMIO map -> adapter
 * reset -> msg workqueue -> rings -> per-command contexts -> s/g
 * lists -> IRQ -> scsi_add_host.  Error paths unwind in reverse via
 * the labels at the bottom; pvscsi_release_resources() tolerates a
 * partially-initialized adapter.
 */
static int __devinit pvscsi_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct Scsi_Host *host;
	unsigned int i;
	unsigned long flags = 0;
	int error;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	/* Prefer 64-bit DMA; fall back to 32-bit before giving up. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/* Size the queue from the module parameters before the template
	 * is handed to scsi_host_alloc(). */
	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_disable_device;
	}

	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;

	spin_lock_init(&adapter->hw_lock);

	host->max_channel = 0;
	host->max_id      = 16;
	host->max_lun     = 1;
	host->max_cmd_len = 16;

	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_free_host;
	}

	/* Find the first memory (non-I/O) BAR large enough for the
	 * register space. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, host);

	/* Put the virtual hardware into a known state before touching
	 * anything else. */
	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	/* All contexts start on the free list. */
	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	/* Interrupt mode preference: MSI-X, then MSI, then shared INTx. */
	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
out_free_host:
	scsi_host_put(host);
out_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return error;
}
| 1347 | |||
/*
 * Quiesce the adapter: mask and free interrupts, drain both rings, and
 * reset the virtual hardware. Shared by the PCI shutdown and remove
 * paths; resources stay allocated (remove frees them afterwards).
 */
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
	pvscsi_mask_intr(adapter);

	/* Let any queued msg work finish before interrupts go away. */
	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	pvscsi_shutdown_intr(adapter);

	pvscsi_process_request_ring(adapter);
	pvscsi_process_completion_ring(adapter);
	ll_adapter_reset(adapter);
}
| 1361 | |||
/* PCI ->shutdown hook: quiesce the device without freeing resources. */
static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);

	__pvscsi_shutdown(shost_priv(host));
}
| 1369 | |||
/*
 * PCI ->remove hook. Detach from the SCSI midlayer first (no new
 * commands can arrive after scsi_remove_host), then quiesce the h/w,
 * free all driver resources, and drop the host reference.
 */
static void pvscsi_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	scsi_remove_host(host);

	__pvscsi_shutdown(adapter);
	pvscsi_release_resources(adapter);

	scsi_host_put(host);

	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
}
| 1385 | |||
/* PCI driver glue: matches the VMware PVSCSI device id table. */
static struct pci_driver pvscsi_pci_driver = {
	.name		= "vmw_pvscsi",
	.id_table	= pvscsi_pci_tbl,
	.probe		= pvscsi_probe,
	.remove		= __devexit_p(pvscsi_remove),
	.shutdown	= pvscsi_shutdown,
};
| 1393 | |||
/* Module entry point: announce the driver and register with the PCI core. */
static int __init pvscsi_init(void)
{
	pr_info("%s - version %s\n",
		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
	return pci_register_driver(&pvscsi_pci_driver);
}
| 1400 | |||
/* Module exit point: unregister; the PCI core calls ->remove per device. */
static void __exit pvscsi_exit(void)
{
	pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h new file mode 100644 index 000000000000..62e36e75715e --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.h | |||
| @@ -0,0 +1,397 @@ | |||
| 1 | /* | ||
| 2 | * VMware PVSCSI header file | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; version 2 of the License and no later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
| 14 | * details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | * | ||
| 20 | * Maintained by: Alok N Kataria <akataria@vmware.com> | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
#ifndef _VMW_PVSCSI_H_
#define _VMW_PVSCSI_H_

#include <linux/types.h>

#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"

/* Max s/g entries per segment; also the host template's sg_tablesize. */
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128

#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */

#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0

/*
 * host adapter status/error codes
 * (BusLogic-style BTSTAT codes reported by the emulation)
 */
enum HostBusAdapterStatus {
	BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
	BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, /* linked command completed */
	BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, /* ... with flag set */
	BTSTAT_DATA_UNDERRUN = 0x0c, /* data underrun */
	BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
	BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
	BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
	BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
	BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
	BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
	BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
	BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
	BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
	BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
	BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
	BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
	BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
	BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
	BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
	BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
	BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
	BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
};
| 65 | |||
/*
 * Register offsets.
 *
 * These registers are accessible both via i/o space and mm i/o.
 */

enum PVSCSIRegOffset {
	PVSCSI_REG_OFFSET_COMMAND = 0x0,
	PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
	PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
	PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
	PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
	PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
	PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
	PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
	PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
	PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
	PVSCSI_REG_OFFSET_DEBUG = 0x3018,
	PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
};

/*
 * Virtual h/w commands, written to PVSCSI_REG_OFFSET_COMMAND (command
 * arguments, if any, go to PVSCSI_REG_OFFSET_COMMAND_DATA).
 */

enum PVSCSICommands {
	PVSCSI_CMD_FIRST = 0, /* has to be first */

	PVSCSI_CMD_ADAPTER_RESET = 1,
	PVSCSI_CMD_ISSUE_SCSI = 2,
	PVSCSI_CMD_SETUP_RINGS = 3,
	PVSCSI_CMD_RESET_BUS = 4,
	PVSCSI_CMD_RESET_DEVICE = 5,
	PVSCSI_CMD_ABORT_CMD = 6,
	PVSCSI_CMD_CONFIG = 7,
	PVSCSI_CMD_SETUP_MSG_RING = 8,
	PVSCSI_CMD_DEVICE_UNPLUG = 9,

	PVSCSI_CMD_LAST = 10 /* has to be last */
};
| 106 | |||
/*
 * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
 */

struct PVSCSICmdDescResetDevice {
	u32 target;
	u8 lun[8];
} __packed;

/*
 * Command descriptor for PVSCSI_CMD_ABORT_CMD --
 *
 * - currently does not support specifying the LUN.
 * - _pad should be 0.
 */

struct PVSCSICmdDescAbortCmd {
	u64 context;
	u32 target;
	u32 _pad;
} __packed;

/*
 * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
 *
 * Notes:
 * - reqRingNumPages and cmpRingNumPages need to be power of two.
 * - reqRingNumPages and cmpRingNumPages need to be different from 0,
 * - reqRingNumPages and cmpRingNumPages must not be greater than
 *   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
 */

#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
struct PVSCSICmdDescSetupRings {
	u32 reqRingNumPages;
	u32 cmpRingNumPages;
	u64 ringsStatePPN;
	u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
	u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
} __packed;

/*
 * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
 *
 * Notes:
 * - this command was not supported in the initial revision of the h/w
 *   interface. Before using it, you need to check that it is supported by
 *   writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
 *   immediately after read the 'command status' register:
 *       * a value of -1 means that the cmd is NOT supported,
 *       * a value != -1 means that the cmd IS supported.
 *   If it's supported the 'command status' register should return:
 *      sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
 * - this command should be issued _after_ the usual SETUP_RINGS so that the
 *   RingsState page is already setup. If not, the command is a nop.
 * - numPages needs to be a power of two,
 * - numPages needs to be different from 0,
 * - _pad should be zero.
 */

#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16

struct PVSCSICmdDescSetupMsgRing {
	u32 numPages;
	u32 _pad;
	u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
} __packed;
| 174 | |||
enum PVSCSIMsgType {
	PVSCSI_MSG_DEV_ADDED = 0,
	PVSCSI_MSG_DEV_REMOVED = 1,
	PVSCSI_MSG_LAST = 2, /* sentinel: keep last */
};

/*
 * Msg descriptor.
 *
 * sizeof(struct PVSCSIRingMsgDesc) == 128.
 *
 * - type is of type enum PVSCSIMsgType.
 * - the content of args depend on the type of event being delivered.
 */

struct PVSCSIRingMsgDesc {
	u32 type;
	u32 args[31];
} __packed;

/* Payload layout shared by DEV_ADDED / DEV_REMOVED events; overlays
 * struct PVSCSIRingMsgDesc, so it must stay 128 bytes. */
struct PVSCSIMsgDescDevStatusChanged {
	u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
	u32 bus;
	u32 target;
	u8 lun[8];
	u32 pad[27];
} __packed;

/*
 * Rings state.
 *
 * - the fields:
 *    . msgProdIdx,
 *    . msgConsIdx,
 *    . msgNumEntriesLog2,
 *   .. are only used once the SETUP_MSG_RING cmd has been issued.
 * - '_pad' helps to ensure that the msg related fields are on their own
 *   cache-line.
 */

struct PVSCSIRingsState {
	u32 reqProdIdx;
	u32 reqConsIdx;
	u32 reqNumEntriesLog2;

	u32 cmpProdIdx;
	u32 cmpConsIdx;
	u32 cmpNumEntriesLog2;

	u8 _pad[104];

	u32 msgProdIdx;
	u32 msgConsIdx;
	u32 msgNumEntriesLog2;
} __packed;
| 230 | |||
/*
 * Request descriptor.
 *
 * sizeof(RingReqDesc) = 128
 *
 * - context: is a unique identifier of a command. It could normally be any
 *   64bit value, however we currently store it in the serialNumber variable
 *   of struct SCSI_Command, so we have the following restrictions due to the
 *   way this field is handled in the vmkernel storage stack:
 *    * this value can't be 0,
 *    * the upper 32bit need to be 0 since serialNumber is a u32.
 *   Currently tracked as PR 292060.
 * - dataLen: contains the total number of bytes that need to be transferred.
 * - dataAddr:
 *    * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
 *      s/g table segment, each s/g segment is entirely contained on a single
 *      page of physical memory,
 *    * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
 *      the buffer used for the DMA transfer,
 * - flags:
 *    * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
 *    * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
 *    * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
 *    * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
 *    * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
 *      16bytes. To be specified.
 * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
 *   completion of the i/o. For guest OSes that use lowest priority message
 *   delivery mode (such as windows), we use this "hint" to deliver the
 *   completion action to the proper vcpu. For now, we can use the vcpuId of
 *   the processor that initiated the i/o as a likely candidate for the vcpu
 *   that will be waiting for the completion.
 * - bus should be 0: we currently only support bus 0.
 * - unused should be zero'd.
 */

#define PVSCSI_FLAG_CMD_WITH_SG_LIST		(1 << 0)
#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB		(1 << 1)
#define PVSCSI_FLAG_CMD_DIR_NONE		(1 << 2)
#define PVSCSI_FLAG_CMD_DIR_TOHOST		(1 << 3)
#define PVSCSI_FLAG_CMD_DIR_TODEVICE		(1 << 4)

struct PVSCSIRingReqDesc {
	u64	context;
	u64	dataAddr;
	u64	dataLen;
	u64	senseAddr;
	u32	senseLen;
	u32	flags;
	u8	cdb[16];
	u8	cdbLen;
	u8	lun[8];
	u8	tag;
	u8	bus;
	u8	target;
	u8	vcpuHint;
	u8	unused[59];	/* pads the descriptor out to 128 bytes */
} __packed;
| 289 | |||
/*
 * Scatter-gather list management.
 *
 * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
 * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
 * table segment.
 *
 * - each segment of the s/g table contain a succession of struct
 *   PVSCSISGElement.
 * - each segment is entirely contained on a single physical page of memory.
 * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
 *   PVSCSISGElement.flags and in this case:
 *    * addr is the PA of the next s/g segment,
 *    * length is undefined, assumed to be 0.
 */

struct PVSCSISGElement {
	u64	addr;
	u32	length;
	u32	flags;
} __packed;
| 311 | |||
/*
 * Completion descriptor.
 *
 * sizeof(RingCmpDesc) = 32
 *
 * - context: identifier of the command. The same thing that was specified
 *   under "context" as part of struct RingReqDesc at initiation time,
 * - dataLen: number of bytes transferred for the actual i/o operation,
 * - senseLen: number of bytes written into the sense buffer,
 * - hostStatus: adapter status,
 * - scsiStatus: device status,
 * - _pad should be zero.
 */

struct PVSCSIRingCmpDesc {
	u64	context;
	u64	dataLen;
	u32	senseLen;
	u16	hostStatus;
	u16	scsiStatus;
	u32	_pad[2];
} __packed;
| 334 | |||
/*
 * Interrupt status / IRQ bits.
 *
 * One completion bit and one msg bit per ring pair; the *_MASK values
 * cover both bits of each group. MASK() is defined earlier in this header.
 */

#define PVSCSI_INTR_CMPL_0			(1 << 0)
#define PVSCSI_INTR_CMPL_1			(1 << 1)
#define PVSCSI_INTR_CMPL_MASK			MASK(2)

#define PVSCSI_INTR_MSG_0			(1 << 2)
#define PVSCSI_INTR_MSG_1			(1 << 3)
#define PVSCSI_INTR_MSG_MASK			(MASK(2) << 2)

#define PVSCSI_INTR_ALL_SUPPORTED		MASK(4)

/*
 * Number of MSI-X vectors supported.
 */
#define PVSCSI_MAX_INTRS	24

/*
 * Enumeration of supported MSI-X vectors
 */
#define PVSCSI_VECTOR_COMPLETION	0

/*
 * Misc constants for the rings.
 */

#define PVSCSI_MAX_NUM_PAGES_REQ_RING	PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
#define PVSCSI_MAX_NUM_PAGES_CMP_RING	PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
#define PVSCSI_MAX_NUM_PAGES_MSG_RING	PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES

#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
				(PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))

#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
	(PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)

/* Number of pages occupied by each region of the device's memory space. */
#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES	1
#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES	1
#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES		2
#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES	2
#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES		2

/*
 * Starting page index of each region within the device's memory space.
 * The indices are not consecutive because some regions span more than
 * one page (see the *_NUM_PAGES defines above).
 */
enum PVSCSIMemSpace {
	PVSCSI_MEM_SPACE_COMMAND_PAGE		= 0,
	PVSCSI_MEM_SPACE_INTR_STATUS_PAGE	= 1,
	PVSCSI_MEM_SPACE_MISC_PAGE		= 2,
	PVSCSI_MEM_SPACE_KICK_IO_PAGE		= 4,
	PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE	= 6,
	PVSCSI_MEM_SPACE_MSIX_PBA_PAGE		= 7,
};

#define PVSCSI_MEM_SPACE_NUM_PAGES \
	(PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES +       \
	 PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES +   \
	 PVSCSI_MEM_SPACE_MISC_NUM_PAGES +          \
	 PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES +       \
	 PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)

#define PVSCSI_MEM_SPACE_SIZE	(PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
| 397 | #endif /* _VMW_PVSCSI_H_ */ | ||
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 093610bcfcce..2f6e9d8eaf71 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
| @@ -161,7 +161,7 @@ | |||
| 161 | * | 161 | * |
| 162 | * 2003/02/12 - Christoph Hellwig <hch@infradead.org> | 162 | * 2003/02/12 - Christoph Hellwig <hch@infradead.org> |
| 163 | * | 163 | * |
| 164 | * Cleaned up host template defintion | 164 | * Cleaned up host template definition |
| 165 | * Removed now obsolete wd7000.h | 165 | * Removed now obsolete wd7000.h |
| 166 | */ | 166 | */ |
| 167 | 167 | ||
