45 files changed, 1756 insertions(+), 1076 deletions(-)
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index f1b7f659d3c9..e22957665808 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -151,7 +151,8 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
 		p += scnprintf(p, left, "\tbase_address: %x\n",
			       info->params.interface_path.isa.base_address);
 	} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
-		   !strncmp(info->params.host_bus_type, "PCI", 3)) {
+		   !strncmp(info->params.host_bus_type, "PCI", 3) ||
+		   !strncmp(info->params.host_bus_type, "XPRS", 4)) {
 		p += scnprintf(p, left,
			       "\t%02x:%02x.%d channel: %u\n",
			       info->params.interface_path.pci.bus,
@@ -159,7 +160,6 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
			       info->params.interface_path.pci.function,
			       info->params.interface_path.pci.channel);
 	} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
-		   !strncmp(info->params.host_bus_type, "XPRS", 4) ||
 		   !strncmp(info->params.host_bus_type, "HTPT", 4)) {
 		p += scnprintf(p, left,
			       "\tTBD: %llx\n",
@@ -668,7 +668,7 @@ edd_get_pci_dev(struct edd_device *edev)
 {
 	struct edd_info *info = edd_dev_get_info(edev);

-	if (edd_dev_is_type(edev, "PCI")) {
+	if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
 		return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
				      PCI_DEVFN(info->params.interface_path.pci.slot,
				      info->params.interface_path.pci.
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 63de1c7cd0cb..049ea907e04a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"

 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.8"
+#define BNX2FC_VERSION		"1.0.9"

 #define PFX			"bnx2fc: "

@@ -145,6 +145,9 @@
 #define REC_RETRY_COUNT			1
 #define BNX2FC_NUM_ERR_BITS		63

+#define BNX2FC_RELOGIN_WAIT_TIME	200
+#define BNX2FC_RELOGIN_WAIT_CNT		10
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;

diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index fd382fe33f6e..ce0ce3e32f33 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -268,17 +268,6 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)

 	orig_io_req = cb_arg->aborted_io_req;
 	srr_req = cb_arg->io_req;
-	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
-		BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
-			orig_io_req->xid);
-		goto srr_compl_done;
-	}
-	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
-		BNX2FC_IO_DBG(srr_req, "rec abts in prog "
-		       "orig_io - 0x%x\n",
-			orig_io_req->xid);
-		goto srr_compl_done;
-	}
 	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
 		/* SRR timedout */
 		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
@@ -290,6 +279,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
 		}
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
+			      orig_io_req->xid, orig_io_req->req_flags);
+		goto srr_compl_done;
+	}
 	orig_io_req->srr_retry++;
 	if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
 		struct bnx2fc_rport *tgt = orig_io_req->tgt;
@@ -311,6 +306,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
 		}
 		goto srr_compl_done;
 	}
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
+			      orig_io_req->xid, orig_io_req->req_flags);
+		goto srr_compl_done;
+	}
 	mp_req = &(srr_req->mp_req);
 	fc_hdr = &(mp_req->resp_fc_hdr);
 	resp_len = mp_req->resp_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 85bcc4b55965..8c6156a10d90 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Oct 02, 2011"
+#define DRV_MODULE_RELDATE	"Oct 21, 2011"


 static char version[] __devinitdata =
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0c64d184d731..84a78af83f90 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1103,7 +1103,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct bnx2fc_cmd *io_req;
 	struct fc_lport *lport;
+	struct fc_rport_priv *rdata;
 	struct bnx2fc_rport *tgt;
+	int logo_issued;
+	int wait_cnt = 0;
 	int rc = FAILED;


@@ -1192,8 +1195,40 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	} else {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"already in abts processing\n", io_req->xid);
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+		bnx2fc_initiate_cleanup(io_req);
+
+		spin_unlock_bh(&tgt->tgt_lock);
+
+		wait_for_completion(&io_req->tm_done);
+
+		spin_lock_bh(&tgt->tgt_lock);
+		io_req->wait_for_comp = 0;
+		rdata = io_req->tgt->rdata;
+		logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+					       &tgt->flags);
 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 		spin_unlock_bh(&tgt->tgt_lock);
+
+		if (!logo_issued) {
+			BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+				      tgt->flags);
+			mutex_lock(&lport->disc.disc_mutex);
+			lport->tt.rport_logoff(rdata);
+			mutex_unlock(&lport->disc.disc_mutex);
+			do {
+				msleep(BNX2FC_RELOGIN_WAIT_TIME);
+				/*
+				 * If session not recovered, let SCSI-ml
+				 * escalate error recovery.
+				 */
+				if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
+					return FAILED;
+			} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
+					   &tgt->flags));
+		}
 		return SUCCESS;
 	}
 	if (rc == FAILED) {
@@ -1275,6 +1310,8 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
 		io_req->refcount.refcount.counter, io_req->cmd_type);
 	bnx2fc_scsi_done(io_req, DID_ERROR);
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	if (io_req->wait_for_comp)
+		complete(&io_req->tm_done);
 }

 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 7c05fd9dccfd..339ea23a8675 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -441,7 +441,15 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)

 	spin_lock_irqsave(q->queue_lock, flags);
 	sdev = q->queuedata;
-	if (sdev && sdev->scsi_dh_data)
+	if (!sdev) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		err = SCSI_DH_NOSYS;
+		if (fn)
+			fn(data, err);
+		return err;
+	}
+
+	if (sdev->scsi_dh_data)
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
 	dev = get_device(&sdev->sdev_gendev);
 	if (!scsi_dh || !dev ||
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 627f4b5e5176..fe4df2da309c 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -507,7 +507,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 	int len, k, off, valid_states = 0;
 	unsigned char *ucp;
 	unsigned err;
-	unsigned long expiry, interval = 1;
+	unsigned long expiry, interval = 1000;

 	expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
  retry:
@@ -734,6 +734,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
 	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 	sdev->scsi_dh_data = scsi_dh_data;
 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+	sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);

 	return 0;

diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 61384ee4049b..cefbe44bb84a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2347,14 +2347,11 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		goto done;

 	mac = fr_cb(fp)->granted_mac;
-	if (is_zero_ether_addr(mac)) {
-		/* pre-FIP */
-		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-			fc_frame_free(fp);
-			return;
-		}
-	}
-	fcoe_update_src_mac(lport, mac);
+	/* pre-FIP */
+	if (is_zero_ether_addr(mac))
+		fcoe_ctlr_recv_flogi(fip, lport, fp);
+	if (!is_zero_ether_addr(mac))
+		fcoe_update_src_mac(lport, mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
 }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 4f7a5829ea4c..351dc0b86fab 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct device *parent = dev->parent;
+	struct request_queue *q;

 	scsi_proc_hostdir_rm(shost->hostt);

@@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
 		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
-	if (shost->uspace_req_q) {
-		kfree(shost->uspace_req_q->queuedata);
-		scsi_free_queue(shost->uspace_req_q);
+	q = shost->uspace_req_q;
+	if (q) {
+		kfree(q->queuedata);
+		q->queuedata = NULL;
+		scsi_free_queue(q);
 	}

 	scsi_destroy_command_freelist(shost);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bbdc9f960a66..e76107b2ade3 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/kthread.h>
+#include <linux/jiffies.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"

@@ -127,6 +128,10 @@ static struct board_type products[] = {

 static int number_of_controllers;

+static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
+static spinlock_t lockup_detector_lock;
+static struct task_struct *hpsa_lockup_detector;
+
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -484,6 +489,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
 	.sdev_attrs = hpsa_sdev_attrs,
 	.shost_attrs = hpsa_shost_attrs,
+	.max_sectors = 8192,
 };


@@ -566,16 +572,16 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
	 * assumes h->devlock is held
	 */
 	int i, found = 0;
-	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

-	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+	memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);

 	for (i = 0; i < h->ndevices; i++) {
 		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
 			set_bit(h->dev[i]->target, lun_taken);
 	}

-	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+	for (i = 0; i < HPSA_MAX_DEVICES; i++) {
 		if (!test_bit(i, lun_taken)) {
 			/* *bus = 1; */
 			*target = i;
@@ -598,7 +604,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
 	unsigned char addr1[8], addr2[8];
 	struct hpsa_scsi_dev_t *sd;

-	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+	if (n >= HPSA_MAX_DEVICES) {
 		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
 		return -1;
@@ -673,7 +679,7 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 {
 	/* assumes h->devlock is held */
-	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 	removed[*nremoved] = h->dev[entry];
 	(*nremoved)++;

@@ -702,7 +708,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
 	int i;
 	struct hpsa_scsi_dev_t *sd;

-	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

 	sd = h->dev[entry];
 	removed[*nremoved] = h->dev[entry];
@@ -814,10 +820,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	int nadded, nremoved;
 	struct Scsi_Host *sh = NULL;

-	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
-	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
+	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
+	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

 	if (!added || !removed) {
 		dev_warn(&h->pdev->dev, "out of memory in "
@@ -1338,6 +1342,22 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
 	wait_for_completion(&wait);
 }

+static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	unsigned long flags;
+
+	/* If controller lockup detected, fake a hardware error. */
+	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+	} else {
+		spin_unlock_irqrestore(&h->lock, flags);
+		hpsa_scsi_do_simple_cmd_core(h, c);
+	}
+}
+
 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
 {
@@ -1735,7 +1755,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 	if (is_scsi_rev_5(h))
 		return 0; /* p1210m doesn't need to do this. */

-#define MAX_MSA2XXX_ENCLOSURES 32
 	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
 		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded. Check your hardware "
@@ -1846,8 +1865,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	int raid_ctlr_position;
 	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);

-	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-		GFP_KERNEL);
+	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
 	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
 	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
@@ -1870,6 +1888,13 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)

 	/* Allocate the per device structures */
 	for (i = 0; i < ndevs_to_allocate; i++) {
+		if (i >= HPSA_MAX_DEVICES) {
+			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
+				" %d devices ignored.\n", HPSA_MAX_DEVICES,
+				ndevs_to_allocate - HPSA_MAX_DEVICES);
+			break;
+		}
+
 		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
 		if (!currentsd[i]) {
 			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
@@ -1956,7 +1981,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		default:
 			break;
 		}
-		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+		if (ncurrent >= HPSA_MAX_DEVICES)
 			break;
 	}
 	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
@@ -2048,8 +2073,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

-	/* Need a lock as this is being allocated from the pool */
 	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		cmd->result = DID_ERROR << 16;
+		done(cmd);
+		return 0;
+	}
+	/* Need a lock as this is being allocated from the pool */
 	c = cmd_alloc(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 	if (c == NULL) {	/* trouble... */
@@ -2601,7 +2632,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		c->SG[0].Len = iocommand.buf_size;
 		c->SG[0].Ext = 0; /* we are not chaining*/
 	}
-	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (iocommand.buf_size > 0)
 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -2724,7 +2755,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
			c->SG[i].Ext = 0;
 		}
 	}
-	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (sg_used)
 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -2872,6 +2903,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			c->Request.CDB[7] = (size >> 8) & 0xFF;
+			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
@@ -3091,6 +3124,7 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h);
 		while (raw_tag != FIFO_EMPTY)
@@ -3110,6 +3144,7 @@ static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
 		return IRQ_NONE;

 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h);
 	while (raw_tag != FIFO_EMPTY)
 		raw_tag = next_command(h);
@@ -3126,6 +3161,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h);
 		while (raw_tag != FIFO_EMPTY) {
@@ -3146,6 +3182,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
 	u32 raw_tag;

 	spin_lock_irqsave(&h->lock, flags);
+	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h);
 	while (raw_tag != FIFO_EMPTY) {
 		if (hpsa_tag_contains_index(raw_tag))
@@ -4090,6 +4127,149 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 	kfree(h);
 }

+static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
+{
+	assert_spin_locked(&lockup_detector_lock);
+	if (!hpsa_lockup_detector)
+		return;
+	if (h->lockup_detected)
+		return; /* already stopped the lockup detector */
+	list_del(&h->lockup_list);
+}
+
+/* Called when controller lockup detected. */
+static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+{
+	struct CommandList *c = NULL;
+
+	assert_spin_locked(&h->lock);
+	/* Mark all outstanding commands as failed and complete them. */
+	while (!list_empty(list)) {
+		c = list_entry(list->next, struct CommandList, list);
+		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+		finish_cmd(c, c->Header.Tag.lower);
+	}
+}
+
+static void controller_lockup_detected(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	assert_spin_locked(&lockup_detector_lock);
+	remove_ctlr_from_lockup_detector_list(h);
+	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	spin_lock_irqsave(&h->lock, flags);
+	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	spin_unlock_irqrestore(&h->lock, flags);
+	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
+			h->lockup_detected);
+	pci_disable_device(h->pdev);
+	spin_lock_irqsave(&h->lock, flags);
+	fail_all_cmds_on_list(h, &h->cmpQ);
+	fail_all_cmds_on_list(h, &h->reqQ);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+
+static void detect_controller_lockup(struct ctlr_info *h)
+{
+	u64 now;
+	u32 heartbeat;
+	unsigned long flags;
+
+	assert_spin_locked(&lockup_detector_lock);
+	now = get_jiffies_64();
+	/* If we've received an interrupt recently, we're ok. */
+	if (time_after64(h->last_intr_timestamp +
+				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+		return;
+
+	/*
+	 * If we've already checked the heartbeat recently, we're ok.
+	 * This could happen if someone sends us a signal. We
+	 * otherwise don't care about signals in this thread.
+	 */
+	if (time_after64(h->last_heartbeat_timestamp +
+				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+		return;
+
+	/* If heartbeat has not changed since we last looked, we're not ok. */
+	spin_lock_irqsave(&h->lock, flags);
+	heartbeat = readl(&h->cfgtable->HeartBeat);
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (h->last_heartbeat == heartbeat) {
+		controller_lockup_detected(h);
+		return;
+	}
+
+	/* We're ok. */
+	h->last_heartbeat = heartbeat;
+	h->last_heartbeat_timestamp = now;
+}
+
+static int detect_controller_lockup_thread(void *notused)
+{
+	struct ctlr_info *h;
+	unsigned long flags;
+
+	while (1) {
+		struct list_head *this, *tmp;
+
+		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
+		if (kthread_should_stop())
+			break;
+		spin_lock_irqsave(&lockup_detector_lock, flags);
+		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
+			h = list_entry(this, struct ctlr_info, lockup_list);
+			detect_controller_lockup(h);
+		}
+		spin_unlock_irqrestore(&lockup_detector_lock, flags);
+	}
+	return 0;
+}
+
+static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lockup_detector_lock, flags);
+	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+	spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
+static void start_controller_lockup_detector(struct ctlr_info *h)
+{
+	/* Start the lockup detector thread if not already started */
+	if (!hpsa_lockup_detector) {
+		spin_lock_init(&lockup_detector_lock);
+		hpsa_lockup_detector =
+			kthread_run(detect_controller_lockup_thread,
+						NULL, "hpsa");
+	}
+	if (!hpsa_lockup_detector) {
+		dev_warn(&h->pdev->dev,
+			"Could not start lockup detector thread\n");
+		return;
+	}
+	add_ctlr_to_lockup_detector_list(h);
+}
+
+static void stop_controller_lockup_detector(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lockup_detector_lock, flags);
+	remove_ctlr_from_lockup_detector_list(h);
+	/* If the list of ctlr's to monitor is empty, stop the thread */
+	if (list_empty(&hpsa_ctlr_list)) {
+		kthread_stop(hpsa_lockup_detector);
+		hpsa_lockup_detector = NULL;
+	}
+	spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
 static int __devinit hpsa_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
 {
@@ -4127,7 +4307,6 @@ reinit_after_soft_reset:
 		return -ENOMEM;

 	h->pdev = pdev;
-	h->busy_initializing = 1;
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
@@ -4236,7 +4415,7 @@ reinit_after_soft_reset:

 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
-	h->busy_initializing = 0;
+	start_controller_lockup_detector(h);
 	return 1;

 clean4:
@@ -4245,7 +4424,6 @@ clean4:
 	free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean1:
-	h->busy_initializing = 0;
 	kfree(h);
 	return rc;
 }
@@ -4300,10 +4478,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
 	struct ctlr_info *h;

 	if (pci_get_drvdata(pdev) == NULL) {
-		dev_err(&pdev->dev, "unable to remove device \n");
+		dev_err(&pdev->dev, "unable to remove device\n");
 		return;
 	}
 	h = pci_get_drvdata(pdev);
+	stop_controller_lockup_detector(h);
 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
 	hpsa_shutdown(pdev);
 	iounmap(h->vaddr);
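The hpsa lockup detector added above is a heartbeat watchdog: one kthread wakes every HEARTBEAT_SAMPLE_INTERVAL, and a controller is declared locked up only when no interrupt has arrived recently and its HeartBeat register has not advanced since the previous sample. A stand-alone user-space sketch of that decision logic, not part of the patch and using illustrative names and time units:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ctlr_state {
	uint32_t last_heartbeat;		/* value seen at the previous sample */
	uint64_t last_intr_timestamp;		/* "time" of the last interrupt */
	uint64_t last_heartbeat_timestamp;	/* "time" of the previous sample */
};

/* Returns true when the controller looks locked up. */
static bool heartbeat_stalled(struct ctlr_state *s, uint32_t heartbeat,
			      uint64_t now, uint64_t min_interval)
{
	/* A recent interrupt or a recent sample means we trust the controller. */
	if (now - s->last_intr_timestamp < min_interval)
		return false;
	if (now - s->last_heartbeat_timestamp < min_interval)
		return false;
	/* No progress since the previous sample: declare a lockup. */
	if (heartbeat == s->last_heartbeat)
		return true;
	s->last_heartbeat = heartbeat;
	s->last_heartbeat_timestamp = now;
	return false;
}

int main(void)
{
	struct ctlr_state s = { .last_heartbeat = 41 };

	/* Two samples 10 "ticks" apart with a minimum interval of 5. */
	printf("%d\n", heartbeat_stalled(&s, 42, 10, 5)); /* 0: counter advanced */
	printf("%d\n", heartbeat_stalled(&s, 42, 20, 5)); /* 1: counter stalled */
	return 0;
}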
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7f53ceaa7239..91edafb8c7e6 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -95,8 +95,6 @@ struct ctlr_info {
 	unsigned long		*cmd_pool_bits;
 	int			nr_allocs;
 	int			nr_frees;
-	int			busy_initializing;
-	int			busy_scanning;
 	int			scan_finished;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
@@ -104,8 +102,7 @@ struct ctlr_info {
 	struct Scsi_Host *scsi_host;
 	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
 	int ndevices; /* number of used elements in .dev[] array. */
-#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
-	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
 	/*
	 * Performant mode tables.
	 */
@@ -124,6 +121,11 @@ struct ctlr_info {
 	unsigned char reply_pool_wraparound;
 	u32 *blockFetchTable;
 	unsigned char *hba_inquiry_data;
+	u64 last_intr_timestamp;
+	u32 last_heartbeat;
+	u64 last_heartbeat_timestamp;
+	u32 lockup_detected;
+	struct list_head lockup_list;
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 55d741b019db..3fd4715935c2 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -123,8 +123,11 @@ union u64bit {

 /* FIXME this is a per controller value (barf!) */
 #define HPSA_MAX_TARGETS_PER_CTLR 16
-#define HPSA_MAX_LUN 256
+#define HPSA_MAX_LUN 1024
 #define HPSA_MAX_PHYS_LUN 1024
+#define MAX_MSA2XXX_ENCLOSURES 32
+#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
+	MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */

 /* SCSI-3 Commands */
 #pragma pack(1)
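With the values above, HPSA_MAX_DEVICES expands to 1024 + 1024 + 32 + 1 = 2081, the bound that replaces the old 256-entry HPSA_MAX_SCSI_DEVS_PER_HBA throughout hpsa.c and hpsa.h. A stand-alone sketch, not part of the patch, that only echoes the expansion using constants copied from this hunk:

#include <stdio.h>

/* Constants copied from the hunk above. */
#define HPSA_MAX_LUN 1024
#define HPSA_MAX_PHYS_LUN 1024
#define MAX_MSA2XXX_ENCLOSURES 32
#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
	MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */

int main(void)
{
	/* Prints: HPSA_MAX_DEVICES = 2081 */
	printf("HPSA_MAX_DEVICES = %d\n", HPSA_MAX_DEVICES);
	return 0;
}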
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 73e24b48dced..fd860d952b28 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9123,6 +9123,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6d257e0dd6a5..ac84736c1b9c 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@

 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C3    0x0353
 #define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_57CC    0x035C
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index f07f30fada1b..e7fe9c4c85b8 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1350,7 +1350,7 @@ static void isci_user_parameters_get(struct sci_user_parameters *u)
 	u->stp_max_occupancy_timeout = stp_max_occ_to;
 	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
 	u->no_outbound_task_timeout = no_outbound_task_to;
-	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+	u->max_concurr_spinup = max_concurr_spinup;
 }

 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
@@ -1661,7 +1661,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
 	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

 	/* Default to APC mode. */
-	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+	ihost->oem_parameters.controller.max_concurr_spin_up = 1;

 	/* Default to no SSC operation. */
 	ihost->oem_parameters.controller.do_enable_ssc = false;
@@ -1787,7 +1787,8 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
 	} else
 		return -EINVAL;

-	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+	    oem->controller.max_concurr_spin_up < 1)
 		return -EINVAL;

 	return 0;
@@ -1810,6 +1811,16 @@ static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
 	return SCI_FAILURE_INVALID_STATE;
 }

+static u8 max_spin_up(struct isci_host *ihost)
+{
+	if (ihost->user_parameters.max_concurr_spinup)
+		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+	else
+		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
 static void power_control_timeout(unsigned long data)
 {
 	struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1839,8 +1850,7 @@ static void power_control_timeout(unsigned long data)
 		if (iphy == NULL)
			continue;

-		if (ihost->power_control.phys_granted_power >=
-		    ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
			break;

 		ihost->power_control.requesters[i] = NULL;
@@ -1865,8 +1875,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
 {
 	BUG_ON(iphy == NULL);

-	if (ihost->power_control.phys_granted_power <
-	    ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);

diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 43fe840fbe9c..a97edabcb85a 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -118,7 +118,7 @@ unsigned char phy_gen = 3;
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");

-unsigned char max_concurr_spinup = 1;
+unsigned char max_concurr_spinup;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");

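Taken together, the isci changes make the max_concurr_spinup module parameter default to 0, so max_spin_up() in host.c uses a non-zero module parameter when given, falls back to the OEM value otherwise, and clamps either one to MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT. A stand-alone sketch of that selection rule, not part of the patch; the cap value of 4 is an assumption for illustration only:

#include <stdio.h>

#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4	/* assumed cap, for illustration */

/* Mirrors the shape of max_spin_up(): the user parameter wins when non-zero,
 * the OEM value is used otherwise, and the result never exceeds the cap. */
static unsigned char pick_spin_up(unsigned char user, unsigned char oem)
{
	unsigned char v = user ? user : oem;

	return v < MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ?
		v : MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT;
}

int main(void)
{
	printf("%d\n", pick_spin_up(0, 1));	/* no module override -> OEM value 1 */
	printf("%d\n", pick_spin_up(7, 1));	/* user asks for 7 -> clamped to 4 */
	return 0;
}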
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 8e59c8865dcd..ac7f27749f97 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -145,48 +145,15 @@ static void sci_port_bcn_enable(struct isci_port *iport)
 	}
 }

-/* called under sci_lock to stabilize phy:port associations */
-void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
-{
-	int i;
-
-	clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
-	wake_up(&ihost->eventq);
-
-	if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
-		struct isci_phy *iphy = iport->phy_table[i];
-
-		if (!iphy)
-			continue;
-
-		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-						PORTE_BROADCAST_RCVD);
-		break;
-	}
-}
-
 static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
 {
-	if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
-			__func__, iphy, &iphy->sas_phy);
-		set_bit(IPORT_BCN_PENDING, &iport->flags);
-		atomic_inc(&iport->event);
-		wake_up(&ihost->eventq);
-	} else {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: isci_phy = %p, sas_phy = %p\n",
-			__func__, iphy, &iphy->sas_phy);
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_phy = %p, sas_phy = %p\n",
+		__func__, iphy, &iphy->sas_phy);

-		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-						PORTE_BROADCAST_RCVD);
-	}
+	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
 	sci_port_bcn_enable(iport);
 }

@@ -278,9 +245,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
 	/* check to see if this is the last phy on this port. */
 	if (isci_phy->sas_phy.port &&
	    isci_phy->sas_phy.port->num_phys == 1) {
-		atomic_inc(&isci_port->event);
-		isci_port_bcn_enable(isci_host, isci_port);
-
 		/* change the state for all devices on this port. The
		 * next task sent to this device will be returned as
		 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
@@ -350,6 +314,34 @@ static void isci_port_stop_complete(struct isci_host *ihost,
 	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
 }

+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+	switch (state) {
+	case SCI_PORT_READY:
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+	case SCI_PORT_SUB_CONFIGURING:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* flag dummy rnc hanling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+				      enum sci_port_states state)
+{
+	struct sci_base_state_machine *sm = &iport->sm;
+	enum sci_port_states old_state = sm->current_state_id;
+
+	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+		iport->ready_exit = true;
+
+	sci_change_state(sm, state);
+	iport->ready_exit = false;
+}
+
 /**
  * isci_port_hard_reset_complete() - This function is called by the sci core
  *    when the hard reset complete notification has been received.
@@ -368,6 +360,26 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
 	/* Save the status of the hard reset from the port. */
 	isci_port->hard_reset_status = completion_status;

+	if (completion_status != SCI_SUCCESS) {
+
+		/* The reset failed. The port state is now SCI_PORT_FAILED. */
+		if (isci_port->active_phy_mask == 0) {
+
+			/* Generate the link down now to the host, since it
+			 * was intercepted by the hard reset state machine when
+			 * it really happened.
+			 */
+			isci_port_link_down(isci_port->isci_host,
+					    &isci_port->isci_host->phys[
+						   isci_port->last_active_phy],
+					    isci_port);
+		}
+		/* Advance the port state so that link state changes will be
+		 * noticed.
+		 */
+		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+	}
 	complete_all(&isci_port->hard_reset_complete);
 }

@@ -657,6 +669,8 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
 	struct isci_host *ihost = iport->owning_controller;

 	iport->active_phy_mask &= ~(1 << iphy->phy_index);
+	if (!iport->active_phy_mask)
+		iport->last_active_phy = iphy->phy_index;

 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

@@ -683,33 +697,6 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
 	}
 }

-static bool is_port_ready_state(enum sci_port_states state)
-{
-	switch (state) {
-	case SCI_PORT_READY:
-	case SCI_PORT_SUB_WAITING:
-	case SCI_PORT_SUB_OPERATIONAL:
-	case SCI_PORT_SUB_CONFIGURING:
-		return true;
-	default:
-		return false;
-	}
-}
-
-/* flag dummy rnc hanling when exiting a ready state */
-static void port_state_machine_change(struct isci_port *iport,
-				      enum sci_port_states state)
-{
-	struct sci_base_state_machine *sm = &iport->sm;
-	enum sci_port_states old_state = sm->current_state_id;
-
-	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
-		iport->ready_exit = true;
-
-	sci_change_state(sm, state);
-	iport->ready_exit = false;
-}
-
 /**
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
@@ -1622,7 +1609,8 @@ void sci_port_construct(struct isci_port *iport, u8 index,
 	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
 	iport->physical_port_index = index;
 	iport->active_phy_mask     = 0;
-	iport->ready_exit	   = false;
+	iport->last_active_phy     = 0;
+	iport->ready_exit	   = false;

 	iport->owning_controller = ihost;

@@ -1648,7 +1636,6 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
 	init_completion(&iport->start_complete);
 	iport->isci_host = ihost;
 	isci_port_change_state(iport, isci_freed);
-	atomic_set(&iport->event, 0);
 }

 /**
@@ -1676,7 +1663,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 {
 	unsigned long flags;
 	enum sci_status status;
-	int idx, ret = TMF_RESP_FUNC_COMPLETE;
+	int ret = TMF_RESP_FUNC_COMPLETE;

 	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);
@@ -1697,8 +1684,13 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

-		if (iport->hard_reset_status != SCI_SUCCESS)
+		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;
+
+			dev_err(&ihost->pdev->dev,
+				"%s: iport = %p; hard reset failed (0x%x)\n",
+				__func__, iport, iport->hard_reset_status);
+		}
 	} else {
 		ret = TMF_RESP_FUNC_FAILED;

@@ -1718,18 +1710,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
			"%s: iport = %p; hard reset failed "
			"(0x%x) - driving explicit link fail for all phys\n",
			__func__, iport, iport->hard_reset_status);
-
-		/* Down all phys in the port. */
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-		for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
-			struct isci_phy *iphy = iport->phy_table[idx];
-
-			if (!iphy)
-				continue;
-			sci_phy_stop(iphy);
-			sci_phy_start(iphy);
-		}
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 	}
 	return ret;
| 1735 | } | 1715 | } |
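
The port.c hunks above rest on one piece of bookkeeping: when the last phy leaves the port, its index is remembered so that a link-down event swallowed by the hard-reset state machine can be replayed against a real phy once the reset is known to have failed, after which the port state machine is re-armed via SCI_PORT_SUB_WAITING. Below is a minimal sketch of that pattern only, with a simplified struct and a hypothetical report_link_down() callback standing in for isci_port_link_down(); it is not the driver code itself and locking is elided.

	struct port_stub {
		unsigned long active_phy_mask;	/* one bit per active phy */
		unsigned int last_active_phy;	/* phy that went down last */
	};

	/* Called whenever a phy is deactivated (cf. sci_port_deactivate_phy). */
	static void port_stub_deactivate_phy(struct port_stub *p, unsigned int phy_index)
	{
		p->active_phy_mask &= ~(1UL << phy_index);

		/* Record the index only when the port just became empty, so a
		 * deferred link-down can name the phy that was lost. */
		if (!p->active_phy_mask)
			p->last_active_phy = phy_index;
	}

	/* Called when a hard reset completes with an error
	 * (cf. isci_port_hard_reset_complete). */
	static void port_stub_reset_failed(struct port_stub *p,
					   void (*report_link_down)(unsigned int phy_index))
	{
		/* The real link-down was intercepted by the reset state machine;
		 * replay it now against the phy recorded above. */
		if (!p->active_phy_mask)
			report_link_down(p->last_active_phy);
	}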
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index b50ecd4e8f9c..cb5ffbc38603 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h | |||
| @@ -77,7 +77,6 @@ enum isci_status { | |||
| 77 | 77 | ||
| 78 | /** | 78 | /** |
| 79 | * struct isci_port - isci direct attached sas port object | 79 | * struct isci_port - isci direct attached sas port object |
| 80 | * @event: counts bcns and port stop events (for bcn filtering) | ||
| 81 | * @ready_exit: several states constitute 'ready'. When exiting ready we | 80 | * @ready_exit: several states constitute 'ready'. When exiting ready we |
| 82 | * need to take extra port-teardown actions that are | 81 | * need to take extra port-teardown actions that are |
| 83 | * skipped when exiting to another 'ready' state. | 82 | * skipped when exiting to another 'ready' state. |
| @@ -92,10 +91,6 @@ enum isci_status { | |||
| 92 | */ | 91 | */ |
| 93 | struct isci_port { | 92 | struct isci_port { |
| 94 | enum isci_status status; | 93 | enum isci_status status; |
| 95 | #define IPORT_BCN_BLOCKED 0 | ||
| 96 | #define IPORT_BCN_PENDING 1 | ||
| 97 | unsigned long flags; | ||
| 98 | atomic_t event; | ||
| 99 | struct isci_host *isci_host; | 94 | struct isci_host *isci_host; |
| 100 | struct asd_sas_port sas_port; | 95 | struct asd_sas_port sas_port; |
| 101 | struct list_head remote_dev_list; | 96 | struct list_head remote_dev_list; |
| @@ -109,6 +104,7 @@ struct isci_port { | |||
| 109 | u8 logical_port_index; | 104 | u8 logical_port_index; |
| 110 | u8 physical_port_index; | 105 | u8 physical_port_index; |
| 111 | u8 active_phy_mask; | 106 | u8 active_phy_mask; |
| 107 | u8 last_active_phy; | ||
| 112 | u16 reserved_rni; | 108 | u16 reserved_rni; |
| 113 | u16 reserved_tag; | 109 | u16 reserved_tag; |
| 114 | u32 started_request_count; | 110 | u32 started_request_count; |
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index dc007e692f4e..2c75248ca326 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h | |||
| @@ -112,7 +112,7 @@ struct sci_user_parameters { | |||
| 112 | * This field specifies the maximum number of direct attached devices | 112 | * This field specifies the maximum number of direct attached devices |
| 113 | * that can have power supplied to them simultaneously. | 113 | * that can have power supplied to them simultaneously. |
| 114 | */ | 114 | */ |
| 115 | u8 max_number_concurrent_device_spin_up; | 115 | u8 max_concurr_spinup; |
| 116 | 116 | ||
| 117 | /** | 117 | /** |
| 118 | * This field specifies the number of seconds to allow a phy to consume | 118 | * This field specifies the number of seconds to allow a phy to consume |
| @@ -219,7 +219,7 @@ struct sci_bios_oem_param_block_hdr { | |||
| 219 | struct sci_oem_params { | 219 | struct sci_oem_params { |
| 220 | struct { | 220 | struct { |
| 221 | uint8_t mode_type; | 221 | uint8_t mode_type; |
| 222 | uint8_t max_concurrent_dev_spin_up; | 222 | uint8_t max_concurr_spin_up; |
| 223 | uint8_t do_enable_ssc; | 223 | uint8_t do_enable_ssc; |
| 224 | uint8_t reserved; | 224 | uint8_t reserved; |
| 225 | } controller; | 225 | } controller; |
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index fbf9ce28c3f5..b207cd3b15a0 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c | |||
| @@ -1438,88 +1438,3 @@ int isci_remote_device_found(struct domain_device *domain_dev) | |||
| 1438 | 1438 | ||
| 1439 | return status == SCI_SUCCESS ? 0 : -ENODEV; | 1439 | return status == SCI_SUCCESS ? 0 : -ENODEV; |
| 1440 | } | 1440 | } |
| 1441 | /** | ||
| 1442 | * isci_device_is_reset_pending() - This function will check if there is any | ||
| 1443 | * pending reset condition on the device. | ||
| 1444 | * @request: This parameter is the isci_device object. | ||
| 1445 | * | ||
| 1446 | * true if there is a reset pending for the device. | ||
| 1447 | */ | ||
| 1448 | bool isci_device_is_reset_pending( | ||
| 1449 | struct isci_host *isci_host, | ||
| 1450 | struct isci_remote_device *isci_device) | ||
| 1451 | { | ||
| 1452 | struct isci_request *isci_request; | ||
| 1453 | struct isci_request *tmp_req; | ||
| 1454 | bool reset_is_pending = false; | ||
| 1455 | unsigned long flags; | ||
| 1456 | |||
| 1457 | dev_dbg(&isci_host->pdev->dev, | ||
| 1458 | "%s: isci_device = %p\n", __func__, isci_device); | ||
| 1459 | |||
| 1460 | spin_lock_irqsave(&isci_host->scic_lock, flags); | ||
| 1461 | |||
| 1462 | /* Check for reset on all pending requests. */ | ||
| 1463 | list_for_each_entry_safe(isci_request, tmp_req, | ||
| 1464 | &isci_device->reqs_in_process, dev_node) { | ||
| 1465 | dev_dbg(&isci_host->pdev->dev, | ||
| 1466 | "%s: isci_device = %p request = %p\n", | ||
| 1467 | __func__, isci_device, isci_request); | ||
| 1468 | |||
| 1469 | if (isci_request->ttype == io_task) { | ||
| 1470 | struct sas_task *task = isci_request_access_task( | ||
| 1471 | isci_request); | ||
| 1472 | |||
| 1473 | spin_lock(&task->task_state_lock); | ||
| 1474 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) | ||
| 1475 | reset_is_pending = true; | ||
| 1476 | spin_unlock(&task->task_state_lock); | ||
| 1477 | } | ||
| 1478 | } | ||
| 1479 | |||
| 1480 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
| 1481 | |||
| 1482 | dev_dbg(&isci_host->pdev->dev, | ||
| 1483 | "%s: isci_device = %p reset_is_pending = %d\n", | ||
| 1484 | __func__, isci_device, reset_is_pending); | ||
| 1485 | |||
| 1486 | return reset_is_pending; | ||
| 1487 | } | ||
| 1488 | |||
| 1489 | /** | ||
| 1490 | * isci_device_clear_reset_pending() - This function will clear if any pending | ||
| 1491 | * reset condition flags on the device. | ||
| 1492 | * @request: This parameter is the isci_device object. | ||
| 1493 | * | ||
| 1494 | * true if there is a reset pending for the device. | ||
| 1495 | */ | ||
| 1496 | void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev) | ||
| 1497 | { | ||
| 1498 | struct isci_request *isci_request; | ||
| 1499 | struct isci_request *tmp_req; | ||
| 1500 | unsigned long flags = 0; | ||
| 1501 | |||
| 1502 | dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n", | ||
| 1503 | __func__, idev, ihost); | ||
| 1504 | |||
| 1505 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
| 1506 | |||
| 1507 | /* Clear reset pending on all pending requests. */ | ||
| 1508 | list_for_each_entry_safe(isci_request, tmp_req, | ||
| 1509 | &idev->reqs_in_process, dev_node) { | ||
| 1510 | dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n", | ||
| 1511 | __func__, idev, isci_request); | ||
| 1512 | |||
| 1513 | if (isci_request->ttype == io_task) { | ||
| 1514 | |||
| 1515 | unsigned long flags2; | ||
| 1516 | struct sas_task *task = isci_request_access_task( | ||
| 1517 | isci_request); | ||
| 1518 | |||
| 1519 | spin_lock_irqsave(&task->task_state_lock, flags2); | ||
| 1520 | task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; | ||
| 1521 | spin_unlock_irqrestore(&task->task_state_lock, flags2); | ||
| 1522 | } | ||
| 1523 | } | ||
| 1524 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
| 1525 | } | ||
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index e1747ea0d0ea..483ee50152f3 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h | |||
| @@ -132,10 +132,7 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost, | |||
| 132 | struct isci_remote_device *idev); | 132 | struct isci_remote_device *idev); |
| 133 | void isci_remote_device_gone(struct domain_device *domain_dev); | 133 | void isci_remote_device_gone(struct domain_device *domain_dev); |
| 134 | int isci_remote_device_found(struct domain_device *domain_dev); | 134 | int isci_remote_device_found(struct domain_device *domain_dev); |
| 135 | bool isci_device_is_reset_pending(struct isci_host *ihost, | 135 | |
| 136 | struct isci_remote_device *idev); | ||
| 137 | void isci_device_clear_reset_pending(struct isci_host *ihost, | ||
| 138 | struct isci_remote_device *idev); | ||
| 139 | /** | 136 | /** |
| 140 | * sci_remote_device_stop() - This method will stop both transmission and | 137 | * sci_remote_device_stop() - This method will stop both transmission and |
| 141 | * reception of link activity for the supplied remote device. This method | 138 | * reception of link activity for the supplied remote device. This method |
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 565a9f0a9bc2..192cb48d849a 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
| @@ -191,7 +191,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) | |||
| 191 | 191 | ||
| 192 | task_iu->task_func = isci_tmf->tmf_code; | 192 | task_iu->task_func = isci_tmf->tmf_code; |
| 193 | task_iu->task_tag = | 193 | task_iu->task_tag = |
| 194 | (ireq->ttype == tmf_task) ? | 194 | (test_bit(IREQ_TMF, &ireq->flags)) ? |
| 195 | isci_tmf->io_tag : | 195 | isci_tmf->io_tag : |
| 196 | SCI_CONTROLLER_INVALID_IO_TAG; | 196 | SCI_CONTROLLER_INVALID_IO_TAG; |
| 197 | } | 197 | } |
| @@ -516,7 +516,7 @@ sci_io_request_construct_sata(struct isci_request *ireq, | |||
| 516 | struct domain_device *dev = ireq->target_device->domain_dev; | 516 | struct domain_device *dev = ireq->target_device->domain_dev; |
| 517 | 517 | ||
| 518 | /* check for management protocols */ | 518 | /* check for management protocols */ |
| 519 | if (ireq->ttype == tmf_task) { | 519 | if (test_bit(IREQ_TMF, &ireq->flags)) { |
| 520 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 520 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
| 521 | 521 | ||
| 522 | if (tmf->tmf_code == isci_tmf_sata_srst_high || | 522 | if (tmf->tmf_code == isci_tmf_sata_srst_high || |
| @@ -632,7 +632,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) | |||
| 632 | enum sci_status status = SCI_SUCCESS; | 632 | enum sci_status status = SCI_SUCCESS; |
| 633 | 633 | ||
| 634 | /* check for management protocols */ | 634 | /* check for management protocols */ |
| 635 | if (ireq->ttype == tmf_task) { | 635 | if (test_bit(IREQ_TMF, &ireq->flags)) { |
| 636 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 636 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
| 637 | 637 | ||
| 638 | if (tmf->tmf_code == isci_tmf_sata_srst_high || | 638 | if (tmf->tmf_code == isci_tmf_sata_srst_high || |
| @@ -2630,14 +2630,8 @@ static void isci_task_save_for_upper_layer_completion( | |||
| 2630 | switch (task_notification_selection) { | 2630 | switch (task_notification_selection) { |
| 2631 | 2631 | ||
| 2632 | case isci_perform_normal_io_completion: | 2632 | case isci_perform_normal_io_completion: |
| 2633 | |||
| 2634 | /* Normal notification (task_done) */ | 2633 | /* Normal notification (task_done) */ |
| 2635 | dev_dbg(&host->pdev->dev, | 2634 | |
| 2636 | "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", | ||
| 2637 | __func__, | ||
| 2638 | task, | ||
| 2639 | task->task_status.resp, response, | ||
| 2640 | task->task_status.stat, status); | ||
| 2641 | /* Add to the completed list. */ | 2635 | /* Add to the completed list. */ |
| 2642 | list_add(&request->completed_node, | 2636 | list_add(&request->completed_node, |
| 2643 | &host->requests_to_complete); | 2637 | &host->requests_to_complete); |
| @@ -2650,13 +2644,6 @@ static void isci_task_save_for_upper_layer_completion( | |||
| 2650 | /* No notification to libsas because this request is | 2644 | /* No notification to libsas because this request is |
| 2651 | * already in the abort path. | 2645 | * already in the abort path. |
| 2652 | */ | 2646 | */ |
| 2653 | dev_dbg(&host->pdev->dev, | ||
| 2654 | "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", | ||
| 2655 | __func__, | ||
| 2656 | task, | ||
| 2657 | task->task_status.resp, response, | ||
| 2658 | task->task_status.stat, status); | ||
| 2659 | |||
| 2660 | /* Wake up whatever process was waiting for this | 2647 | /* Wake up whatever process was waiting for this |
| 2661 | * request to complete. | 2648 | * request to complete. |
| 2662 | */ | 2649 | */ |
| @@ -2673,30 +2660,22 @@ static void isci_task_save_for_upper_layer_completion( | |||
| 2673 | 2660 | ||
| 2674 | case isci_perform_error_io_completion: | 2661 | case isci_perform_error_io_completion: |
| 2675 | /* Use sas_task_abort */ | 2662 | /* Use sas_task_abort */ |
| 2676 | dev_dbg(&host->pdev->dev, | ||
| 2677 | "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", | ||
| 2678 | __func__, | ||
| 2679 | task, | ||
| 2680 | task->task_status.resp, response, | ||
| 2681 | task->task_status.stat, status); | ||
| 2682 | /* Add to the aborted list. */ | 2663 | /* Add to the aborted list. */ |
| 2683 | list_add(&request->completed_node, | 2664 | list_add(&request->completed_node, |
| 2684 | &host->requests_to_errorback); | 2665 | &host->requests_to_errorback); |
| 2685 | break; | 2666 | break; |
| 2686 | 2667 | ||
| 2687 | default: | 2668 | default: |
| 2688 | dev_dbg(&host->pdev->dev, | ||
| 2689 | "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", | ||
| 2690 | __func__, | ||
| 2691 | task, | ||
| 2692 | task->task_status.resp, response, | ||
| 2693 | task->task_status.stat, status); | ||
| 2694 | |||
| 2695 | /* Add to the error to libsas list. */ | 2669 | /* Add to the error to libsas list. */ |
| 2696 | list_add(&request->completed_node, | 2670 | list_add(&request->completed_node, |
| 2697 | &host->requests_to_errorback); | 2671 | &host->requests_to_errorback); |
| 2698 | break; | 2672 | break; |
| 2699 | } | 2673 | } |
| 2674 | dev_dbg(&host->pdev->dev, | ||
| 2675 | "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", | ||
| 2676 | __func__, task_notification_selection, task, | ||
| 2677 | (task) ? task->task_status.resp : 0, response, | ||
| 2678 | (task) ? task->task_status.stat : 0, status); | ||
| 2700 | } | 2679 | } |
| 2701 | 2680 | ||
| 2702 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) | 2681 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) |
| @@ -2728,9 +2707,9 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
| 2728 | struct sas_task *task = isci_request_access_task(request); | 2707 | struct sas_task *task = isci_request_access_task(request); |
| 2729 | struct ssp_response_iu *resp_iu; | 2708 | struct ssp_response_iu *resp_iu; |
| 2730 | unsigned long task_flags; | 2709 | unsigned long task_flags; |
| 2731 | struct isci_remote_device *idev = isci_lookup_device(task->dev); | 2710 | struct isci_remote_device *idev = request->target_device; |
| 2732 | enum service_response response = SAS_TASK_UNDELIVERED; | 2711 | enum service_response response = SAS_TASK_UNDELIVERED; |
| 2733 | enum exec_status status = SAS_ABORTED_TASK; | 2712 | enum exec_status status = SAS_ABORTED_TASK; |
| 2734 | enum isci_request_status request_status; | 2713 | enum isci_request_status request_status; |
| 2735 | enum isci_completion_selection complete_to_host | 2714 | enum isci_completion_selection complete_to_host |
| 2736 | = isci_perform_normal_io_completion; | 2715 | = isci_perform_normal_io_completion; |
| @@ -3061,7 +3040,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
| 3061 | 3040 | ||
| 3062 | /* complete the io request to the core. */ | 3041 | /* complete the io request to the core. */ |
| 3063 | sci_controller_complete_io(ihost, request->target_device, request); | 3042 | sci_controller_complete_io(ihost, request->target_device, request); |
| 3064 | isci_put_device(idev); | ||
| 3065 | 3043 | ||
| 3066 | /* set terminated handle so it cannot be completed or | 3044 | /* set terminated handle so it cannot be completed or |
| 3067 | * terminated again, and to cause any calls into abort | 3045 | * terminated again, and to cause any calls into abort |
| @@ -3080,7 +3058,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) | |||
| 3080 | /* XXX as hch said always creating an internal sas_task for tmf | 3058 | /* XXX as hch said always creating an internal sas_task for tmf |
| 3081 | * requests would simplify the driver | 3059 | * requests would simplify the driver |
| 3082 | */ | 3060 | */ |
| 3083 | task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; | 3061 | task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); |
| 3084 | 3062 | ||
| 3085 | /* all unaccelerated request types (non ssp or ncq) handled with | 3063 | /* all unaccelerated request types (non ssp or ncq) handled with |
| 3086 | * substates | 3064 | * substates |
| @@ -3564,7 +3542,7 @@ static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, | |||
| 3564 | 3542 | ||
| 3565 | ireq = isci_request_from_tag(ihost, tag); | 3543 | ireq = isci_request_from_tag(ihost, tag); |
| 3566 | ireq->ttype_ptr.io_task_ptr = task; | 3544 | ireq->ttype_ptr.io_task_ptr = task; |
| 3567 | ireq->ttype = io_task; | 3545 | clear_bit(IREQ_TMF, &ireq->flags); |
| 3568 | task->lldd_task = ireq; | 3546 | task->lldd_task = ireq; |
| 3569 | 3547 | ||
| 3570 | return ireq; | 3548 | return ireq; |
| @@ -3578,7 +3556,7 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | |||
| 3578 | 3556 | ||
| 3579 | ireq = isci_request_from_tag(ihost, tag); | 3557 | ireq = isci_request_from_tag(ihost, tag); |
| 3580 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; | 3558 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; |
| 3581 | ireq->ttype = tmf_task; | 3559 | set_bit(IREQ_TMF, &ireq->flags); |
| 3582 | 3560 | ||
| 3583 | return ireq; | 3561 | return ireq; |
| 3584 | } | 3562 | } |
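
Taken with the request.h hunk that follows, the request.c changes replace the two-value enum task_type field with a single IREQ_TMF bit in the request's existing flags word, so the TMF-versus-IO question is answered with the same test_bit()/set_bit()/clear_bit() helpers already used for the other IREQ_* flags. A reduced sketch of the idiom is below; the struct is trimmed down, and only the IREQ_ACTIVE value (3) is visible in the context above, so the IREQ_TMF bit position here is an assumption for illustration.

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define IREQ_TMF	2	/* assumed bit position, for illustration */
	#define IREQ_ACTIVE	3	/* as shown in the request.h context */

	struct ireq_stub {
		unsigned long flags;	/* holds all IREQ_* bits */
	};

	static void mark_as_tmf(struct ireq_stub *ireq)
	{
		set_bit(IREQ_TMF, &ireq->flags);	/* was: ireq->ttype = tmf_task */
	}

	static void mark_as_io(struct ireq_stub *ireq)
	{
		clear_bit(IREQ_TMF, &ireq->flags);	/* was: ireq->ttype = io_task */
	}

	static bool ireq_is_tmf(struct ireq_stub *ireq)
	{
		return test_bit(IREQ_TMF, &ireq->flags);	/* was: ireq->ttype == tmf_task */
	}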
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index f720b97b7bb5..be38933dd6df 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
| @@ -77,11 +77,6 @@ enum isci_request_status { | |||
| 77 | dead = 0x07 | 77 | dead = 0x07 |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | enum task_type { | ||
| 81 | io_task = 0, | ||
| 82 | tmf_task = 1 | ||
| 83 | }; | ||
| 84 | |||
| 85 | enum sci_request_protocol { | 80 | enum sci_request_protocol { |
| 86 | SCIC_NO_PROTOCOL, | 81 | SCIC_NO_PROTOCOL, |
| 87 | SCIC_SMP_PROTOCOL, | 82 | SCIC_SMP_PROTOCOL, |
| @@ -116,7 +111,6 @@ struct isci_request { | |||
| 116 | #define IREQ_ACTIVE 3 | 111 | #define IREQ_ACTIVE 3 |
| 117 | unsigned long flags; | 112 | unsigned long flags; |
| 118 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ | 113 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ |
| 119 | enum task_type ttype; | ||
| 120 | union ttype_ptr_union { | 114 | union ttype_ptr_union { |
| 121 | struct sas_task *io_task_ptr; /* When ttype==io_task */ | 115 | struct sas_task *io_task_ptr; /* When ttype==io_task */ |
| 122 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ | 116 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index e2d9418683ce..66ad3dc89498 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
| @@ -212,16 +212,27 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
| 212 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 212 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
| 213 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 213 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
| 214 | 214 | ||
| 215 | /* Indicate QUEUE_FULL so that the scsi | 215 | if (test_bit(IDEV_GONE, &idev->flags)) { |
| 216 | * midlayer retries. if the request | 216 | |
| 217 | * failed for remote device reasons, | 217 | /* Indicate that the device |
| 218 | * it gets returned as | 218 | * is gone. |
| 219 | * SAS_TASK_UNDELIVERED next time | 219 | */ |
| 220 | * through. | 220 | isci_task_refuse(ihost, task, |
| 221 | */ | 221 | SAS_TASK_UNDELIVERED, |
| 222 | isci_task_refuse(ihost, task, | 222 | SAS_DEVICE_UNKNOWN); |
| 223 | SAS_TASK_COMPLETE, | 223 | } else { |
| 224 | SAS_QUEUE_FULL); | 224 | /* Indicate QUEUE_FULL so that |
| 225 | * the scsi midlayer retries. | ||
| 226 | * If the request failed for | ||
| 227 | * remote device reasons, it | ||
| 228 | * gets returned as | ||
| 229 | * SAS_TASK_UNDELIVERED next | ||
| 230 | * time through. | ||
| 231 | */ | ||
| 232 | isci_task_refuse(ihost, task, | ||
| 233 | SAS_TASK_COMPLETE, | ||
| 234 | SAS_QUEUE_FULL); | ||
| 235 | } | ||
| 225 | } | 236 | } |
| 226 | } | 237 | } |
| 227 | } | 238 | } |
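
The hunk above changes how a task that cannot be started is bounced back to libsas: a device already flagged IDEV_GONE is answered with SAS_TASK_UNDELIVERED/SAS_DEVICE_UNKNOWN so the midlayer stops retrying, while every other failure keeps the old SAS_TASK_COMPLETE/SAS_QUEUE_FULL answer that makes the SCSI midlayer retry. The decision in isolation is sketched below as a hypothetical helper; the driver itself calls isci_task_refuse() inline with these pairs.

	#include <scsi/libsas.h>

	/* Sketch only: choose the (response, status) pair used to refuse a task. */
	static void choose_refusal(bool device_gone,
				   enum service_response *resp,
				   enum exec_status *stat)
	{
		if (device_gone) {
			/* The device is gone: report the command as never
			 * delivered so no further retries are attempted. */
			*resp = SAS_TASK_UNDELIVERED;
			*stat = SAS_DEVICE_UNKNOWN;
		} else {
			/* Transient condition: report QUEUE_FULL so the SCSI
			 * midlayer retries the command later. */
			*resp = SAS_TASK_COMPLETE;
			*stat = SAS_QUEUE_FULL;
		}
	}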
| @@ -243,7 +254,7 @@ static enum sci_status isci_sata_management_task_request_build(struct isci_reque | |||
| 243 | struct isci_tmf *isci_tmf; | 254 | struct isci_tmf *isci_tmf; |
| 244 | enum sci_status status; | 255 | enum sci_status status; |
| 245 | 256 | ||
| 246 | if (tmf_task != ireq->ttype) | 257 | if (!test_bit(IREQ_TMF, &ireq->flags)) |
| 247 | return SCI_FAILURE; | 258 | return SCI_FAILURE; |
| 248 | 259 | ||
| 249 | isci_tmf = isci_request_access_tmf(ireq); | 260 | isci_tmf = isci_request_access_tmf(ireq); |
| @@ -327,6 +338,60 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
| 327 | return ireq; | 338 | return ireq; |
| 328 | } | 339 | } |
| 329 | 340 | ||
| 341 | /** | ||
| 342 | * isci_request_mark_zombie() - This function must be called with scic_lock held. | ||
| 343 | */ | ||
| 344 | static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) | ||
| 345 | { | ||
| 346 | struct completion *tmf_completion = NULL; | ||
| 347 | struct completion *req_completion; | ||
| 348 | |||
| 349 | /* Set the request state to "dead". */ | ||
| 350 | ireq->status = dead; | ||
| 351 | |||
| 352 | req_completion = ireq->io_request_completion; | ||
| 353 | ireq->io_request_completion = NULL; | ||
| 354 | |||
| 355 | if (test_bit(IREQ_TMF, &ireq->flags)) { | ||
| 356 | /* Break links with the TMF request. */ | ||
| 357 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | ||
| 358 | |||
| 359 | /* In the case where a task request is dying, | ||
| 360 | * the thread waiting on the complete will sit and | ||
| 361 | * timeout unless we wake it now. Since the TMF | ||
| 362 | * has a default error status, complete it here | ||
| 363 | * to wake the waiting thread. | ||
| 364 | */ | ||
| 365 | if (tmf) { | ||
| 366 | tmf_completion = tmf->complete; | ||
| 367 | tmf->complete = NULL; | ||
| 368 | } | ||
| 369 | ireq->ttype_ptr.tmf_task_ptr = NULL; | ||
| 370 | dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", | ||
| 371 | __func__, tmf->tmf_code, tmf->io_tag); | ||
| 372 | } else { | ||
| 373 | /* Break links with the sas_task - the callback is done | ||
| 374 | * elsewhere. | ||
| 375 | */ | ||
| 376 | struct sas_task *task = isci_request_access_task(ireq); | ||
| 377 | |||
| 378 | if (task) | ||
| 379 | task->lldd_task = NULL; | ||
| 380 | |||
| 381 | ireq->ttype_ptr.io_task_ptr = NULL; | ||
| 382 | } | ||
| 383 | |||
| 384 | dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", | ||
| 385 | ireq->io_tag); | ||
| 386 | |||
| 387 | /* Don't force waiting threads to timeout. */ | ||
| 388 | if (req_completion) | ||
| 389 | complete(req_completion); | ||
| 390 | |||
| 391 | if (tmf_completion != NULL) | ||
| 392 | complete(tmf_completion); | ||
| 393 | } | ||
| 394 | |||
| 330 | static int isci_task_execute_tmf(struct isci_host *ihost, | 395 | static int isci_task_execute_tmf(struct isci_host *ihost, |
| 331 | struct isci_remote_device *idev, | 396 | struct isci_remote_device *idev, |
| 332 | struct isci_tmf *tmf, unsigned long timeout_ms) | 397 | struct isci_tmf *tmf, unsigned long timeout_ms) |
| @@ -364,6 +429,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
| 364 | 429 | ||
| 365 | /* Assign the pointer to the TMF's completion kernel wait structure. */ | 430 | /* Assign the pointer to the TMF's completion kernel wait structure. */ |
| 366 | tmf->complete = &completion; | 431 | tmf->complete = &completion; |
| 432 | tmf->status = SCI_FAILURE_TIMEOUT; | ||
| 367 | 433 | ||
| 368 | ireq = isci_task_request_build(ihost, idev, tag, tmf); | 434 | ireq = isci_task_request_build(ihost, idev, tag, tmf); |
| 369 | if (!ireq) | 435 | if (!ireq) |
| @@ -399,18 +465,35 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
| 399 | msecs_to_jiffies(timeout_ms)); | 465 | msecs_to_jiffies(timeout_ms)); |
| 400 | 466 | ||
| 401 | if (timeleft == 0) { | 467 | if (timeleft == 0) { |
| 468 | /* The TMF did not complete - this could be because | ||
| 469 | * of an unplug. Terminate the TMF request now. | ||
| 470 | */ | ||
| 402 | spin_lock_irqsave(&ihost->scic_lock, flags); | 471 | spin_lock_irqsave(&ihost->scic_lock, flags); |
| 403 | 472 | ||
| 404 | if (tmf->cb_state_func != NULL) | 473 | if (tmf->cb_state_func != NULL) |
| 405 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); | 474 | tmf->cb_state_func(isci_tmf_timed_out, tmf, |
| 475 | tmf->cb_data); | ||
| 406 | 476 | ||
| 407 | sci_controller_terminate_request(ihost, | 477 | sci_controller_terminate_request(ihost, idev, ireq); |
| 408 | idev, | ||
| 409 | ireq); | ||
| 410 | 478 | ||
| 411 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 479 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
| 412 | 480 | ||
| 413 | wait_for_completion(tmf->complete); | 481 | timeleft = wait_for_completion_timeout( |
| 482 | &completion, | ||
| 483 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); | ||
| 484 | |||
| 485 | if (!timeleft) { | ||
| 486 | /* Strange condition - the termination of the TMF | ||
| 487 | * request timed-out. | ||
| 488 | */ | ||
| 489 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
| 490 | |||
| 491 | /* If the TMF status has not changed, kill it. */ | ||
| 492 | if (tmf->status == SCI_FAILURE_TIMEOUT) | ||
| 493 | isci_request_mark_zombie(ihost, ireq); | ||
| 494 | |||
| 495 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
| 496 | } | ||
| 414 | } | 497 | } |
| 415 | 498 | ||
| 416 | isci_print_tmf(tmf); | 499 | isci_print_tmf(tmf); |
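
The hunk above turns the old open-ended wait_for_completion() into a staged escalation: wait for the TMF with a timeout; on expiry, terminate the request and wait again for a bounded ISCI_TERMINATION_TIMEOUT_MSEC; if even that wait expires and the TMF status is still the pre-seeded SCI_FAILURE_TIMEOUT, the request is marked a zombie so no waiter is left blocked. The skeleton of that flow is sketched below with locking elided; terminate(), mark_zombie() and still_timed_out() are placeholders for the driver's sci_controller_terminate_request(), isci_request_mark_zombie() and the tmf->status check.

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	/* Sketch of the staged wait; callbacks stand in for the driver calls. */
	static int wait_with_escalation(struct completion *done,
					unsigned long tmf_timeout_ms,
					unsigned long term_timeout_ms,
					void (*terminate)(void *ctx),
					void (*mark_zombie)(void *ctx),
					bool (*still_timed_out)(void *ctx),
					void *ctx)
	{
		unsigned long left;

		left = wait_for_completion_timeout(done,
						   msecs_to_jiffies(tmf_timeout_ms));
		if (left)
			return 0;		/* TMF completed in time */

		terminate(ctx);			/* stage two: terminate the request */

		left = wait_for_completion_timeout(done,
						   msecs_to_jiffies(term_timeout_ms));
		if (!left && still_timed_out(ctx))
			mark_zombie(ctx);	/* stage three: never strand a waiter */

		return -ETIMEDOUT;
	}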
| @@ -501,48 +584,17 @@ static enum isci_request_status isci_task_validate_request_to_abort( | |||
| 501 | return old_state; | 584 | return old_state; |
| 502 | } | 585 | } |
| 503 | 586 | ||
| 504 | /** | 587 | static int isci_request_is_dealloc_managed(enum isci_request_status stat) |
| 505 | * isci_request_cleanup_completed_loiterer() - This function will take care of | ||
| 506 | * the final cleanup on any request which has been explicitly terminated. | ||
| 507 | * @isci_host: This parameter specifies the ISCI host object | ||
| 508 | * @isci_device: This is the device to which the request is pending. | ||
| 509 | * @isci_request: This parameter specifies the terminated request object. | ||
| 510 | * @task: This parameter is the libsas I/O request. | ||
| 511 | */ | ||
| 512 | static void isci_request_cleanup_completed_loiterer( | ||
| 513 | struct isci_host *isci_host, | ||
| 514 | struct isci_remote_device *isci_device, | ||
| 515 | struct isci_request *isci_request, | ||
| 516 | struct sas_task *task) | ||
| 517 | { | 588 | { |
| 518 | unsigned long flags; | 589 | switch (stat) { |
| 519 | 590 | case aborted: | |
| 520 | dev_dbg(&isci_host->pdev->dev, | 591 | case aborting: |
| 521 | "%s: isci_device=%p, request=%p, task=%p\n", | 592 | case terminating: |
| 522 | __func__, isci_device, isci_request, task); | 593 | case completed: |
| 523 | 594 | case dead: | |
| 524 | if (task != NULL) { | 595 | return true; |
| 525 | 596 | default: | |
| 526 | spin_lock_irqsave(&task->task_state_lock, flags); | 597 | return false; |
| 527 | task->lldd_task = NULL; | ||
| 528 | |||
| 529 | task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; | ||
| 530 | |||
| 531 | isci_set_task_doneflags(task); | ||
| 532 | |||
| 533 | /* If this task is not in the abort path, call task_done. */ | ||
| 534 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
| 535 | |||
| 536 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 537 | task->task_done(task); | ||
| 538 | } else | ||
| 539 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 540 | } | ||
| 541 | |||
| 542 | if (isci_request != NULL) { | ||
| 543 | spin_lock_irqsave(&isci_host->scic_lock, flags); | ||
| 544 | list_del_init(&isci_request->dev_node); | ||
| 545 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
| 546 | } | 598 | } |
| 547 | } | 599 | } |
| 548 | 600 | ||
| @@ -563,11 +615,9 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
| 563 | enum sci_status status = SCI_SUCCESS; | 615 | enum sci_status status = SCI_SUCCESS; |
| 564 | bool was_terminated = false; | 616 | bool was_terminated = false; |
| 565 | bool needs_cleanup_handling = false; | 617 | bool needs_cleanup_handling = false; |
| 566 | enum isci_request_status request_status; | ||
| 567 | unsigned long flags; | 618 | unsigned long flags; |
| 568 | unsigned long termination_completed = 1; | 619 | unsigned long termination_completed = 1; |
| 569 | struct completion *io_request_completion; | 620 | struct completion *io_request_completion; |
| 570 | struct sas_task *task; | ||
| 571 | 621 | ||
| 572 | dev_dbg(&ihost->pdev->dev, | 622 | dev_dbg(&ihost->pdev->dev, |
| 573 | "%s: device = %p; request = %p\n", | 623 | "%s: device = %p; request = %p\n", |
| @@ -577,10 +627,6 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
| 577 | 627 | ||
| 578 | io_request_completion = isci_request->io_request_completion; | 628 | io_request_completion = isci_request->io_request_completion; |
| 579 | 629 | ||
| 580 | task = (isci_request->ttype == io_task) | ||
| 581 | ? isci_request_access_task(isci_request) | ||
| 582 | : NULL; | ||
| 583 | |||
| 584 | /* Note that we are not going to control | 630 | /* Note that we are not going to control |
| 585 | * the target to abort the request. | 631 | * the target to abort the request. |
| 586 | */ | 632 | */ |
| @@ -619,42 +665,27 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
| 619 | __func__, isci_request, io_request_completion); | 665 | __func__, isci_request, io_request_completion); |
| 620 | 666 | ||
| 621 | /* Wait here for the request to complete. */ | 667 | /* Wait here for the request to complete. */ |
| 622 | #define TERMINATION_TIMEOUT_MSEC 500 | ||
| 623 | termination_completed | 668 | termination_completed |
| 624 | = wait_for_completion_timeout( | 669 | = wait_for_completion_timeout( |
| 625 | io_request_completion, | 670 | io_request_completion, |
| 626 | msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC)); | 671 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); |
| 627 | 672 | ||
| 628 | if (!termination_completed) { | 673 | if (!termination_completed) { |
| 629 | 674 | ||
| 630 | /* The request to terminate has timed out. */ | 675 | /* The request to terminate has timed out. */ |
| 631 | spin_lock_irqsave(&ihost->scic_lock, | 676 | spin_lock_irqsave(&ihost->scic_lock, flags); |
| 632 | flags); | ||
| 633 | 677 | ||
| 634 | /* Check for state changes. */ | 678 | /* Check for state changes. */ |
| 635 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | 679 | if (!test_bit(IREQ_TERMINATED, |
| 680 | &isci_request->flags)) { | ||
| 636 | 681 | ||
| 637 | /* The best we can do is to have the | 682 | /* The best we can do is to have the |
| 638 | * request die a silent death if it | 683 | * request die a silent death if it |
| 639 | * ever really completes. | 684 | * ever really completes. |
| 640 | * | ||
| 641 | * Set the request state to "dead", | ||
| 642 | * and clear the task pointer so that | ||
| 643 | * an actual completion event callback | ||
| 644 | * doesn't do anything. | ||
| 645 | */ | 685 | */ |
| 646 | isci_request->status = dead; | 686 | isci_request_mark_zombie(ihost, |
| 647 | isci_request->io_request_completion | 687 | isci_request); |
| 648 | = NULL; | 688 | needs_cleanup_handling = true; |
| 649 | |||
| 650 | if (isci_request->ttype == io_task) { | ||
| 651 | |||
| 652 | /* Break links with the | ||
| 653 | * sas_task. | ||
| 654 | */ | ||
| 655 | isci_request->ttype_ptr.io_task_ptr | ||
| 656 | = NULL; | ||
| 657 | } | ||
| 658 | } else | 689 | } else |
| 659 | termination_completed = 1; | 690 | termination_completed = 1; |
| 660 | 691 | ||
| @@ -691,29 +722,28 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
| 691 | * needs to be detached and freed here. | 722 | * needs to be detached and freed here. |
| 692 | */ | 723 | */ |
| 693 | spin_lock_irqsave(&isci_request->state_lock, flags); | 724 | spin_lock_irqsave(&isci_request->state_lock, flags); |
| 694 | request_status = isci_request->status; | 725 | |
| 695 | 726 | needs_cleanup_handling | |
| 696 | if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ | 727 | = isci_request_is_dealloc_managed( |
| 697 | && ((request_status == aborted) | 728 | isci_request->status); |
| 698 | || (request_status == aborting) | 729 | |
| 699 | || (request_status == terminating) | ||
| 700 | || (request_status == completed) | ||
| 701 | || (request_status == dead) | ||
| 702 | ) | ||
| 703 | ) { | ||
| 704 | |||
| 705 | /* The completion routine won't free a request in | ||
| 706 | * the aborted/aborting/etc. states, so we do | ||
| 707 | * it here. | ||
| 708 | */ | ||
| 709 | needs_cleanup_handling = true; | ||
| 710 | } | ||
| 711 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | 730 | spin_unlock_irqrestore(&isci_request->state_lock, flags); |
| 712 | 731 | ||
| 713 | } | 732 | } |
| 714 | if (needs_cleanup_handling) | 733 | if (needs_cleanup_handling) { |
| 715 | isci_request_cleanup_completed_loiterer( | 734 | |
| 716 | ihost, idev, isci_request, task); | 735 | dev_dbg(&ihost->pdev->dev, |
| 736 | "%s: cleanup isci_device=%p, request=%p\n", | ||
| 737 | __func__, idev, isci_request); | ||
| 738 | |||
| 739 | if (isci_request != NULL) { | ||
| 740 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
| 741 | isci_free_tag(ihost, isci_request->io_tag); | ||
| 742 | isci_request_change_state(isci_request, unallocated); | ||
| 743 | list_del_init(&isci_request->dev_node); | ||
| 744 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
| 745 | } | ||
| 746 | } | ||
| 717 | } | 747 | } |
| 718 | } | 748 | } |
| 719 | 749 | ||
| @@ -772,7 +802,9 @@ void isci_terminate_pending_requests(struct isci_host *ihost, | |||
| 772 | dev_dbg(&ihost->pdev->dev, | 802 | dev_dbg(&ihost->pdev->dev, |
| 773 | "%s: idev=%p request=%p; task=%p old_state=%d\n", | 803 | "%s: idev=%p request=%p; task=%p old_state=%d\n", |
| 774 | __func__, idev, ireq, | 804 | __func__, idev, ireq, |
| 775 | ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL, | 805 | (!test_bit(IREQ_TMF, &ireq->flags) |
| 806 | ? isci_request_access_task(ireq) | ||
| 807 | : NULL), | ||
| 776 | old_state); | 808 | old_state); |
| 777 | 809 | ||
| 778 | /* If the old_state is started: | 810 | /* If the old_state is started: |
| @@ -889,22 +921,14 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) | |||
| 889 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", | 921 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", |
| 890 | __func__, domain_device, isci_host, isci_device); | 922 | __func__, domain_device, isci_host, isci_device); |
| 891 | 923 | ||
| 892 | if (isci_device) | 924 | if (!isci_device) { |
| 893 | set_bit(IDEV_EH, &isci_device->flags); | 925 | /* If the device is gone, stop the escalations. */ |
| 926 | dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__); | ||
| 894 | 927 | ||
| 895 | /* If there is a device reset pending on any request in the | 928 | ret = TMF_RESP_FUNC_COMPLETE; |
| 896 | * device's list, fail this LUN reset request in order to | ||
| 897 | * escalate to the device reset. | ||
| 898 | */ | ||
| 899 | if (!isci_device || | ||
| 900 | isci_device_is_reset_pending(isci_host, isci_device)) { | ||
| 901 | dev_dbg(&isci_host->pdev->dev, | ||
| 902 | "%s: No dev (%p), or " | ||
| 903 | "RESET PENDING: domain_device=%p\n", | ||
| 904 | __func__, isci_device, domain_device); | ||
| 905 | ret = TMF_RESP_FUNC_FAILED; | ||
| 906 | goto out; | 929 | goto out; |
| 907 | } | 930 | } |
| 931 | set_bit(IDEV_EH, &isci_device->flags); | ||
| 908 | 932 | ||
| 909 | /* Send the task management part of the reset. */ | 933 | /* Send the task management part of the reset. */ |
| 910 | if (sas_protocol_ata(domain_device->tproto)) { | 934 | if (sas_protocol_ata(domain_device->tproto)) { |
| @@ -1013,7 +1037,7 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 1013 | struct isci_tmf tmf; | 1037 | struct isci_tmf tmf; |
| 1014 | int ret = TMF_RESP_FUNC_FAILED; | 1038 | int ret = TMF_RESP_FUNC_FAILED; |
| 1015 | unsigned long flags; | 1039 | unsigned long flags; |
| 1016 | bool any_dev_reset = false; | 1040 | int perform_termination = 0; |
| 1017 | 1041 | ||
| 1018 | /* Get the isci_request reference from the task. Note that | 1042 | /* Get the isci_request reference from the task. Note that |
| 1019 | * this check does not depend on the pending request list | 1043 | * this check does not depend on the pending request list |
| @@ -1035,89 +1059,34 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 1035 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 1059 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
| 1036 | 1060 | ||
| 1037 | dev_dbg(&isci_host->pdev->dev, | 1061 | dev_dbg(&isci_host->pdev->dev, |
| 1038 | "%s: task = %p\n", __func__, task); | 1062 | "%s: dev = %p, task = %p, old_request == %p\n", |
| 1039 | 1063 | __func__, isci_device, task, old_request); | |
| 1040 | if (!isci_device || !old_request) | ||
| 1041 | goto out; | ||
| 1042 | |||
| 1043 | set_bit(IDEV_EH, &isci_device->flags); | ||
| 1044 | |||
| 1045 | /* This version of the driver will fail abort requests for | ||
| 1046 | * SATA/STP. Failing the abort request this way will cause the | ||
| 1047 | * SCSI error handler thread to escalate to LUN reset | ||
| 1048 | */ | ||
| 1049 | if (sas_protocol_ata(task->task_proto)) { | ||
| 1050 | dev_dbg(&isci_host->pdev->dev, | ||
| 1051 | " task %p is for a STP/SATA device;" | ||
| 1052 | " returning TMF_RESP_FUNC_FAILED\n" | ||
| 1053 | " to cause a LUN reset...\n", task); | ||
| 1054 | goto out; | ||
| 1055 | } | ||
| 1056 | 1064 | ||
| 1057 | dev_dbg(&isci_host->pdev->dev, | 1065 | if (isci_device) |
| 1058 | "%s: old_request == %p\n", __func__, old_request); | 1066 | set_bit(IDEV_EH, &isci_device->flags); |
| 1059 | |||
| 1060 | any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device); | ||
| 1061 | |||
| 1062 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
| 1063 | |||
| 1064 | any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET); | ||
| 1065 | 1067 | ||
| 1066 | /* If the extraction of the request reference from the task | 1068 | /* Device reset conditions signalled in task_state_flags are the |
| 1067 | * failed, then the request has been completed (or if there is a | 1069 | * responsibility of libsas to observe at the start of the error |
| 1068 | * pending reset then this abort request function must be failed | 1070 | * handler thread. |
| 1069 | * in order to escalate to the target reset). | ||
| 1070 | */ | 1071 | */ |
| 1071 | if ((old_request == NULL) || any_dev_reset) { | 1072 | if (!isci_device || !old_request) { |
| 1072 | 1073 | /* The request has already completed and there | |
| 1073 | /* If the device reset task flag is set, fail the task | 1074 | * is nothing to do here other than to set the task |
| 1074 | * management request. Otherwise, the original request | 1075 | * done bit, and indicate that the task abort function |
| 1075 | * has completed. | 1076 | * was successful. |
| 1076 | */ | 1077 | */ |
| 1077 | if (any_dev_reset) { | 1078 | spin_lock_irqsave(&task->task_state_lock, flags); |
| 1078 | 1079 | task->task_state_flags |= SAS_TASK_STATE_DONE; | |
| 1079 | /* Turn off the task's DONE to make sure this | 1080 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | |
| 1080 | * task is escalated to a target reset. | 1081 | SAS_TASK_STATE_PENDING); |
| 1081 | */ | 1082 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
| 1082 | task->task_state_flags &= ~SAS_TASK_STATE_DONE; | ||
| 1083 | |||
| 1084 | /* Make the reset happen as soon as possible. */ | ||
| 1085 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
| 1086 | |||
| 1087 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1088 | |||
| 1089 | /* Fail the task management request in order to | ||
| 1090 | * escalate to the target reset. | ||
| 1091 | */ | ||
| 1092 | ret = TMF_RESP_FUNC_FAILED; | ||
| 1093 | |||
| 1094 | dev_dbg(&isci_host->pdev->dev, | ||
| 1095 | "%s: Failing task abort in order to " | ||
| 1096 | "escalate to target reset because\n" | ||
| 1097 | "SAS_TASK_NEED_DEV_RESET is set for " | ||
| 1098 | "task %p on dev %p\n", | ||
| 1099 | __func__, task, isci_device); | ||
| 1100 | |||
| 1101 | |||
| 1102 | } else { | ||
| 1103 | /* The request has already completed and there | ||
| 1104 | * is nothing to do here other than to set the task | ||
| 1105 | * done bit, and indicate that the task abort function | ||
| 1106 | * was sucessful. | ||
| 1107 | */ | ||
| 1108 | isci_set_task_doneflags(task); | ||
| 1109 | |||
| 1110 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1111 | 1083 | ||
| 1112 | ret = TMF_RESP_FUNC_COMPLETE; | 1084 | ret = TMF_RESP_FUNC_COMPLETE; |
| 1113 | 1085 | ||
| 1114 | dev_dbg(&isci_host->pdev->dev, | 1086 | dev_dbg(&isci_host->pdev->dev, |
| 1115 | "%s: abort task not needed for %p\n", | 1087 | "%s: abort task not needed for %p\n", |
| 1116 | __func__, task); | 1088 | __func__, task); |
| 1117 | } | ||
| 1118 | goto out; | 1089 | goto out; |
| 1119 | } else { | ||
| 1120 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1121 | } | 1090 | } |
| 1122 | 1091 | ||
| 1123 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 1092 | spin_lock_irqsave(&isci_host->scic_lock, flags); |
| @@ -1146,24 +1115,44 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 1146 | goto out; | 1115 | goto out; |
| 1147 | } | 1116 | } |
| 1148 | if (task->task_proto == SAS_PROTOCOL_SMP || | 1117 | if (task->task_proto == SAS_PROTOCOL_SMP || |
| 1118 | sas_protocol_ata(task->task_proto) || | ||
| 1149 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { | 1119 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { |
| 1150 | 1120 | ||
| 1151 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 1121 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
| 1152 | 1122 | ||
| 1153 | dev_dbg(&isci_host->pdev->dev, | 1123 | dev_dbg(&isci_host->pdev->dev, |
| 1154 | "%s: SMP request (%d)" | 1124 | "%s: %s request" |
| 1155 | " or complete_in_target (%d), thus no TMF\n", | 1125 | " or complete_in_target (%d), thus no TMF\n", |
| 1156 | __func__, (task->task_proto == SAS_PROTOCOL_SMP), | 1126 | __func__, |
| 1127 | ((task->task_proto == SAS_PROTOCOL_SMP) | ||
| 1128 | ? "SMP" | ||
| 1129 | : (sas_protocol_ata(task->task_proto) | ||
| 1130 | ? "SATA/STP" | ||
| 1131 | : "<other>") | ||
| 1132 | ), | ||
| 1157 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); | 1133 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); |
| 1158 | 1134 | ||
| 1159 | /* Set the state on the task. */ | 1135 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { |
| 1160 | isci_task_all_done(task); | 1136 | spin_lock_irqsave(&task->task_state_lock, flags); |
| 1161 | 1137 | task->task_state_flags |= SAS_TASK_STATE_DONE; | |
| 1162 | ret = TMF_RESP_FUNC_COMPLETE; | 1138 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | |
| 1139 | SAS_TASK_STATE_PENDING); | ||
| 1140 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1141 | ret = TMF_RESP_FUNC_COMPLETE; | ||
| 1142 | } else { | ||
| 1143 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
| 1144 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
| 1145 | SAS_TASK_STATE_PENDING); | ||
| 1146 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1147 | } | ||
| 1163 | 1148 | ||
| 1164 | /* Stopping and SMP devices are not sent a TMF, and are not | 1149 | /* STP and SMP devices are not sent a TMF, but the |
| 1165 | * reset, but the outstanding I/O request is terminated below. | 1150 | * outstanding I/O request is terminated below. This is |
| 1151 | * because SATA/STP and SMP discovery path timeouts directly | ||
| 1152 | * call the abort task interface for cleanup. | ||
| 1166 | */ | 1153 | */ |
| 1154 | perform_termination = 1; | ||
| 1155 | |||
| 1167 | } else { | 1156 | } else { |
| 1168 | /* Fill in the tmf structure */ | 1157 | /* Fill in the tmf structure */ |
| 1169 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, | 1158 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, |
| @@ -1172,22 +1161,24 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 1172 | 1161 | ||
| 1173 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 1162 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
| 1174 | 1163 | ||
| 1175 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */ | 1164 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ |
| 1176 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, | 1165 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, |
| 1177 | ISCI_ABORT_TASK_TIMEOUT_MS); | 1166 | ISCI_ABORT_TASK_TIMEOUT_MS); |
| 1178 | 1167 | ||
| 1179 | if (ret != TMF_RESP_FUNC_COMPLETE) | 1168 | if (ret == TMF_RESP_FUNC_COMPLETE) |
| 1169 | perform_termination = 1; | ||
| 1170 | else | ||
| 1180 | dev_dbg(&isci_host->pdev->dev, | 1171 | dev_dbg(&isci_host->pdev->dev, |
| 1181 | "%s: isci_task_send_tmf failed\n", | 1172 | "%s: isci_task_send_tmf failed\n", __func__); |
| 1182 | __func__); | ||
| 1183 | } | 1173 | } |
| 1184 | if (ret == TMF_RESP_FUNC_COMPLETE) { | 1174 | if (perform_termination) { |
| 1185 | set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); | 1175 | set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); |
| 1186 | 1176 | ||
| 1187 | /* Clean up the request on our side, and wait for the aborted | 1177 | /* Clean up the request on our side, and wait for the aborted |
| 1188 | * I/O to complete. | 1178 | * I/O to complete. |
| 1189 | */ | 1179 | */ |
| 1190 | isci_terminate_request_core(isci_host, isci_device, old_request); | 1180 | isci_terminate_request_core(isci_host, isci_device, |
| 1181 | old_request); | ||
| 1191 | } | 1182 | } |
| 1192 | 1183 | ||
| 1193 | /* Make sure we do not leave a reference to aborted_io_completion */ | 1184 | /* Make sure we do not leave a reference to aborted_io_completion */ |
| @@ -1288,7 +1279,8 @@ isci_task_request_complete(struct isci_host *ihost, | |||
| 1288 | enum sci_task_status completion_status) | 1279 | enum sci_task_status completion_status) |
| 1289 | { | 1280 | { |
| 1290 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 1281 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
| 1291 | struct completion *tmf_complete; | 1282 | struct completion *tmf_complete = NULL; |
| 1283 | struct completion *request_complete = ireq->io_request_completion; | ||
| 1292 | 1284 | ||
| 1293 | dev_dbg(&ihost->pdev->dev, | 1285 | dev_dbg(&ihost->pdev->dev, |
| 1294 | "%s: request = %p, status=%d\n", | 1286 | "%s: request = %p, status=%d\n", |
| @@ -1296,255 +1288,53 @@ isci_task_request_complete(struct isci_host *ihost, | |||
| 1296 | 1288 | ||
| 1297 | isci_request_change_state(ireq, completed); | 1289 | isci_request_change_state(ireq, completed); |
| 1298 | 1290 | ||
| 1299 | tmf->status = completion_status; | ||
| 1300 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); | 1291 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); |
| 1301 | 1292 | ||
| 1302 | if (tmf->proto == SAS_PROTOCOL_SSP) { | 1293 | if (tmf) { |
| 1303 | memcpy(&tmf->resp.resp_iu, | 1294 | tmf->status = completion_status; |
| 1304 | &ireq->ssp.rsp, | 1295 | |
| 1305 | SSP_RESP_IU_MAX_SIZE); | 1296 | if (tmf->proto == SAS_PROTOCOL_SSP) { |
| 1306 | } else if (tmf->proto == SAS_PROTOCOL_SATA) { | 1297 | memcpy(&tmf->resp.resp_iu, |
| 1307 | memcpy(&tmf->resp.d2h_fis, | 1298 | &ireq->ssp.rsp, |
| 1308 | &ireq->stp.rsp, | 1299 | SSP_RESP_IU_MAX_SIZE); |
| 1309 | sizeof(struct dev_to_host_fis)); | 1300 | } else if (tmf->proto == SAS_PROTOCOL_SATA) { |
| 1301 | memcpy(&tmf->resp.d2h_fis, | ||
| 1302 | &ireq->stp.rsp, | ||
| 1303 | sizeof(struct dev_to_host_fis)); | ||
| 1304 | } | ||
| 1305 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | ||
| 1306 | tmf_complete = tmf->complete; | ||
| 1310 | } | 1307 | } |
| 1311 | |||
| 1312 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | ||
| 1313 | tmf_complete = tmf->complete; | ||
| 1314 | |||
| 1315 | sci_controller_complete_io(ihost, ireq->target_device, ireq); | 1308 | sci_controller_complete_io(ihost, ireq->target_device, ireq); |
| 1316 | /* set the 'terminated' flag handle to make sure it cannot be terminated | 1309 | /* set the 'terminated' flag handle to make sure it cannot be terminated |
| 1317 | * or completed again. | 1310 | * or completed again. |
| 1318 | */ | 1311 | */ |
| 1319 | set_bit(IREQ_TERMINATED, &ireq->flags); | 1312 | set_bit(IREQ_TERMINATED, &ireq->flags); |
| 1320 | 1313 | ||
| 1321 | isci_request_change_state(ireq, unallocated); | 1314 | /* As soon as something is in the terminate path, deallocation is |
| 1322 | list_del_init(&ireq->dev_node); | 1315 | * managed there. Note that the final non-managed state of a task |
| 1323 | 1316 | * request is "completed". | |
| 1324 | /* The task management part completes last. */ | 1317 | */ |
| 1325 | complete(tmf_complete); | 1318 | if ((ireq->status == completed) || |
| 1326 | } | 1319 | !isci_request_is_dealloc_managed(ireq->status)) { |
| 1327 | 1320 | isci_request_change_state(ireq, unallocated); | |
| 1328 | static void isci_smp_task_timedout(unsigned long _task) | 1321 | isci_free_tag(ihost, ireq->io_tag); |
| 1329 | { | 1322 | list_del_init(&ireq->dev_node); |
| 1330 | struct sas_task *task = (void *) _task; | ||
| 1331 | unsigned long flags; | ||
| 1332 | |||
| 1333 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
| 1334 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) | ||
| 1335 | task->task_state_flags |= SAS_TASK_STATE_ABORTED; | ||
| 1336 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 1337 | |||
| 1338 | complete(&task->completion); | ||
| 1339 | } | ||
| 1340 | |||
| 1341 | static void isci_smp_task_done(struct sas_task *task) | ||
| 1342 | { | ||
| 1343 | if (!del_timer(&task->timer)) | ||
| 1344 | return; | ||
| 1345 | complete(&task->completion); | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | static int isci_smp_execute_task(struct isci_host *ihost, | ||
| 1349 | struct domain_device *dev, void *req, | ||
| 1350 | int req_size, void *resp, int resp_size) | ||
| 1351 | { | ||
| 1352 | int res, retry; | ||
| 1353 | struct sas_task *task = NULL; | ||
| 1354 | |||
| 1355 | for (retry = 0; retry < 3; retry++) { | ||
| 1356 | task = sas_alloc_task(GFP_KERNEL); | ||
| 1357 | if (!task) | ||
| 1358 | return -ENOMEM; | ||
| 1359 | |||
| 1360 | task->dev = dev; | ||
| 1361 | task->task_proto = dev->tproto; | ||
| 1362 | sg_init_one(&task->smp_task.smp_req, req, req_size); | ||
| 1363 | sg_init_one(&task->smp_task.smp_resp, resp, resp_size); | ||
| 1364 | |||
| 1365 | task->task_done = isci_smp_task_done; | ||
| 1366 | |||
| 1367 | task->timer.data = (unsigned long) task; | ||
| 1368 | task->timer.function = isci_smp_task_timedout; | ||
| 1369 | task->timer.expires = jiffies + 10*HZ; | ||
| 1370 | add_timer(&task->timer); | ||
| 1371 | |||
| 1372 | res = isci_task_execute_task(task, 1, GFP_KERNEL); | ||
| 1373 | |||
| 1374 | if (res) { | ||
| 1375 | del_timer(&task->timer); | ||
| 1376 | dev_dbg(&ihost->pdev->dev, | ||
| 1377 | "%s: executing SMP task failed:%d\n", | ||
| 1378 | __func__, res); | ||
| 1379 | goto ex_err; | ||
| 1380 | } | ||
| 1381 | |||
| 1382 | wait_for_completion(&task->completion); | ||
| 1383 | res = -ECOMM; | ||
| 1384 | if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
| 1385 | dev_dbg(&ihost->pdev->dev, | ||
| 1386 | "%s: smp task timed out or aborted\n", | ||
| 1387 | __func__); | ||
| 1388 | isci_task_abort_task(task); | ||
| 1389 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { | ||
| 1390 | dev_dbg(&ihost->pdev->dev, | ||
| 1391 | "%s: SMP task aborted and not done\n", | ||
| 1392 | __func__); | ||
| 1393 | goto ex_err; | ||
| 1394 | } | ||
| 1395 | } | ||
| 1396 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
| 1397 | task->task_status.stat == SAM_STAT_GOOD) { | ||
| 1398 | res = 0; | ||
| 1399 | break; | ||
| 1400 | } | ||
| 1401 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
| 1402 | task->task_status.stat == SAS_DATA_UNDERRUN) { | ||
| 1403 | /* no error, but return the number of bytes of | ||
| 1404 | * underrun */ | ||
| 1405 | res = task->task_status.residual; | ||
| 1406 | break; | ||
| 1407 | } | ||
| 1408 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
| 1409 | task->task_status.stat == SAS_DATA_OVERRUN) { | ||
| 1410 | res = -EMSGSIZE; | ||
| 1411 | break; | ||
| 1412 | } else { | ||
| 1413 | dev_dbg(&ihost->pdev->dev, | ||
| 1414 | "%s: task to dev %016llx response: 0x%x " | ||
| 1415 | "status 0x%x\n", __func__, | ||
| 1416 | SAS_ADDR(dev->sas_addr), | ||
| 1417 | task->task_status.resp, | ||
| 1418 | task->task_status.stat); | ||
| 1419 | sas_free_task(task); | ||
| 1420 | task = NULL; | ||
| 1421 | } | ||
| 1422 | } | ||
| 1423 | ex_err: | ||
| 1424 | BUG_ON(retry == 3 && task != NULL); | ||
| 1425 | sas_free_task(task); | ||
| 1426 | return res; | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | #define DISCOVER_REQ_SIZE 16 | ||
| 1430 | #define DISCOVER_RESP_SIZE 56 | ||
| 1431 | |||
| 1432 | int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost, | ||
| 1433 | struct domain_device *dev, | ||
| 1434 | int phy_id, int *adt) | ||
| 1435 | { | ||
| 1436 | struct smp_resp *disc_resp; | ||
| 1437 | u8 *disc_req; | ||
| 1438 | int res; | ||
| 1439 | |||
| 1440 | disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL); | ||
| 1441 | if (!disc_resp) | ||
| 1442 | return -ENOMEM; | ||
| 1443 | |||
| 1444 | disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL); | ||
| 1445 | if (disc_req) { | ||
| 1446 | disc_req[0] = SMP_REQUEST; | ||
| 1447 | disc_req[1] = SMP_DISCOVER; | ||
| 1448 | disc_req[9] = phy_id; | ||
| 1449 | } else { | ||
| 1450 | kfree(disc_resp); | ||
| 1451 | return -ENOMEM; | ||
| 1452 | } | ||
| 1453 | res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE, | ||
| 1454 | disc_resp, DISCOVER_RESP_SIZE); | ||
| 1455 | if (!res) { | ||
| 1456 | if (disc_resp->result != SMP_RESP_FUNC_ACC) | ||
| 1457 | res = disc_resp->result; | ||
| 1458 | else | ||
| 1459 | *adt = disc_resp->disc.attached_dev_type; | ||
| 1460 | } | 1323 | } |
| 1461 | kfree(disc_req); | ||
| 1462 | kfree(disc_resp); | ||
| 1463 | |||
| 1464 | return res; | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num) | ||
| 1468 | { | ||
| 1469 | struct domain_device *dev = idev->domain_dev; | ||
| 1470 | struct isci_port *iport = idev->isci_port; | ||
| 1471 | struct isci_host *ihost = iport->isci_host; | ||
| 1472 | int res, iteration = 0, attached_device_type; | ||
| 1473 | #define STP_WAIT_MSECS 25000 | ||
| 1474 | unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS); | ||
| 1475 | unsigned long deadline = jiffies + tmo; | ||
| 1476 | enum { | ||
| 1477 | SMP_PHYWAIT_PHYDOWN, | ||
| 1478 | SMP_PHYWAIT_PHYUP, | ||
| 1479 | SMP_PHYWAIT_DONE | ||
| 1480 | } phy_state = SMP_PHYWAIT_PHYDOWN; | ||
| 1481 | |||
| 1482 | /* While there is time, wait for the phy to go away and come back */ | ||
| 1483 | while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) { | ||
| 1484 | int event = atomic_read(&iport->event); | ||
| 1485 | |||
| 1486 | ++iteration; | ||
| 1487 | |||
| 1488 | tmo = wait_event_timeout(ihost->eventq, | ||
| 1489 | event != atomic_read(&iport->event) || | ||
| 1490 | !test_bit(IPORT_BCN_BLOCKED, &iport->flags), | ||
| 1491 | tmo); | ||
| 1492 | /* link down, stop polling */ | ||
| 1493 | if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags)) | ||
| 1494 | break; | ||
| 1495 | 1324 | ||
| 1496 | dev_dbg(&ihost->pdev->dev, | 1325 | /* "request_complete" is set if the task was being terminated. */ |
| 1497 | "%s: iport %p, iteration %d," | 1326 | if (request_complete) |
| 1498 | " phase %d: time_remaining %lu, bcns = %d\n", | 1327 | complete(request_complete); |
| 1499 | __func__, iport, iteration, phy_state, | ||
| 1500 | tmo, test_bit(IPORT_BCN_PENDING, &iport->flags)); | ||
| 1501 | |||
| 1502 | res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num, | ||
| 1503 | &attached_device_type); | ||
| 1504 | tmo = deadline - jiffies; | ||
| 1505 | |||
| 1506 | if (res) { | ||
| 1507 | dev_dbg(&ihost->pdev->dev, | ||
| 1508 | "%s: iteration %d, phase %d:" | ||
| 1509 | " SMP error=%d, time_remaining=%lu\n", | ||
| 1510 | __func__, iteration, phy_state, res, tmo); | ||
| 1511 | break; | ||
| 1512 | } | ||
| 1513 | dev_dbg(&ihost->pdev->dev, | ||
| 1514 | "%s: iport %p, iteration %d," | ||
| 1515 | " phase %d: time_remaining %lu, bcns = %d, " | ||
| 1516 | "attdevtype = %x\n", | ||
| 1517 | __func__, iport, iteration, phy_state, | ||
| 1518 | tmo, test_bit(IPORT_BCN_PENDING, &iport->flags), | ||
| 1519 | attached_device_type); | ||
| 1520 | |||
| 1521 | switch (phy_state) { | ||
| 1522 | case SMP_PHYWAIT_PHYDOWN: | ||
| 1523 | /* Has the device gone away? */ | ||
| 1524 | if (!attached_device_type) | ||
| 1525 | phy_state = SMP_PHYWAIT_PHYUP; | ||
| 1526 | |||
| 1527 | break; | ||
| 1528 | |||
| 1529 | case SMP_PHYWAIT_PHYUP: | ||
| 1530 | /* Has the device come back? */ | ||
| 1531 | if (attached_device_type) | ||
| 1532 | phy_state = SMP_PHYWAIT_DONE; | ||
| 1533 | break; | ||
| 1534 | |||
| 1535 | case SMP_PHYWAIT_DONE: | ||
| 1536 | break; | ||
| 1537 | } | ||
| 1538 | 1328 | ||
| 1539 | } | 1329 | /* The task management part completes last. */ |
| 1540 | dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__); | 1330 | if (tmf_complete) |
| 1331 | complete(tmf_complete); | ||
| 1541 | } | 1332 | } |
| 1542 | 1333 | ||
| 1543 | static int isci_reset_device(struct isci_host *ihost, | 1334 | static int isci_reset_device(struct isci_host *ihost, |
| 1544 | struct isci_remote_device *idev) | 1335 | struct isci_remote_device *idev) |
| 1545 | { | 1336 | { |
| 1546 | struct sas_phy *phy = sas_find_local_phy(idev->domain_dev); | 1337 | struct sas_phy *phy = sas_find_local_phy(idev->domain_dev); |
| 1547 | struct isci_port *iport = idev->isci_port; | ||
| 1548 | enum sci_status status; | 1338 | enum sci_status status; |
| 1549 | unsigned long flags; | 1339 | unsigned long flags; |
| 1550 | int rc; | 1340 | int rc; |
| @@ -1564,13 +1354,6 @@ static int isci_reset_device(struct isci_host *ihost, | |||
| 1564 | } | 1354 | } |
| 1565 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1355 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
| 1566 | 1356 | ||
| 1567 | /* Make sure all pending requests are able to be fully terminated. */ | ||
| 1568 | isci_device_clear_reset_pending(ihost, idev); | ||
| 1569 | |||
| 1570 | /* If this is a device on an expander, disable BCN processing. */ | ||
| 1571 | if (!scsi_is_sas_phy_local(phy)) | ||
| 1572 | set_bit(IPORT_BCN_BLOCKED, &iport->flags); | ||
| 1573 | |||
| 1574 | rc = sas_phy_reset(phy, true); | 1357 | rc = sas_phy_reset(phy, true); |
| 1575 | 1358 | ||
| 1576 | /* Terminate in-progress I/O now. */ | 1359 | /* Terminate in-progress I/O now. */ |
| @@ -1581,21 +1364,6 @@ static int isci_reset_device(struct isci_host *ihost, | |||
| 1581 | status = sci_remote_device_reset_complete(idev); | 1364 | status = sci_remote_device_reset_complete(idev); |
| 1582 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1365 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
| 1583 | 1366 | ||
| 1584 | /* If this is a device on an expander, bring the phy back up. */ | ||
| 1585 | if (!scsi_is_sas_phy_local(phy)) { | ||
| 1586 | /* A phy reset will cause the device to go away then reappear. | ||
| 1587 | * Since libsas will take action on incoming BCNs (eg. remove | ||
| 1588 | * a device going through an SMP phy-control driven reset), | ||
| 1589 | * we need to wait until the phy comes back up before letting | ||
| 1590 | * discovery proceed in libsas. | ||
| 1591 | */ | ||
| 1592 | isci_wait_for_smp_phy_reset(idev, phy->number); | ||
| 1593 | |||
| 1594 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
| 1595 | isci_port_bcn_enable(ihost, idev->isci_port); | ||
| 1596 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
| 1597 | } | ||
| 1598 | |||
| 1599 | if (status != SCI_SUCCESS) { | 1367 | if (status != SCI_SUCCESS) { |
| 1600 | dev_dbg(&ihost->pdev->dev, | 1368 | dev_dbg(&ihost->pdev->dev, |
| 1601 | "%s: sci_remote_device_reset_complete(%p) " | 1369 | "%s: sci_remote_device_reset_complete(%p) " |
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 15b18d158993..bc78c0a41d5c 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h | |||
| @@ -58,6 +58,8 @@ | |||
| 58 | #include <scsi/sas_ata.h> | 58 | #include <scsi/sas_ata.h> |
| 59 | #include "host.h" | 59 | #include "host.h" |
| 60 | 60 | ||
| 61 | #define ISCI_TERMINATION_TIMEOUT_MSEC 500 | ||
| 62 | |||
| 61 | struct isci_request; | 63 | struct isci_request; |
| 62 | 64 | ||
| 63 | /** | 65 | /** |
| @@ -224,35 +226,6 @@ enum isci_completion_selection { | |||
| 224 | isci_perform_error_io_completion /* Use sas_task_abort */ | 226 | isci_perform_error_io_completion /* Use sas_task_abort */ |
| 225 | }; | 227 | }; |
| 226 | 228 | ||
| 227 | static inline void isci_set_task_doneflags( | ||
| 228 | struct sas_task *task) | ||
| 229 | { | ||
| 230 | * Since no further action will be taken on this task, | ||
| 231 | * make sure to mark it complete from the lldd perspective. | ||
| 232 | */ | ||
| 233 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
| 234 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
| 235 | task->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
| 236 | } | ||
| 237 | /** | ||
| 238 | * isci_task_all_done() - This function clears the task bits to indicate the | ||
| 239 | * LLDD is done with the task. | ||
| 240 | * | ||
| 241 | * | ||
| 242 | */ | ||
| 243 | static inline void isci_task_all_done( | ||
| 244 | struct sas_task *task) | ||
| 245 | { | ||
| 246 | unsigned long flags; | ||
| 247 | |||
| 248 | /* Since no further action will be taken on this task, | ||
| 249 | * make sure to mark it complete from the lldd perspective. | ||
| 250 | */ | ||
| 251 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
| 252 | isci_set_task_doneflags(task); | ||
| 253 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
| 254 | } | ||
| 255 | |||
| 256 | /** | 229 | /** |
| 257 | * isci_task_set_completion_status() - This function sets the completion status | 230 | * isci_task_set_completion_status() - This function sets the completion status |
| 258 | * for the request. | 231 | * for the request. |
| @@ -334,7 +307,9 @@ isci_task_set_completion_status( | |||
| 334 | /* Fall through to the normal case... */ | 307 | /* Fall through to the normal case... */ |
| 335 | case isci_perform_normal_io_completion: | 308 | case isci_perform_normal_io_completion: |
| 336 | /* Normal notification (task_done) */ | 309 | /* Normal notification (task_done) */ |
| 337 | isci_set_task_doneflags(task); | 310 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
| 311 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
| 312 | SAS_TASK_STATE_PENDING); | ||
| 338 | break; | 313 | break; |
| 339 | default: | 314 | default: |
| 340 | WARN_ONCE(1, "unknown task_notification_selection: %d\n", | 315 | WARN_ONCE(1, "unknown task_notification_selection: %d\n", |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 7c055fdca45d..1b22130035da 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
| @@ -469,6 +469,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, | |||
| 469 | struct fc_frame_header *fh = fc_frame_header_get(fp); | 469 | struct fc_frame_header *fh = fc_frame_header_get(fp); |
| 470 | int error; | 470 | int error; |
| 471 | u32 f_ctl; | 471 | u32 f_ctl; |
| 472 | u8 fh_type = fh->fh_type; | ||
| 472 | 473 | ||
| 473 | ep = fc_seq_exch(sp); | 474 | ep = fc_seq_exch(sp); |
| 474 | WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); | 475 | WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); |
| @@ -493,7 +494,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, | |||
| 493 | */ | 494 | */ |
| 494 | error = lport->tt.frame_send(lport, fp); | 495 | error = lport->tt.frame_send(lport, fp); |
| 495 | 496 | ||
| 496 | if (fh->fh_type == FC_TYPE_BLS) | 497 | if (fh_type == FC_TYPE_BLS) |
| 497 | return error; | 498 | return error; |
| 498 | 499 | ||
| 499 | /* | 500 | /* |
| @@ -1792,6 +1793,9 @@ restart: | |||
| 1792 | goto restart; | 1793 | goto restart; |
| 1793 | } | 1794 | } |
| 1794 | } | 1795 | } |
| 1796 | pool->next_index = 0; | ||
| 1797 | pool->left = FC_XID_UNKNOWN; | ||
| 1798 | pool->right = FC_XID_UNKNOWN; | ||
| 1795 | spin_unlock_bh(&pool->lock); | 1799 | spin_unlock_bh(&pool->lock); |
| 1796 | } | 1800 | } |
| 1797 | 1801 | ||
| @@ -2280,6 +2284,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, | |||
| 2280 | goto free_mempool; | 2284 | goto free_mempool; |
| 2281 | for_each_possible_cpu(cpu) { | 2285 | for_each_possible_cpu(cpu) { |
| 2282 | pool = per_cpu_ptr(mp->pool, cpu); | 2286 | pool = per_cpu_ptr(mp->pool, cpu); |
| 2287 | pool->next_index = 0; | ||
| 2283 | pool->left = FC_XID_UNKNOWN; | 2288 | pool->left = FC_XID_UNKNOWN; |
| 2284 | pool->right = FC_XID_UNKNOWN; | 2289 | pool->right = FC_XID_UNKNOWN; |
| 2285 | spin_lock_init(&pool->lock); | 2290 | spin_lock_init(&pool->lock); |
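The fc_seq_send() change above copies fh->fh_type into a local before the frame is passed to frame_send(), presumably because ownership of the frame transfers to the transmit path and the frame (and its header) may already be freed by the time the post-send check runs. The pattern in isolation, with the surrounding exchange setup assumed:

	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u8 fh_type = fh->fh_type;		/* copy out what we still need */
	int error;

	error = lport->tt.frame_send(lport, fp);	/* fp may be gone now */

	if (fh_type == FC_TYPE_BLS)		/* safe: reads the cached copy */
		return error;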
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 628f347404f9..2cb12b9cd3e8 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
| @@ -1030,16 +1030,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
| 1030 | FCH_EVT_LIPRESET, 0); | 1030 | FCH_EVT_LIPRESET, 0); |
| 1031 | fc_vports_linkchange(lport); | 1031 | fc_vports_linkchange(lport); |
| 1032 | fc_lport_reset_locked(lport); | 1032 | fc_lport_reset_locked(lport); |
| 1033 | if (lport->link_up) { | 1033 | if (lport->link_up) |
| 1034 | /* | ||
| 1035 | * Wait upto resource allocation time out before | ||
| 1036 | * doing re-login since incomplete FIP exchanged | ||
| 1037 | * from last session may collide with exchanges | ||
| 1038 | * in new session. | ||
| 1039 | */ | ||
| 1040 | msleep(lport->r_a_tov); | ||
| 1041 | fc_lport_enter_flogi(lport); | 1034 | fc_lport_enter_flogi(lport); |
| 1042 | } | ||
| 1043 | } | 1035 | } |
| 1044 | 1036 | ||
| 1045 | /** | 1037 | /** |
| @@ -1481,6 +1473,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1481 | void *lp_arg) | 1473 | void *lp_arg) |
| 1482 | { | 1474 | { |
| 1483 | struct fc_lport *lport = lp_arg; | 1475 | struct fc_lport *lport = lp_arg; |
| 1476 | struct fc_frame_header *fh; | ||
| 1484 | struct fc_els_flogi *flp; | 1477 | struct fc_els_flogi *flp; |
| 1485 | u32 did; | 1478 | u32 did; |
| 1486 | u16 csp_flags; | 1479 | u16 csp_flags; |
| @@ -1508,49 +1501,56 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1508 | goto err; | 1501 | goto err; |
| 1509 | } | 1502 | } |
| 1510 | 1503 | ||
| 1504 | fh = fc_frame_header_get(fp); | ||
| 1511 | did = fc_frame_did(fp); | 1505 | did = fc_frame_did(fp); |
| 1512 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) { | 1506 | if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 || |
| 1513 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | 1507 | fc_frame_payload_op(fp) != ELS_LS_ACC) { |
| 1514 | if (flp) { | 1508 | FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); |
| 1515 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | ||
| 1516 | FC_SP_BB_DATA_MASK; | ||
| 1517 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && | ||
| 1518 | mfs < lport->mfs) | ||
| 1519 | lport->mfs = mfs; | ||
| 1520 | csp_flags = ntohs(flp->fl_csp.sp_features); | ||
| 1521 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); | ||
| 1522 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | ||
| 1523 | if (csp_flags & FC_SP_FT_EDTR) | ||
| 1524 | e_d_tov /= 1000000; | ||
| 1525 | |||
| 1526 | lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); | ||
| 1527 | |||
| 1528 | if ((csp_flags & FC_SP_FT_FPORT) == 0) { | ||
| 1529 | if (e_d_tov > lport->e_d_tov) | ||
| 1530 | lport->e_d_tov = e_d_tov; | ||
| 1531 | lport->r_a_tov = 2 * e_d_tov; | ||
| 1532 | fc_lport_set_port_id(lport, did, fp); | ||
| 1533 | printk(KERN_INFO "host%d: libfc: " | ||
| 1534 | "Port (%6.6x) entered " | ||
| 1535 | "point-to-point mode\n", | ||
| 1536 | lport->host->host_no, did); | ||
| 1537 | fc_lport_ptp_setup(lport, fc_frame_sid(fp), | ||
| 1538 | get_unaligned_be64( | ||
| 1539 | &flp->fl_wwpn), | ||
| 1540 | get_unaligned_be64( | ||
| 1541 | &flp->fl_wwnn)); | ||
| 1542 | } else { | ||
| 1543 | lport->e_d_tov = e_d_tov; | ||
| 1544 | lport->r_a_tov = r_a_tov; | ||
| 1545 | fc_host_fabric_name(lport->host) = | ||
| 1546 | get_unaligned_be64(&flp->fl_wwnn); | ||
| 1547 | fc_lport_set_port_id(lport, did, fp); | ||
| 1548 | fc_lport_enter_dns(lport); | ||
| 1549 | } | ||
| 1550 | } | ||
| 1551 | } else { | ||
| 1552 | FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n"); | ||
| 1553 | fc_lport_error(lport, fp); | 1509 | fc_lport_error(lport, fp); |
| 1510 | goto err; | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | ||
| 1514 | if (!flp) { | ||
| 1515 | FC_LPORT_DBG(lport, "FLOGI bad response\n"); | ||
| 1516 | fc_lport_error(lport, fp); | ||
| 1517 | goto err; | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | ||
| 1521 | FC_SP_BB_DATA_MASK; | ||
| 1522 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && | ||
| 1523 | mfs < lport->mfs) | ||
| 1524 | lport->mfs = mfs; | ||
| 1525 | csp_flags = ntohs(flp->fl_csp.sp_features); | ||
| 1526 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); | ||
| 1527 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | ||
| 1528 | if (csp_flags & FC_SP_FT_EDTR) | ||
| 1529 | e_d_tov /= 1000000; | ||
| 1530 | |||
| 1531 | lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); | ||
| 1532 | |||
| 1533 | if ((csp_flags & FC_SP_FT_FPORT) == 0) { | ||
| 1534 | if (e_d_tov > lport->e_d_tov) | ||
| 1535 | lport->e_d_tov = e_d_tov; | ||
| 1536 | lport->r_a_tov = 2 * e_d_tov; | ||
| 1537 | fc_lport_set_port_id(lport, did, fp); | ||
| 1538 | printk(KERN_INFO "host%d: libfc: " | ||
| 1539 | "Port (%6.6x) entered " | ||
| 1540 | "point-to-point mode\n", | ||
| 1541 | lport->host->host_no, did); | ||
| 1542 | fc_lport_ptp_setup(lport, fc_frame_sid(fp), | ||
| 1543 | get_unaligned_be64( | ||
| 1544 | &flp->fl_wwpn), | ||
| 1545 | get_unaligned_be64( | ||
| 1546 | &flp->fl_wwnn)); | ||
| 1547 | } else { | ||
| 1548 | lport->e_d_tov = e_d_tov; | ||
| 1549 | lport->r_a_tov = r_a_tov; | ||
| 1550 | fc_host_fabric_name(lport->host) = | ||
| 1551 | get_unaligned_be64(&flp->fl_wwnn); | ||
| 1552 | fc_lport_set_port_id(lport, did, fp); | ||
| 1553 | fc_lport_enter_dns(lport); | ||
| 1554 | } | 1554 | } |
| 1555 | 1555 | ||
| 1556 | out: | 1556 | out: |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 3105d5e8d908..8dc1b32918dd 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2000-2010 LSI Corporation. | 2 | * Copyright (c) 2000-2011 LSI Corporation. |
| 3 | * | 3 | * |
| 4 | * | 4 | * |
| 5 | * Name: mpi2.h | 5 | * Name: mpi2.h |
| @@ -8,7 +8,7 @@ | |||
| 8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
| 9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
| 10 | * | 10 | * |
| 11 | * mpi2.h Version: 02.00.18 | 11 | * mpi2.h Version: 02.00.20 |
| 12 | * | 12 | * |
| 13 | * Version History | 13 | * Version History |
| 14 | * --------------- | 14 | * --------------- |
| @@ -66,6 +66,9 @@ | |||
| 66 | * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. | 66 | * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. |
| 67 | * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. | 67 | * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. |
| 68 | * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. | 68 | * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. |
| 69 | * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. | ||
| 70 | * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. | ||
| 71 | * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. | ||
| 69 | * -------------------------------------------------------------------------- | 72 | * -------------------------------------------------------------------------- |
| 70 | */ | 73 | */ |
| 71 | 74 | ||
| @@ -91,7 +94,7 @@ | |||
| 91 | #define MPI2_VERSION_02_00 (0x0200) | 94 | #define MPI2_VERSION_02_00 (0x0200) |
| 92 | 95 | ||
| 93 | /* versioning for this MPI header set */ | 96 | /* versioning for this MPI header set */ |
| 94 | #define MPI2_HEADER_VERSION_UNIT (0x12) | 97 | #define MPI2_HEADER_VERSION_UNIT (0x14) |
| 95 | #define MPI2_HEADER_VERSION_DEV (0x00) | 98 | #define MPI2_HEADER_VERSION_DEV (0x00) |
| 96 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 99 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
| 97 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 100 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
| @@ -515,6 +518,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION | |||
| 515 | #define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) | 518 | #define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) |
| 516 | /* Power Management Control */ | 519 | /* Power Management Control */ |
| 517 | #define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) | 520 | #define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) |
| 521 | /* Send Host Message */ | ||
| 522 | #define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) | ||
| 518 | /* beginning of product-specific range */ | 523 | /* beginning of product-specific range */ |
| 519 | #define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) | 524 | #define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) |
| 520 | /* end of product-specific range */ | 525 | /* end of product-specific range */ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 61475a6480e3..cfd95b4e3004 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
| @@ -1,12 +1,12 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2000-2010 LSI Corporation. | 2 | * Copyright (c) 2000-2011 LSI Corporation. |
| 3 | * | 3 | * |
| 4 | * | 4 | * |
| 5 | * Name: mpi2_cnfg.h | 5 | * Name: mpi2_cnfg.h |
| 6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
| 7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
| 8 | * | 8 | * |
| 9 | * mpi2_cnfg.h Version: 02.00.17 | 9 | * mpi2_cnfg.h Version: 02.00.19 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -134,6 +134,12 @@ | |||
| 134 | * to MPI2_CONFIG_PAGE_IO_UNIT_7. | 134 | * to MPI2_CONFIG_PAGE_IO_UNIT_7. |
| 135 | * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define | 135 | * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define |
| 136 | * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. | 136 | * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. |
| 137 | * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. | ||
| 138 | * Added IO Unit Page 8, IO Unit Page 9, | ||
| 139 | * and IO Unit Page 10. | ||
| 140 | * Added SASNotifyPrimitiveMasks field to | ||
| 141 | * MPI2_CONFIG_PAGE_IOC_7. | ||
| 142 | * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). | ||
| 137 | * -------------------------------------------------------------------------- | 143 | * -------------------------------------------------------------------------- |
| 138 | */ | 144 | */ |
| 139 | 145 | ||
| @@ -329,7 +335,9 @@ typedef struct _MPI2_CONFIG_REQUEST | |||
| 329 | U8 VP_ID; /* 0x08 */ | 335 | U8 VP_ID; /* 0x08 */ |
| 330 | U8 VF_ID; /* 0x09 */ | 336 | U8 VF_ID; /* 0x09 */ |
| 331 | U16 Reserved1; /* 0x0A */ | 337 | U16 Reserved1; /* 0x0A */ |
| 332 | U32 Reserved2; /* 0x0C */ | 338 | U8 Reserved2; /* 0x0C */ |
| 339 | U8 ProxyVF_ID; /* 0x0D */ | ||
| 340 | U16 Reserved4; /* 0x0E */ | ||
| 333 | U32 Reserved3; /* 0x10 */ | 341 | U32 Reserved3; /* 0x10 */ |
| 334 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ | 342 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ |
| 335 | U32 PageAddress; /* 0x18 */ | 343 | U32 PageAddress; /* 0x18 */ |
| @@ -915,6 +923,120 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { | |||
| 915 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) | 923 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) |
| 916 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) | 924 | #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) |
| 917 | 925 | ||
| 926 | /* IO Unit Page 8 */ | ||
| 927 | |||
| 928 | #define MPI2_IOUNIT8_NUM_THRESHOLDS (4) | ||
| 929 | |||
| 930 | typedef struct _MPI2_IOUNIT8_SENSOR { | ||
| 931 | U16 Flags; /* 0x00 */ | ||
| 932 | U16 Reserved1; /* 0x02 */ | ||
| 933 | U16 | ||
| 934 | Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */ | ||
| 935 | U32 Reserved2; /* 0x0C */ | ||
| 936 | U32 Reserved3; /* 0x10 */ | ||
| 937 | U32 Reserved4; /* 0x14 */ | ||
| 938 | } MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR, | ||
| 939 | Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t; | ||
| 940 | |||
| 941 | /* defines for IO Unit Page 8 Sensor Flags field */ | ||
| 942 | #define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) | ||
| 943 | #define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) | ||
| 944 | #define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) | ||
| 945 | #define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) | ||
| 946 | |||
| 947 | /* | ||
| 948 | * Host code (drivers, BIOS, utilities, etc.) should leave this define set to | ||
| 949 | * one and check the value returned for NumSensors at runtime. | ||
| 950 | */ | ||
| 951 | #ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES | ||
| 952 | #define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) | ||
| 953 | #endif | ||
| 954 | |||
| 955 | typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { | ||
| 956 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | ||
| 957 | U32 Reserved1; /* 0x04 */ | ||
| 958 | U32 Reserved2; /* 0x08 */ | ||
| 959 | U8 NumSensors; /* 0x0C */ | ||
| 960 | U8 PollingInterval; /* 0x0D */ | ||
| 961 | U16 Reserved3; /* 0x0E */ | ||
| 962 | MPI2_IOUNIT8_SENSOR | ||
| 963 | Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */ | ||
| 964 | } MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, | ||
| 965 | Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t; | ||
| 966 | |||
| 967 | #define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) | ||
| 968 | |||
| 969 | |||
| 970 | /* IO Unit Page 9 */ | ||
| 971 | |||
| 972 | typedef struct _MPI2_IOUNIT9_SENSOR { | ||
| 973 | U16 CurrentTemperature; /* 0x00 */ | ||
| 974 | U16 Reserved1; /* 0x02 */ | ||
| 975 | U8 Flags; /* 0x04 */ | ||
| 976 | U8 Reserved2; /* 0x05 */ | ||
| 977 | U16 Reserved3; /* 0x06 */ | ||
| 978 | U32 Reserved4; /* 0x08 */ | ||
| 979 | U32 Reserved5; /* 0x0C */ | ||
| 980 | } MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR, | ||
| 981 | Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t; | ||
| 982 | |||
| 983 | /* defines for IO Unit Page 9 Sensor Flags field */ | ||
| 984 | #define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) | ||
| 985 | |||
| 986 | /* | ||
| 987 | * Host code (drivers, BIOS, utilities, etc.) should leave this define set to | ||
| 988 | * one and check the value returned for NumSensors at runtime. | ||
| 989 | */ | ||
| 990 | #ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES | ||
| 991 | #define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) | ||
| 992 | #endif | ||
| 993 | |||
| 994 | typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { | ||
| 995 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | ||
| 996 | U32 Reserved1; /* 0x04 */ | ||
| 997 | U32 Reserved2; /* 0x08 */ | ||
| 998 | U8 NumSensors; /* 0x0C */ | ||
| 999 | U8 Reserved4; /* 0x0D */ | ||
| 1000 | U16 Reserved3; /* 0x0E */ | ||
| 1001 | MPI2_IOUNIT9_SENSOR | ||
| 1002 | Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */ | ||
| 1003 | } MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, | ||
| 1004 | Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t; | ||
| 1005 | |||
| 1006 | #define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) | ||
| 1007 | |||
| 1008 | |||
| 1009 | /* IO Unit Page 10 */ | ||
| 1010 | |||
| 1011 | typedef struct _MPI2_IOUNIT10_FUNCTION { | ||
| 1012 | U8 CreditPercent; /* 0x00 */ | ||
| 1013 | U8 Reserved1; /* 0x01 */ | ||
| 1014 | U16 Reserved2; /* 0x02 */ | ||
| 1015 | } MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION, | ||
| 1016 | Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t; | ||
| 1017 | |||
| 1018 | /* | ||
| 1019 | * Host code (drivers, BIOS, utilities, etc.) should leave this define set to | ||
| 1020 | * one and check the value returned for NumFunctions at runtime. | ||
| 1021 | */ | ||
| 1022 | #ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES | ||
| 1023 | #define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) | ||
| 1024 | #endif | ||
| 1025 | |||
| 1026 | typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { | ||
| 1027 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | ||
| 1028 | U8 NumFunctions; /* 0x04 */ | ||
| 1029 | U8 Reserved1; /* 0x05 */ | ||
| 1030 | U16 Reserved2; /* 0x06 */ | ||
| 1031 | U32 Reserved3; /* 0x08 */ | ||
| 1032 | U32 Reserved4; /* 0x0C */ | ||
| 1033 | MPI2_IOUNIT10_FUNCTION | ||
| 1034 | Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */ | ||
| 1035 | } MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, | ||
| 1036 | Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t; | ||
| 1037 | |||
| 1038 | #define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) | ||
| 1039 | |||
| 918 | 1040 | ||
| 919 | 1041 | ||
| 920 | /**************************************************************************** | 1042 | /**************************************************************************** |
| @@ -1022,12 +1144,12 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_7 | |||
| 1022 | U32 Reserved1; /* 0x04 */ | 1144 | U32 Reserved1; /* 0x04 */ |
| 1023 | U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */ | 1145 | U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */ |
| 1024 | U16 SASBroadcastPrimitiveMasks; /* 0x18 */ | 1146 | U16 SASBroadcastPrimitiveMasks; /* 0x18 */ |
| 1025 | U16 Reserved2; /* 0x1A */ | 1147 | U16 SASNotifyPrimitiveMasks; /* 0x1A */ |
| 1026 | U32 Reserved3; /* 0x1C */ | 1148 | U32 Reserved3; /* 0x1C */ |
| 1027 | } MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7, | 1149 | } MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7, |
| 1028 | Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t; | 1150 | Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t; |
| 1029 | 1151 | ||
| 1030 | #define MPI2_IOCPAGE7_PAGEVERSION (0x01) | 1152 | #define MPI2_IOCPAGE7_PAGEVERSION (0x02) |
| 1031 | 1153 | ||
| 1032 | 1154 | ||
| 1033 | /* IOC Page 8 */ | 1155 | /* IOC Page 8 */ |
| @@ -2070,16 +2192,16 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { | |||
| 2070 | #define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) | 2192 | #define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) |
| 2071 | 2193 | ||
| 2072 | /* defines for PowerManagementCapabilities field */ | 2194 | /* defines for PowerManagementCapabilities field */ |
| 2073 | #define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x000001000) | 2195 | #define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) |
| 2074 | #define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x000000800) | 2196 | #define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) |
| 2075 | #define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x000000400) | 2197 | #define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) |
| 2076 | #define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x000000200) | 2198 | #define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200) |
| 2077 | #define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x000000100) | 2199 | #define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100) |
| 2078 | #define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x000000010) | 2200 | #define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010) |
| 2079 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x000000008) | 2201 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) |
| 2080 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x000000004) | 2202 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) |
| 2081 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x000000002) | 2203 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) |
| 2082 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x000000001) | 2204 | #define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) |
| 2083 | 2205 | ||
| 2084 | 2206 | ||
| 2085 | 2207 | ||
| @@ -2266,6 +2388,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
| 2266 | /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ | 2388 | /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ |
| 2267 | 2389 | ||
| 2268 | /* values for SAS Device Page 0 Flags field */ | 2390 | /* values for SAS Device Page 0 Flags field */ |
| 2391 | #define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) | ||
| 2269 | #define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) | 2392 | #define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) |
| 2270 | #define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) | 2393 | #define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) |
| 2271 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) | 2394 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) |
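IO Unit Pages 8-10 above each declare a one-entry placeholder array together with the note that host code should size the page from the count the controller reports at runtime. A minimal sketch of that sizing for Page 8, assuming a hypothetical read_config_page() helper (the real mpt2sas config-page path is not part of this hunk):

	#include <linux/slab.h>
	#include <linux/stddef.h>

	static Mpi2IOUnitPage8_t *fetch_iounit_page8(void)
	{
		Mpi2IOUnitPage8_t hdr, *page;
		size_t sz;

		/* First read the fixed part of the page to learn NumSensors. */
		if (read_config_page(&hdr, sizeof(hdr)))	/* assumed helper */
			return NULL;

		/* Sensor[] is declared with a single placeholder entry, so size
		 * the real buffer from the runtime NumSensors value. */
		sz = offsetof(Mpi2IOUnitPage8_t, Sensor) +
		     hdr.NumSensors * sizeof(MPI2_IOUNIT8_SENSOR);

		page = kzalloc(sz, GFP_KERNEL);
		if (!page)
			return NULL;

		if (read_config_page(page, sz)) {		/* re-read, full length */
			kfree(page);
			return NULL;
		}
		return page;
	}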
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index 1f0c190d336e..93d9b6956d05 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
| @@ -1,12 +1,12 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2000-2010 LSI Corporation. | 2 | * Copyright (c) 2000-2011 LSI Corporation. |
| 3 | * | 3 | * |
| 4 | * | 4 | * |
| 5 | * Name: mpi2_ioc.h | 5 | * Name: mpi2_ioc.h |
| 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
| 7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
| 8 | * | 8 | * |
| 9 | * mpi2_ioc.h Version: 02.00.16 | 9 | * mpi2_ioc.h Version: 02.00.17 |
| 10 | * | 10 | * |
| 11 | * Version History | 11 | * Version History |
| 12 | * --------------- | 12 | * --------------- |
| @@ -104,6 +104,12 @@ | |||
| 104 | * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. | 104 | * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. |
| 105 | * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. | 105 | * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. |
| 106 | * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. | 106 | * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. |
| 107 | * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added | ||
| 108 | * SASNotifyPrimitiveMasks field to | ||
| 109 | * MPI2_EVENT_NOTIFICATION_REQUEST. | ||
| 110 | * Added Temperature Threshold Event. | ||
| 111 | * Added Host Message Event. | ||
| 112 | * Added Send Host Message request and reply. | ||
| 107 | * -------------------------------------------------------------------------- | 113 | * -------------------------------------------------------------------------- |
| 108 | */ | 114 | */ |
| 109 | 115 | ||
| @@ -421,7 +427,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST | |||
| 421 | U32 Reserved6; /* 0x10 */ | 427 | U32 Reserved6; /* 0x10 */ |
| 422 | U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ | 428 | U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ |
| 423 | U16 SASBroadcastPrimitiveMasks; /* 0x24 */ | 429 | U16 SASBroadcastPrimitiveMasks; /* 0x24 */ |
| 424 | U16 Reserved7; /* 0x26 */ | 430 | U16 SASNotifyPrimitiveMasks; /* 0x26 */ |
| 425 | U32 Reserved8; /* 0x28 */ | 431 | U32 Reserved8; /* 0x28 */ |
| 426 | } MPI2_EVENT_NOTIFICATION_REQUEST, | 432 | } MPI2_EVENT_NOTIFICATION_REQUEST, |
| 427 | MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST, | 433 | MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST, |
| @@ -476,6 +482,9 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY | |||
| 476 | #define MPI2_EVENT_GPIO_INTERRUPT (0x0023) | 482 | #define MPI2_EVENT_GPIO_INTERRUPT (0x0023) |
| 477 | #define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) | 483 | #define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) |
| 478 | #define MPI2_EVENT_SAS_QUIESCE (0x0025) | 484 | #define MPI2_EVENT_SAS_QUIESCE (0x0025) |
| 485 | #define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) | ||
| 486 | #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) | ||
| 487 | #define MPI2_EVENT_HOST_MESSAGE (0x0028) | ||
| 479 | 488 | ||
| 480 | 489 | ||
| 481 | /* Log Entry Added Event data */ | 490 | /* Log Entry Added Event data */ |
| @@ -507,6 +516,39 @@ typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { | |||
| 507 | MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, | 516 | MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, |
| 508 | Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; | 517 | Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; |
| 509 | 518 | ||
| 519 | /* Temperature Threshold Event data */ | ||
| 520 | |||
| 521 | typedef struct _MPI2_EVENT_DATA_TEMPERATURE { | ||
| 522 | U16 Status; /* 0x00 */ | ||
| 523 | U8 SensorNum; /* 0x02 */ | ||
| 524 | U8 Reserved1; /* 0x03 */ | ||
| 525 | U16 CurrentTemperature; /* 0x04 */ | ||
| 526 | U16 Reserved2; /* 0x06 */ | ||
| 527 | U32 Reserved3; /* 0x08 */ | ||
| 528 | U32 Reserved4; /* 0x0C */ | ||
| 529 | } MPI2_EVENT_DATA_TEMPERATURE, | ||
| 530 | MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE, | ||
| 531 | Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t; | ||
| 532 | |||
| 533 | /* Temperature Threshold Event data Status bits */ | ||
| 534 | #define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) | ||
| 535 | #define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) | ||
| 536 | #define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) | ||
| 537 | #define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) | ||
| 538 | |||
| 539 | |||
| 540 | /* Host Message Event data */ | ||
| 541 | |||
| 542 | typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { | ||
| 543 | U8 SourceVF_ID; /* 0x00 */ | ||
| 544 | U8 Reserved1; /* 0x01 */ | ||
| 545 | U16 Reserved2; /* 0x02 */ | ||
| 546 | U32 Reserved3; /* 0x04 */ | ||
| 547 | U32 HostData[1]; /* 0x08 */ | ||
| 548 | } MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE, | ||
| 549 | Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t; | ||
| 550 | |||
| 551 | |||
| 510 | /* Hard Reset Received Event data */ | 552 | /* Hard Reset Received Event data */ |
| 511 | 553 | ||
| 512 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED | 554 | typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED |
| @@ -749,6 +791,24 @@ typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE | |||
| 749 | #define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) | 791 | #define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) |
| 750 | #define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) | 792 | #define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) |
| 751 | 793 | ||
| 794 | /* SAS Notify Primitive Event data */ | ||
| 795 | |||
| 796 | typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { | ||
| 797 | U8 PhyNum; /* 0x00 */ | ||
| 798 | U8 Port; /* 0x01 */ | ||
| 799 | U8 Reserved1; /* 0x02 */ | ||
| 800 | U8 Primitive; /* 0x03 */ | ||
| 801 | } MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, | ||
| 802 | MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, | ||
| 803 | Mpi2EventDataSasNotifyPrimitive_t, | ||
| 804 | MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t; | ||
| 805 | |||
| 806 | /* defines for the Primitive field */ | ||
| 807 | #define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) | ||
| 808 | #define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) | ||
| 809 | #define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) | ||
| 810 | #define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) | ||
| 811 | |||
| 752 | 812 | ||
| 753 | /* SAS Initiator Device Status Change Event data */ | 813 | /* SAS Initiator Device Status Change Event data */ |
| 754 | 814 | ||
| @@ -1001,6 +1061,53 @@ typedef struct _MPI2_EVENT_ACK_REPLY | |||
| 1001 | 1061 | ||
| 1002 | 1062 | ||
| 1003 | /**************************************************************************** | 1063 | /**************************************************************************** |
| 1064 | * SendHostMessage message | ||
| 1065 | ****************************************************************************/ | ||
| 1066 | |||
| 1067 | /* SendHostMessage Request message */ | ||
| 1068 | typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { | ||
| 1069 | U16 HostDataLength; /* 0x00 */ | ||
| 1070 | U8 ChainOffset; /* 0x02 */ | ||
| 1071 | U8 Function; /* 0x03 */ | ||
| 1072 | U16 Reserved1; /* 0x04 */ | ||
| 1073 | U8 Reserved2; /* 0x06 */ | ||
| 1074 | U8 MsgFlags; /* 0x07 */ | ||
| 1075 | U8 VP_ID; /* 0x08 */ | ||
| 1076 | U8 VF_ID; /* 0x09 */ | ||
| 1077 | U16 Reserved3; /* 0x0A */ | ||
| 1078 | U8 Reserved4; /* 0x0C */ | ||
| 1079 | U8 DestVF_ID; /* 0x0D */ | ||
| 1080 | U16 Reserved5; /* 0x0E */ | ||
| 1081 | U32 Reserved6; /* 0x10 */ | ||
| 1082 | U32 Reserved7; /* 0x14 */ | ||
| 1083 | U32 Reserved8; /* 0x18 */ | ||
| 1084 | U32 Reserved9; /* 0x1C */ | ||
| 1085 | U32 Reserved10; /* 0x20 */ | ||
| 1086 | U32 HostData[1]; /* 0x24 */ | ||
| 1087 | } MPI2_SEND_HOST_MESSAGE_REQUEST, | ||
| 1088 | MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, | ||
| 1089 | Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t; | ||
| 1090 | |||
| 1091 | |||
| 1092 | /* SendHostMessage Reply message */ | ||
| 1093 | typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { | ||
| 1094 | U16 HostDataLength; /* 0x00 */ | ||
| 1095 | U8 MsgLength; /* 0x02 */ | ||
| 1096 | U8 Function; /* 0x03 */ | ||
| 1097 | U16 Reserved1; /* 0x04 */ | ||
| 1098 | U8 Reserved2; /* 0x06 */ | ||
| 1099 | U8 MsgFlags; /* 0x07 */ | ||
| 1100 | U8 VP_ID; /* 0x08 */ | ||
| 1101 | U8 VF_ID; /* 0x09 */ | ||
| 1102 | U16 Reserved3; /* 0x0A */ | ||
| 1103 | U16 Reserved4; /* 0x0C */ | ||
| 1104 | U16 IOCStatus; /* 0x0E */ | ||
| 1105 | U32 IOCLogInfo; /* 0x10 */ | ||
| 1106 | } MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY, | ||
| 1107 | Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t; | ||
| 1108 | |||
| 1109 | |||
| 1110 | /**************************************************************************** | ||
| 1004 | * FWDownload message | 1111 | * FWDownload message |
| 1005 | ****************************************************************************/ | 1112 | ****************************************************************************/ |
| 1006 | 1113 | ||
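mpi2_ioc.h above adds the SendHostMessage request/reply pair. A minimal sketch of filling the request, assuming the caller has already obtained a message frame large enough for HostData and handles frame submission and endian conversion through the driver's usual paths; whether HostDataLength counts dwords or bytes is not stated in this header, so that (like the helper itself) is an assumption:

	/* Illustrative only: prepare a SendHostMessage request destined for
	 * another virtual function. Frame allocation and submission are omitted. */
	static void fill_send_host_message(Mpi2SendHostMessageRequest_t *req,
					   U8 dest_vf_id, const U32 *data, U16 ndwords)
	{
		memset(req, 0, sizeof(*req));
		req->Function = MPI2_FUNCTION_SEND_HOST_MESSAGE;
		req->DestVF_ID = dest_vf_id;
		req->HostDataLength = ndwords;	/* assumed: length in dwords */
		memcpy(req->HostData, data, ndwords * sizeof(U32));
	}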
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 81209ca87274..beda04a8404b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
| @@ -81,6 +81,15 @@ static int missing_delay[2] = {-1, -1}; | |||
| 81 | module_param_array(missing_delay, int, NULL, 0); | 81 | module_param_array(missing_delay, int, NULL, 0); |
| 82 | MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); | 82 | MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); |
| 83 | 83 | ||
| 84 | static int mpt2sas_fwfault_debug; | ||
| 85 | MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " | ||
| 86 | "and halt firmware - (default=0)"); | ||
| 87 | |||
| 88 | static int disable_discovery = -1; | ||
| 89 | module_param(disable_discovery, int, 0); | ||
| 90 | MODULE_PARM_DESC(disable_discovery, " disable discovery "); | ||
| 91 | |||
| 92 | |||
| 84 | /* diag_buffer_enable is bitwise | 93 | /* diag_buffer_enable is bitwise |
| 85 | * bit 0 set = TRACE | 94 | * bit 0 set = TRACE |
| 86 | * bit 1 set = SNAPSHOT | 95 | * bit 1 set = SNAPSHOT |
| @@ -93,14 +102,6 @@ module_param(diag_buffer_enable, int, 0); | |||
| 93 | MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " | 102 | MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " |
| 94 | "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); | 103 | "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); |
| 95 | 104 | ||
| 96 | static int mpt2sas_fwfault_debug; | ||
| 97 | MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " | ||
| 98 | "and halt firmware - (default=0)"); | ||
| 99 | |||
| 100 | static int disable_discovery = -1; | ||
| 101 | module_param(disable_discovery, int, 0); | ||
| 102 | MODULE_PARM_DESC(disable_discovery, " disable discovery "); | ||
| 103 | |||
| 104 | /** | 105 | /** |
| 105 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. | 106 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. |
| 106 | * | 107 | * |
| @@ -691,6 +692,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 691 | memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); | 692 | memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); |
| 692 | } | 693 | } |
| 693 | ioc->base_cmds.status &= ~MPT2_CMD_PENDING; | 694 | ioc->base_cmds.status &= ~MPT2_CMD_PENDING; |
| 695 | |||
| 694 | complete(&ioc->base_cmds.done); | 696 | complete(&ioc->base_cmds.done); |
| 695 | return 1; | 697 | return 1; |
| 696 | } | 698 | } |
| @@ -3470,6 +3472,58 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 3470 | } | 3472 | } |
| 3471 | 3473 | ||
| 3472 | /** | 3474 | /** |
| 3475 | * mpt2sas_port_enable_done - command completion routine for port enable | ||
| 3476 | * @ioc: per adapter object | ||
| 3477 | * @smid: system request message index | ||
| 3478 | * @msix_index: MSIX table index supplied by the OS | ||
| 3479 | * @reply: reply message frame(lower 32bit addr) | ||
| 3480 | * | ||
| 3481 | * Return 1 meaning mf should be freed from _base_interrupt | ||
| 3482 | * 0 means the mf is freed from this function. | ||
| 3483 | */ | ||
| 3484 | u8 | ||
| 3485 | mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | ||
| 3486 | u32 reply) | ||
| 3487 | { | ||
| 3488 | MPI2DefaultReply_t *mpi_reply; | ||
| 3489 | u16 ioc_status; | ||
| 3490 | |||
| 3491 | mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); | ||
| 3492 | if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) | ||
| 3493 | return 1; | ||
| 3494 | |||
| 3495 | if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED) | ||
| 3496 | return 1; | ||
| 3497 | |||
| 3498 | ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE; | ||
| 3499 | if (mpi_reply) { | ||
| 3500 | ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID; | ||
| 3501 | memcpy(ioc->port_enable_cmds.reply, mpi_reply, | ||
| 3502 | mpi_reply->MsgLength*4); | ||
| 3503 | } | ||
| 3504 | ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING; | ||
| 3505 | |||
| 3506 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; | ||
| 3507 | |||
| 3508 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | ||
| 3509 | ioc->port_enable_failed = 1; | ||
| 3510 | |||
| 3511 | if (ioc->is_driver_loading) { | ||
| 3512 | if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { | ||
| 3513 | mpt2sas_port_enable_complete(ioc); | ||
| 3514 | return 1; | ||
| 3515 | } else { | ||
| 3516 | ioc->start_scan_failed = ioc_status; | ||
| 3517 | ioc->start_scan = 0; | ||
| 3518 | return 1; | ||
| 3519 | } | ||
| 3520 | } | ||
| 3521 | complete(&ioc->port_enable_cmds.done); | ||
| 3522 | return 1; | ||
| 3523 | } | ||
| 3524 | |||
| 3525 | |||
| 3526 | /** | ||
| 3473 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware | 3527 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware |
| 3474 | * @ioc: per adapter object | 3528 | * @ioc: per adapter object |
| 3475 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | 3529 | * @sleep_flag: CAN_SLEEP or NO_SLEEP |
| @@ -3480,67 +3534,151 @@ static int | |||
| 3480 | _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | 3534 | _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) |
| 3481 | { | 3535 | { |
| 3482 | Mpi2PortEnableRequest_t *mpi_request; | 3536 | Mpi2PortEnableRequest_t *mpi_request; |
| 3483 | u32 ioc_state; | 3537 | Mpi2PortEnableReply_t *mpi_reply; |
| 3484 | unsigned long timeleft; | 3538 | unsigned long timeleft; |
| 3485 | int r = 0; | 3539 | int r = 0; |
| 3486 | u16 smid; | 3540 | u16 smid; |
| 3541 | u16 ioc_status; | ||
| 3487 | 3542 | ||
| 3488 | printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); | 3543 | printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); |
| 3489 | 3544 | ||
| 3490 | if (ioc->base_cmds.status & MPT2_CMD_PENDING) { | 3545 | if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { |
| 3491 | printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", | 3546 | printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", |
| 3492 | ioc->name, __func__); | 3547 | ioc->name, __func__); |
| 3493 | return -EAGAIN; | 3548 | return -EAGAIN; |
| 3494 | } | 3549 | } |
| 3495 | 3550 | ||
| 3496 | smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); | 3551 | smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx); |
| 3497 | if (!smid) { | 3552 | if (!smid) { |
| 3498 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", | 3553 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", |
| 3499 | ioc->name, __func__); | 3554 | ioc->name, __func__); |
| 3500 | return -EAGAIN; | 3555 | return -EAGAIN; |
| 3501 | } | 3556 | } |
| 3502 | 3557 | ||
| 3503 | ioc->base_cmds.status = MPT2_CMD_PENDING; | 3558 | ioc->port_enable_cmds.status = MPT2_CMD_PENDING; |
| 3504 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); | 3559 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); |
| 3505 | ioc->base_cmds.smid = smid; | 3560 | ioc->port_enable_cmds.smid = smid; |
| 3506 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); | 3561 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); |
| 3507 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; | 3562 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; |
| 3508 | mpi_request->VF_ID = 0; /* TODO */ | ||
| 3509 | mpi_request->VP_ID = 0; | ||
| 3510 | 3563 | ||
| 3564 | init_completion(&ioc->port_enable_cmds.done); | ||
| 3511 | mpt2sas_base_put_smid_default(ioc, smid); | 3565 | mpt2sas_base_put_smid_default(ioc, smid); |
| 3512 | init_completion(&ioc->base_cmds.done); | 3566 | timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, |
| 3513 | timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, | ||
| 3514 | 300*HZ); | 3567 | 300*HZ); |
| 3515 | if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { | 3568 | if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) { |
| 3516 | printk(MPT2SAS_ERR_FMT "%s: timeout\n", | 3569 | printk(MPT2SAS_ERR_FMT "%s: timeout\n", |
| 3517 | ioc->name, __func__); | 3570 | ioc->name, __func__); |
| 3518 | _debug_dump_mf(mpi_request, | 3571 | _debug_dump_mf(mpi_request, |
| 3519 | sizeof(Mpi2PortEnableRequest_t)/4); | 3572 | sizeof(Mpi2PortEnableRequest_t)/4); |
| 3520 | if (ioc->base_cmds.status & MPT2_CMD_RESET) | 3573 | if (ioc->port_enable_cmds.status & MPT2_CMD_RESET) |
| 3521 | r = -EFAULT; | 3574 | r = -EFAULT; |
| 3522 | else | 3575 | else |
| 3523 | r = -ETIME; | 3576 | r = -ETIME; |
| 3524 | goto out; | 3577 | goto out; |
| 3525 | } else | 3578 | } |
| 3526 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n", | 3579 | mpi_reply = ioc->port_enable_cmds.reply; |
| 3527 | ioc->name, __func__)); | ||
| 3528 | 3580 | ||
| 3529 | ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL, | 3581 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; |
| 3530 | 60, sleep_flag); | 3582 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { |
| 3531 | if (ioc_state) { | 3583 | printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n", |
| 3532 | printk(MPT2SAS_ERR_FMT "%s: failed going to operational state " | 3584 | ioc->name, __func__, ioc_status); |
| 3533 | " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); | ||
| 3534 | r = -EFAULT; | 3585 | r = -EFAULT; |
| 3586 | goto out; | ||
| 3535 | } | 3587 | } |
| 3536 | out: | 3588 | out: |
| 3537 | ioc->base_cmds.status = MPT2_CMD_NOT_USED; | 3589 | ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED; |
| 3538 | printk(MPT2SAS_INFO_FMT "port enable: %s\n", | 3590 | printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ? |
| 3539 | ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); | 3591 | "SUCCESS" : "FAILED")); |
| 3540 | return r; | 3592 | return r; |
| 3541 | } | 3593 | } |
| 3542 | 3594 | ||
| 3543 | /** | 3595 | /** |
| 3596 | * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply) | ||
| 3597 | * @ioc: per adapter object | ||
| 3598 | * | ||
| 3599 | * Returns 0 for success, non-zero for failure. | ||
| 3600 | */ | ||
| 3601 | int | ||
| 3602 | mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc) | ||
| 3603 | { | ||
| 3604 | Mpi2PortEnableRequest_t *mpi_request; | ||
| 3605 | u16 smid; | ||
| 3606 | |||
| 3607 | printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); | ||
| 3608 | |||
| 3609 | if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { | ||
| 3610 | printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", | ||
| 3611 | ioc->name, __func__); | ||
| 3612 | return -EAGAIN; | ||
| 3613 | } | ||
| 3614 | |||
| 3615 | smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx); | ||
| 3616 | if (!smid) { | ||
| 3617 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", | ||
| 3618 | ioc->name, __func__); | ||
| 3619 | return -EAGAIN; | ||
| 3620 | } | ||
| 3621 | |||
| 3622 | ioc->port_enable_cmds.status = MPT2_CMD_PENDING; | ||
| 3623 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); | ||
| 3624 | ioc->port_enable_cmds.smid = smid; | ||
| 3625 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); | ||
| 3626 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; | ||
| 3627 | |||
| 3628 | mpt2sas_base_put_smid_default(ioc, smid); | ||
| 3629 | return 0; | ||
| 3630 | } | ||
| 3631 | |||
| 3632 | /** | ||
| 3633 | * _base_determine_wait_on_discovery - disposition | ||
| 3634 | * @ioc: per adapter object | ||
| 3635 | * | ||
| 3636 | * Decide whether to wait for discovery to complete. Used either to | ||
| 3637 | * locate the boot device or to report volumes ahead of physical devices. | ||
| 3638 | * | ||
| 3639 | * Returns 1 for wait, 0 for don't wait | ||
| 3640 | */ | ||
| 3641 | static int | ||
| 3642 | _base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc) | ||
| 3643 | { | ||
| 3644 | /* We wait for discovery to complete if IR firmware is loaded. | ||
| 3645 | * The sas topology events arrive before PD events, so we need time to | ||
| 3646 | * turn on the bit in ioc->pd_handles to indicate a PD. | ||
| 3647 | * Also, it may be required to report Volumes ahead of physical | ||
| 3648 | * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. | ||
| 3649 | */ | ||
| 3650 | if (ioc->ir_firmware) | ||
| 3651 | return 1; | ||
| 3652 | |||
| 3653 | /* if no Bios, then we don't need to wait */ | ||
| 3654 | if (!ioc->bios_pg3.BiosVersion) | ||
| 3655 | return 0; | ||
| 3656 | |||
| 3657 | /* Bios is present, so we drop down here. | ||
| 3658 | * | ||
| 3659 | * If there are any entries in Bios Page 2, then we wait | ||
| 3660 | * for discovery to complete. | ||
| 3661 | */ | ||
| 3662 | |||
| 3663 | /* Current Boot Device */ | ||
| 3664 | if ((ioc->bios_pg2.CurrentBootDeviceForm & | ||
| 3665 | MPI2_BIOSPAGE2_FORM_MASK) == | ||
| 3666 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && | ||
| 3667 | /* Request Boot Device */ | ||
| 3668 | (ioc->bios_pg2.ReqBootDeviceForm & | ||
| 3669 | MPI2_BIOSPAGE2_FORM_MASK) == | ||
| 3670 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && | ||
| 3671 | /* Alternate Request Boot Device */ | ||
| 3672 | (ioc->bios_pg2.ReqAltBootDeviceForm & | ||
| 3673 | MPI2_BIOSPAGE2_FORM_MASK) == | ||
| 3674 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) | ||
| 3675 | return 0; | ||
| 3676 | |||
| 3677 | return 1; | ||
| 3678 | } | ||
| 3679 | |||
| 3680 | |||
| 3681 | /** | ||
| 3544 | * _base_unmask_events - turn on notification for this event | 3682 | * _base_unmask_events - turn on notification for this event |
| 3545 | * @ioc: per adapter object | 3683 | * @ioc: per adapter object |
| 3546 | * @event: firmware event | 3684 | * @event: firmware event |
| @@ -3962,6 +4100,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 3962 | skip_init_reply_post_host_index: | 4100 | skip_init_reply_post_host_index: |
| 3963 | 4101 | ||
| 3964 | _base_unmask_interrupts(ioc); | 4102 | _base_unmask_interrupts(ioc); |
| 4103 | |||
| 3965 | r = _base_event_notification(ioc, sleep_flag); | 4104 | r = _base_event_notification(ioc, sleep_flag); |
| 3966 | if (r) | 4105 | if (r) |
| 3967 | return r; | 4106 | return r; |
| @@ -3969,7 +4108,18 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 3969 | if (sleep_flag == CAN_SLEEP) | 4108 | if (sleep_flag == CAN_SLEEP) |
| 3970 | _base_static_config_pages(ioc); | 4109 | _base_static_config_pages(ioc); |
| 3971 | 4110 | ||
| 3972 | if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) { | 4111 | |
| 4112 | if (ioc->is_driver_loading) { | ||
| 4113 | |||
| 4114 | |||
| 4115 | |||
| 4116 | ioc->wait_for_discovery_to_complete = | ||
| 4117 | _base_determine_wait_on_discovery(ioc); | ||
| 4118 | return r; /* scan_start and scan_finished support */ | ||
| 4119 | } | ||
| 4120 | |||
| 4121 | |||
| 4122 | if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) { | ||
| 3973 | if (ioc->manu_pg10.OEMIdentifier == 0x80) { | 4123 | if (ioc->manu_pg10.OEMIdentifier == 0x80) { |
| 3974 | hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & | 4124 | hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & |
| 3975 | MFG_PAGE10_HIDE_SSDS_MASK); | 4125 | MFG_PAGE10_HIDE_SSDS_MASK); |
| @@ -3978,13 +4128,6 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
| 3978 | } | 4128 | } |
| 3979 | } | 4129 | } |
| 3980 | 4130 | ||
| 3981 | if (ioc->wait_for_port_enable_to_complete) { | ||
| 3982 | if (diag_buffer_enable != 0) | ||
| 3983 | mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); | ||
| 3984 | if (disable_discovery > 0) | ||
| 3985 | return r; | ||
| 3986 | } | ||
| 3987 | |||
| 3988 | r = _base_send_port_enable(ioc, sleep_flag); | 4131 | r = _base_send_port_enable(ioc, sleep_flag); |
| 3989 | if (r) | 4132 | if (r) |
| 3990 | return r; | 4133 | return r; |
| @@ -4121,6 +4264,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4121 | ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | 4264 | ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); |
| 4122 | ioc->base_cmds.status = MPT2_CMD_NOT_USED; | 4265 | ioc->base_cmds.status = MPT2_CMD_NOT_USED; |
| 4123 | 4266 | ||
| 4267 | /* port_enable command bits */ | ||
| 4268 | ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | ||
| 4269 | ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED; | ||
| 4270 | |||
| 4124 | /* transport internal command bits */ | 4271 | /* transport internal command bits */ |
| 4125 | ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | 4272 | ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); |
| 4126 | ioc->transport_cmds.status = MPT2_CMD_NOT_USED; | 4273 | ioc->transport_cmds.status = MPT2_CMD_NOT_USED; |
| @@ -4162,8 +4309,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4162 | goto out_free_resources; | 4309 | goto out_free_resources; |
| 4163 | } | 4310 | } |
| 4164 | 4311 | ||
| 4165 | init_completion(&ioc->shost_recovery_done); | ||
| 4166 | |||
| 4167 | for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) | 4312 | for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) |
| 4168 | ioc->event_masks[i] = -1; | 4313 | ioc->event_masks[i] = -1; |
| 4169 | 4314 | ||
| @@ -4186,7 +4331,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4186 | _base_update_missing_delay(ioc, missing_delay[0], | 4331 | _base_update_missing_delay(ioc, missing_delay[0], |
| 4187 | missing_delay[1]); | 4332 | missing_delay[1]); |
| 4188 | 4333 | ||
| 4189 | mpt2sas_base_start_watchdog(ioc); | ||
| 4190 | return 0; | 4334 | return 0; |
| 4191 | 4335 | ||
| 4192 | out_free_resources: | 4336 | out_free_resources: |
| @@ -4204,6 +4348,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4204 | kfree(ioc->scsih_cmds.reply); | 4348 | kfree(ioc->scsih_cmds.reply); |
| 4205 | kfree(ioc->config_cmds.reply); | 4349 | kfree(ioc->config_cmds.reply); |
| 4206 | kfree(ioc->base_cmds.reply); | 4350 | kfree(ioc->base_cmds.reply); |
| 4351 | kfree(ioc->port_enable_cmds.reply); | ||
| 4207 | kfree(ioc->ctl_cmds.reply); | 4352 | kfree(ioc->ctl_cmds.reply); |
| 4208 | kfree(ioc->ctl_cmds.sense); | 4353 | kfree(ioc->ctl_cmds.sense); |
| 4209 | kfree(ioc->pfacts); | 4354 | kfree(ioc->pfacts); |
| @@ -4243,6 +4388,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4243 | kfree(ioc->ctl_cmds.reply); | 4388 | kfree(ioc->ctl_cmds.reply); |
| 4244 | kfree(ioc->ctl_cmds.sense); | 4389 | kfree(ioc->ctl_cmds.sense); |
| 4245 | kfree(ioc->base_cmds.reply); | 4390 | kfree(ioc->base_cmds.reply); |
| 4391 | kfree(ioc->port_enable_cmds.reply); | ||
| 4246 | kfree(ioc->tm_cmds.reply); | 4392 | kfree(ioc->tm_cmds.reply); |
| 4247 | kfree(ioc->transport_cmds.reply); | 4393 | kfree(ioc->transport_cmds.reply); |
| 4248 | kfree(ioc->scsih_cmds.reply); | 4394 | kfree(ioc->scsih_cmds.reply); |
| @@ -4284,6 +4430,20 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) | |||
| 4284 | mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid); | 4430 | mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid); |
| 4285 | complete(&ioc->base_cmds.done); | 4431 | complete(&ioc->base_cmds.done); |
| 4286 | } | 4432 | } |
| 4433 | if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { | ||
| 4434 | ioc->port_enable_failed = 1; | ||
| 4435 | ioc->port_enable_cmds.status |= MPT2_CMD_RESET; | ||
| 4436 | mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); | ||
| 4437 | if (ioc->is_driver_loading) { | ||
| 4438 | ioc->start_scan_failed = | ||
| 4439 | MPI2_IOCSTATUS_INTERNAL_ERROR; | ||
| 4440 | ioc->start_scan = 0; | ||
| 4441 | ioc->port_enable_cmds.status = | ||
| 4442 | MPT2_CMD_NOT_USED; | ||
| 4443 | } else | ||
| 4444 | complete(&ioc->port_enable_cmds.done); | ||
| 4445 | |||
| 4446 | } | ||
| 4287 | if (ioc->config_cmds.status & MPT2_CMD_PENDING) { | 4447 | if (ioc->config_cmds.status & MPT2_CMD_PENDING) { |
| 4288 | ioc->config_cmds.status |= MPT2_CMD_RESET; | 4448 | ioc->config_cmds.status |= MPT2_CMD_RESET; |
| 4289 | mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); | 4449 | mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); |
| @@ -4349,7 +4509,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, | |||
| 4349 | { | 4509 | { |
| 4350 | int r; | 4510 | int r; |
| 4351 | unsigned long flags; | 4511 | unsigned long flags; |
| 4352 | u8 pe_complete = ioc->wait_for_port_enable_to_complete; | ||
| 4353 | 4512 | ||
| 4354 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, | 4513 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, |
| 4355 | __func__)); | 4514 | __func__)); |
| @@ -4396,7 +4555,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, | |||
| 4396 | /* If this hard reset is called while port enable is active, then | 4555 | /* If this hard reset is called while port enable is active, then |
| 4397 | * there is no reason to call make_ioc_operational | 4556 | * there is no reason to call make_ioc_operational |
| 4398 | */ | 4557 | */ |
| 4399 | if (pe_complete) { | 4558 | if (ioc->is_driver_loading && ioc->port_enable_failed) { |
| 4559 | ioc->remove_host = 1; | ||
| 4400 | r = -EFAULT; | 4560 | r = -EFAULT; |
| 4401 | goto out; | 4561 | goto out; |
| 4402 | } | 4562 | } |
| @@ -4410,7 +4570,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, | |||
| 4410 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 4570 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
| 4411 | ioc->ioc_reset_in_progress_status = r; | 4571 | ioc->ioc_reset_in_progress_status = r; |
| 4412 | ioc->shost_recovery = 0; | 4572 | ioc->shost_recovery = 0; |
| 4413 | complete(&ioc->shost_recovery_done); | ||
| 4414 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 4573 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
| 4415 | mutex_unlock(&ioc->reset_in_progress_mutex); | 4574 | mutex_unlock(&ioc->reset_in_progress_mutex); |
| 4416 | 4575 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 59354dba68c0..3c3babc7d260 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
| @@ -69,11 +69,11 @@ | |||
| 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
| 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
| 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
| 72 | #define MPT2SAS_DRIVER_VERSION "09.100.00.01" | 72 | #define MPT2SAS_DRIVER_VERSION "10.100.00.00" |
| 73 | #define MPT2SAS_MAJOR_VERSION 09 | 73 | #define MPT2SAS_MAJOR_VERSION 10 |
| 74 | #define MPT2SAS_MINOR_VERSION 100 | 74 | #define MPT2SAS_MINOR_VERSION 100 |
| 75 | #define MPT2SAS_BUILD_VERSION 00 | 75 | #define MPT2SAS_BUILD_VERSION 00 |
| 76 | #define MPT2SAS_RELEASE_VERSION 01 | 76 | #define MPT2SAS_RELEASE_VERSION 00 |
| 77 | 77 | ||
| 78 | /* | 78 | /* |
| 79 | * Set MPT2SAS_SG_DEPTH value based on user input. | 79 | * Set MPT2SAS_SG_DEPTH value based on user input. |
| @@ -655,7 +655,12 @@ enum mutex_type { | |||
| 655 | * @ignore_loginfos: ignore loginfos during task management | 655 | * @ignore_loginfos: ignore loginfos during task management |
| 656 | * @remove_host: flag for when driver unloads, to avoid sending dev resets | 656 | * @remove_host: flag for when driver unloads, to avoid sending dev resets |
| 657 | * @pci_error_recovery: flag to prevent ioc access until slot reset completes | 657 | * @pci_error_recovery: flag to prevent ioc access until slot reset completes |
| 658 | * @wait_for_port_enable_to_complete: | 658 | * @wait_for_discovery_to_complete: flag set at driver load time when |
| 659 | * waiting on reporting devices | ||
| 660 | * @is_driver_loading: flag set at driver load time | ||
| 661 | * @port_enable_failed: flag set when port enable has failed | ||
| 662 | * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work | ||
| 663 | * @start_scan_failed: means port enable failed, returns the ioc_status | ||
| 659 | * @msix_enable: flag indicating msix is enabled | 664 | * @msix_enable: flag indicating msix is enabled |
| 660 | * @msix_vector_count: number msix vectors | 665 | * @msix_vector_count: number msix vectors |
| 661 | * @cpu_msix_table: table for mapping cpus to msix index | 666 | * @cpu_msix_table: table for mapping cpus to msix index |
| @@ -790,15 +795,20 @@ struct MPT2SAS_ADAPTER { | |||
| 790 | u8 shost_recovery; | 795 | u8 shost_recovery; |
| 791 | 796 | ||
| 792 | struct mutex reset_in_progress_mutex; | 797 | struct mutex reset_in_progress_mutex; |
| 793 | struct completion shost_recovery_done; | ||
| 794 | spinlock_t ioc_reset_in_progress_lock; | 798 | spinlock_t ioc_reset_in_progress_lock; |
| 795 | u8 ioc_link_reset_in_progress; | 799 | u8 ioc_link_reset_in_progress; |
| 796 | int ioc_reset_in_progress_status; | 800 | u8 ioc_reset_in_progress_status; |
| 797 | 801 | ||
| 798 | u8 ignore_loginfos; | 802 | u8 ignore_loginfos; |
| 799 | u8 remove_host; | 803 | u8 remove_host; |
| 800 | u8 pci_error_recovery; | 804 | u8 pci_error_recovery; |
| 801 | u8 wait_for_port_enable_to_complete; | 805 | u8 wait_for_discovery_to_complete; |
| 806 | struct completion port_enable_done; | ||
| 807 | u8 is_driver_loading; | ||
| 808 | u8 port_enable_failed; | ||
| 809 | |||
| 810 | u8 start_scan; | ||
| 811 | u16 start_scan_failed; | ||
| 802 | 812 | ||
| 803 | u8 msix_enable; | 813 | u8 msix_enable; |
| 804 | u16 msix_vector_count; | 814 | u16 msix_vector_count; |
| @@ -814,11 +824,13 @@ struct MPT2SAS_ADAPTER { | |||
| 814 | u8 scsih_cb_idx; | 824 | u8 scsih_cb_idx; |
| 815 | u8 ctl_cb_idx; | 825 | u8 ctl_cb_idx; |
| 816 | u8 base_cb_idx; | 826 | u8 base_cb_idx; |
| 827 | u8 port_enable_cb_idx; | ||
| 817 | u8 config_cb_idx; | 828 | u8 config_cb_idx; |
| 818 | u8 tm_tr_cb_idx; | 829 | u8 tm_tr_cb_idx; |
| 819 | u8 tm_tr_volume_cb_idx; | 830 | u8 tm_tr_volume_cb_idx; |
| 820 | u8 tm_sas_control_cb_idx; | 831 | u8 tm_sas_control_cb_idx; |
| 821 | struct _internal_cmd base_cmds; | 832 | struct _internal_cmd base_cmds; |
| 833 | struct _internal_cmd port_enable_cmds; | ||
| 822 | struct _internal_cmd transport_cmds; | 834 | struct _internal_cmd transport_cmds; |
| 823 | struct _internal_cmd scsih_cmds; | 835 | struct _internal_cmd scsih_cmds; |
| 824 | struct _internal_cmd tm_cmds; | 836 | struct _internal_cmd tm_cmds; |
| @@ -1001,6 +1013,8 @@ void mpt2sas_base_release_callback_handler(u8 cb_idx); | |||
| 1001 | 1013 | ||
| 1002 | u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 1014 | u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
| 1003 | u32 reply); | 1015 | u32 reply); |
| 1016 | u8 mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, | ||
| 1017 | u8 msix_index, u32 reply); | ||
| 1004 | void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr); | 1018 | void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr); |
| 1005 | 1019 | ||
| 1006 | u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked); | 1020 | u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked); |
| @@ -1015,6 +1029,8 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty | |||
| 1015 | 1029 | ||
| 1016 | void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); | 1030 | void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); |
| 1017 | 1031 | ||
| 1032 | int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc); | ||
| 1033 | |||
| 1018 | /* scsih shared API */ | 1034 | /* scsih shared API */ |
| 1019 | u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | 1035 | u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, |
| 1020 | u32 reply); | 1036 | u32 reply); |
| @@ -1032,6 +1048,8 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAP | |||
| 1032 | struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( | 1048 | struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( |
| 1033 | struct MPT2SAS_ADAPTER *ioc, u64 sas_address); | 1049 | struct MPT2SAS_ADAPTER *ioc, u64 sas_address); |
| 1034 | 1050 | ||
| 1051 | void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc); | ||
| 1052 | |||
| 1035 | void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); | 1053 | void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); |
| 1036 | 1054 | ||
| 1037 | /* config shared API */ | 1055 | /* config shared API */ |
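
Taken together, mpt2sas_port_enable(), mpt2sas_port_enable_done() and mpt2sas_port_enable_complete() split port enable into a non-blocking send plus a completion callback. The sketch below is a simplified illustration, not the patch's actual callback body (only the names and fields come from this patch): it shows how the callback can route the result to scsih via the fake event while the driver is loading, or wake a waiter otherwise, mirroring the reset-handler hunk above.

/* Simplified sketch -- not the actual implementation in this patch. */
u8
mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    u8 msix_index, u32 reply)
{
	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;

	if (ioc->is_driver_loading) {
		/* fast-load path: hand the result to scsih as the
		 * MPT2SAS_PORT_ENABLE_COMPLETE fake firmware event
		 */
		mpt2sas_port_enable_complete(ioc);
		return 1;
	}
	/* otherwise a caller is blocked waiting on port_enable_cmds.done */
	complete(&ioc->port_enable_cmds.done);
	return 1;
}
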
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 2b1101076cfe..36ea0b2d8020 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
| @@ -1356,6 +1356,9 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, | |||
| 1356 | Mpi2ConfigReply_t mpi_reply; | 1356 | Mpi2ConfigReply_t mpi_reply; |
| 1357 | int r, i, config_page_sz; | 1357 | int r, i, config_page_sz; |
| 1358 | u16 ioc_status; | 1358 | u16 ioc_status; |
| 1359 | int config_num; | ||
| 1360 | u16 element_type; | ||
| 1361 | u16 phys_disk_dev_handle; | ||
| 1359 | 1362 | ||
| 1360 | *volume_handle = 0; | 1363 | *volume_handle = 0; |
| 1361 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1364 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
| @@ -1371,35 +1374,53 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, | |||
| 1371 | if (r) | 1374 | if (r) |
| 1372 | goto out; | 1375 | goto out; |
| 1373 | 1376 | ||
| 1374 | mpi_request.PageAddress = | ||
| 1375 | cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG); | ||
| 1376 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; | 1377 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; |
| 1377 | config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); | 1378 | config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); |
| 1378 | config_page = kmalloc(config_page_sz, GFP_KERNEL); | 1379 | config_page = kmalloc(config_page_sz, GFP_KERNEL); |
| 1379 | if (!config_page) | 1380 | if (!config_page) { |
| 1380 | goto out; | 1381 | r = -1; |
| 1381 | r = _config_request(ioc, &mpi_request, &mpi_reply, | ||
| 1382 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, | ||
| 1383 | config_page_sz); | ||
| 1384 | if (r) | ||
| 1385 | goto out; | 1382 | goto out; |
| 1386 | 1383 | } | |
| 1387 | r = -1; | 1384 | config_num = 0xff; |
| 1388 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; | 1385 | while (1) { |
| 1389 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | 1386 | mpi_request.PageAddress = cpu_to_le32(config_num + |
| 1390 | goto out; | 1387 | MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM); |
| 1391 | for (i = 0; i < config_page->NumElements; i++) { | 1388 | r = _config_request(ioc, &mpi_request, &mpi_reply, |
| 1392 | if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) & | 1389 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, |
| 1393 | MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) != | 1390 | config_page_sz); |
| 1394 | MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT) | 1391 | if (r) |
| 1395 | continue; | 1392 | goto out; |
| 1396 | if (le16_to_cpu(config_page->ConfigElement[i]. | 1393 | r = -1; |
| 1397 | PhysDiskDevHandle) == pd_handle) { | 1394 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
| 1398 | *volume_handle = le16_to_cpu(config_page-> | 1395 | MPI2_IOCSTATUS_MASK; |
| 1399 | ConfigElement[i].VolDevHandle); | 1396 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
| 1400 | r = 0; | ||
| 1401 | goto out; | 1397 | goto out; |
| 1398 | for (i = 0; i < config_page->NumElements; i++) { | ||
| 1399 | element_type = le16_to_cpu(config_page-> | ||
| 1400 | ConfigElement[i].ElementFlags) & | ||
| 1401 | MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE; | ||
| 1402 | if (element_type == | ||
| 1403 | MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT || | ||
| 1404 | element_type == | ||
| 1405 | MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) { | ||
| 1406 | phys_disk_dev_handle = | ||
| 1407 | le16_to_cpu(config_page->ConfigElement[i]. | ||
| 1408 | PhysDiskDevHandle); | ||
| 1409 | if (phys_disk_dev_handle == pd_handle) { | ||
| 1410 | *volume_handle = | ||
| 1411 | le16_to_cpu(config_page-> | ||
| 1412 | ConfigElement[i].VolDevHandle); | ||
| 1413 | r = 0; | ||
| 1414 | goto out; | ||
| 1415 | } | ||
| 1416 | } else if (element_type == | ||
| 1417 | MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { | ||
| 1418 | *volume_handle = 0; | ||
| 1419 | r = 0; | ||
| 1420 | goto out; | ||
| 1421 | } | ||
| 1402 | } | 1422 | } |
| 1423 | config_num = config_page->ConfigNum; | ||
| 1403 | } | 1424 | } |
| 1404 | out: | 1425 | out: |
| 1405 | kfree(config_page); | 1426 | kfree(config_page); |
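
Worked example for the rewritten lookup above (illustrative): instead of a single read of the active RAID configuration, the loop keeps feeding the previous ConfigNum back through MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM, so a physical disk that lives in a later configuration can still be matched. A VOL_PHYS_DISK or OCE element whose PhysDiskDevHandle equals pd_handle returns its VolDevHandle; a HOT_SPARE element returns success with *volume_handle left at 0; the walk ends as soon as a page read fails or the firmware stops reporting MPI2_IOCSTATUS_SUCCESS.
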
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 9adb0133d6fb..aabcb911706e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
| @@ -1207,6 +1207,9 @@ _ctl_do_reset(void __user *arg) | |||
| 1207 | if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) | 1207 | if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) |
| 1208 | return -ENODEV; | 1208 | return -ENODEV; |
| 1209 | 1209 | ||
| 1210 | if (ioc->shost_recovery || ioc->pci_error_recovery || | ||
| 1211 | ioc->is_driver_loading) | ||
| 1212 | return -EAGAIN; | ||
| 1210 | dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, | 1213 | dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, |
| 1211 | __func__)); | 1214 | __func__)); |
| 1212 | 1215 | ||
| @@ -2178,7 +2181,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) | |||
| 2178 | !ioc) | 2181 | !ioc) |
| 2179 | return -ENODEV; | 2182 | return -ENODEV; |
| 2180 | 2183 | ||
| 2181 | if (ioc->shost_recovery || ioc->pci_error_recovery) | 2184 | if (ioc->shost_recovery || ioc->pci_error_recovery || |
| 2185 | ioc->is_driver_loading) | ||
| 2182 | return -EAGAIN; | 2186 | return -EAGAIN; |
| 2183 | 2187 | ||
| 2184 | if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { | 2188 | if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { |
| @@ -2297,7 +2301,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) | |||
| 2297 | if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) | 2301 | if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) |
| 2298 | return -ENODEV; | 2302 | return -ENODEV; |
| 2299 | 2303 | ||
| 2300 | if (ioc->shost_recovery || ioc->pci_error_recovery) | 2304 | if (ioc->shost_recovery || ioc->pci_error_recovery || |
| 2305 | ioc->is_driver_loading) | ||
| 2301 | return -EAGAIN; | 2306 | return -EAGAIN; |
| 2302 | 2307 | ||
| 2303 | memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); | 2308 | memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 1da1aa1a11e2..8889b1babcac 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
| @@ -71,6 +71,9 @@ static void _firmware_event_work(struct work_struct *work); | |||
| 71 | 71 | ||
| 72 | static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid); | 72 | static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid); |
| 73 | 73 | ||
| 74 | static void _scsih_scan_start(struct Scsi_Host *shost); | ||
| 75 | static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time); | ||
| 76 | |||
| 74 | /* global parameters */ | 77 | /* global parameters */ |
| 75 | LIST_HEAD(mpt2sas_ioc_list); | 78 | LIST_HEAD(mpt2sas_ioc_list); |
| 76 | 79 | ||
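
The two forward declarations added above tie into the SCSI midlayer's asynchronous scan: scan_start is called once, then scan_finished is polled until it returns nonzero. Below is a minimal sketch of that pair, assuming only the start_scan, start_scan_failed and is_driver_loading fields introduced by this patch; it is illustrative, not the patch's implementation, which carries additional handling omitted here.

/* Illustrative sketch only -- not the implementation in this patch. */
static void
_scsih_scan_start(struct Scsi_Host *shost)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	ioc->start_scan = 1;
	if (mpt2sas_port_enable(ioc))	/* non-blocking PORT_ENABLE request */
		printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
}

static int
_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	if (ioc->start_scan)
		return 0;	/* port enable still outstanding, keep polling */

	if (ioc->start_scan_failed)
		printk(MPT2SAS_INFO_FMT "port enable failed: ioc_status(0x%04x)\n",
		    ioc->name, ioc->start_scan_failed);

	ioc->is_driver_loading = 0;
	return 1;	/* devices have been reported, let the scan complete */
}
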
| @@ -79,6 +82,7 @@ static u8 scsi_io_cb_idx = -1; | |||
| 79 | static u8 tm_cb_idx = -1; | 82 | static u8 tm_cb_idx = -1; |
| 80 | static u8 ctl_cb_idx = -1; | 83 | static u8 ctl_cb_idx = -1; |
| 81 | static u8 base_cb_idx = -1; | 84 | static u8 base_cb_idx = -1; |
| 85 | static u8 port_enable_cb_idx = -1; | ||
| 82 | static u8 transport_cb_idx = -1; | 86 | static u8 transport_cb_idx = -1; |
| 83 | static u8 scsih_cb_idx = -1; | 87 | static u8 scsih_cb_idx = -1; |
| 84 | static u8 config_cb_idx = -1; | 88 | static u8 config_cb_idx = -1; |
| @@ -103,6 +107,18 @@ static int max_lun = MPT2SAS_MAX_LUN; | |||
| 103 | module_param(max_lun, int, 0); | 107 | module_param(max_lun, int, 0); |
| 104 | MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); | 108 | MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); |
| 105 | 109 | ||
| 110 | /* diag_buffer_enable is bitwise | ||
| 111 | * bit 0 set = TRACE | ||
| 112 | * bit 1 set = SNAPSHOT | ||
| 113 | * bit 2 set = EXTENDED | ||
| 114 | * | ||
| 115 | * Any combination of these bits may be set | ||
| 116 | */ | ||
| 117 | static int diag_buffer_enable = -1; | ||
| 118 | module_param(diag_buffer_enable, int, 0); | ||
| 119 | MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " | ||
| 120 | "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); | ||
| 121 | |||
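
Illustration of the bit layout above: loading the driver with diag_buffer_enable=3 asks for both a TRACE (bit 0) and a SNAPSHOT (bit 1) buffer to be posted, diag_buffer_enable=4 asks for an EXTENDED buffer only, and with the default value no buffer is requested.
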
| 106 | /** | 122 | /** |
| 107 | * struct sense_info - common structure for obtaining sense keys | 123 | * struct sense_info - common structure for obtaining sense keys |
| 108 | * @skey: sense key | 124 | * @skey: sense key |
| @@ -117,8 +133,8 @@ struct sense_info { | |||
| 117 | 133 | ||
| 118 | 134 | ||
| 119 | #define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) | 135 | #define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) |
| 120 | #define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) | 136 | #define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD) |
| 121 | 137 | #define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) | |
| 122 | /** | 138 | /** |
| 123 | * struct fw_event_work - firmware event struct | 139 | * struct fw_event_work - firmware event struct |
| 124 | * @list: link list framework | 140 | * @list: link list framework |
| @@ -372,31 +388,34 @@ _scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
| 372 | Mpi2SasDevicePage0_t sas_device_pg0; | 388 | Mpi2SasDevicePage0_t sas_device_pg0; |
| 373 | Mpi2ConfigReply_t mpi_reply; | 389 | Mpi2ConfigReply_t mpi_reply; |
| 374 | u32 ioc_status; | 390 | u32 ioc_status; |
| 391 | *sas_address = 0; | ||
| 375 | 392 | ||
| 376 | if (handle <= ioc->sas_hba.num_phys) { | 393 | if (handle <= ioc->sas_hba.num_phys) { |
| 377 | *sas_address = ioc->sas_hba.sas_address; | 394 | *sas_address = ioc->sas_hba.sas_address; |
| 378 | return 0; | 395 | return 0; |
| 379 | } else | 396 | } |
| 380 | *sas_address = 0; | ||
| 381 | 397 | ||
| 382 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, | 398 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, |
| 383 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { | 399 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { |
| 384 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 400 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, |
| 385 | ioc->name, __FILE__, __LINE__, __func__); | 401 | __FILE__, __LINE__, __func__); |
| 386 | return -ENXIO; | 402 | return -ENXIO; |
| 387 | } | 403 | } |
| 388 | 404 | ||
| 389 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | 405 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; |
| 390 | MPI2_IOCSTATUS_MASK; | 406 | if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { |
| 391 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | 407 | *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); |
| 392 | printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" | 408 | return 0; |
| 393 | "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, | ||
| 394 | __FILE__, __LINE__, __func__); | ||
| 395 | return -EIO; | ||
| 396 | } | 409 | } |
| 397 | 410 | ||
| 398 | *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); | 411 | /* we hit this because the given parent handle doesn't exist */ |
| 399 | return 0; | 412 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) |
| 413 | return -ENXIO; | ||
| 414 | /* else error case */ | ||
| 415 | printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), " | ||
| 416 | "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, | ||
| 417 | __FILE__, __LINE__, __func__); | ||
| 418 | return -EIO; | ||
| 400 | } | 419 | } |
| 401 | 420 | ||
| 402 | /** | 421 | /** |
| @@ -424,7 +443,11 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc, | |||
| 424 | u16 slot; | 443 | u16 slot; |
| 425 | 444 | ||
| 426 | /* only process this function when driver loads */ | 445 | /* only process this function when driver loads */ |
| 427 | if (!ioc->wait_for_port_enable_to_complete) | 446 | if (!ioc->is_driver_loading) |
| 447 | return; | ||
| 448 | |||
| 449 | /* no Bios, return immediately */ | ||
| 450 | if (!ioc->bios_pg3.BiosVersion) | ||
| 428 | return; | 451 | return; |
| 429 | 452 | ||
| 430 | if (!is_raid) { | 453 | if (!is_raid) { |
| @@ -587,8 +610,15 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 587 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 610 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 588 | 611 | ||
| 589 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, | 612 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, |
| 590 | sas_device->sas_address_parent)) | 613 | sas_device->sas_address_parent)) { |
| 591 | _scsih_sas_device_remove(ioc, sas_device); | 614 | _scsih_sas_device_remove(ioc, sas_device); |
| 615 | } else if (!sas_device->starget) { | ||
| 616 | if (!ioc->is_driver_loading) | ||
| 617 | mpt2sas_transport_port_remove(ioc, | ||
| 618 | sas_device->sas_address, | ||
| 619 | sas_device->sas_address_parent); | ||
| 620 | _scsih_sas_device_remove(ioc, sas_device); | ||
| 621 | } | ||
| 592 | } | 622 | } |
| 593 | 623 | ||
| 594 | /** | 624 | /** |
| @@ -1400,6 +1430,10 @@ _scsih_slave_destroy(struct scsi_device *sdev) | |||
| 1400 | { | 1430 | { |
| 1401 | struct MPT2SAS_TARGET *sas_target_priv_data; | 1431 | struct MPT2SAS_TARGET *sas_target_priv_data; |
| 1402 | struct scsi_target *starget; | 1432 | struct scsi_target *starget; |
| 1433 | struct Scsi_Host *shost; | ||
| 1434 | struct MPT2SAS_ADAPTER *ioc; | ||
| 1435 | struct _sas_device *sas_device; | ||
| 1436 | unsigned long flags; | ||
| 1403 | 1437 | ||
| 1404 | if (!sdev->hostdata) | 1438 | if (!sdev->hostdata) |
| 1405 | return; | 1439 | return; |
| @@ -1407,6 +1441,19 @@ _scsih_slave_destroy(struct scsi_device *sdev) | |||
| 1407 | starget = scsi_target(sdev); | 1441 | starget = scsi_target(sdev); |
| 1408 | sas_target_priv_data = starget->hostdata; | 1442 | sas_target_priv_data = starget->hostdata; |
| 1409 | sas_target_priv_data->num_luns--; | 1443 | sas_target_priv_data->num_luns--; |
| 1444 | |||
| 1445 | shost = dev_to_shost(&starget->dev); | ||
| 1446 | ioc = shost_priv(shost); | ||
| 1447 | |||
| 1448 | if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { | ||
| 1449 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 1450 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
| 1451 | sas_target_priv_data->sas_address); | ||
| 1452 | if (sas_device) | ||
| 1453 | sas_device->starget = NULL; | ||
| 1454 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 1455 | } | ||
| 1456 | |||
| 1410 | kfree(sdev->hostdata); | 1457 | kfree(sdev->hostdata); |
| 1411 | sdev->hostdata = NULL; | 1458 | sdev->hostdata = NULL; |
| 1412 | } | 1459 | } |
| @@ -1598,8 +1645,10 @@ _scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device) | |||
| 1598 | * _scsih_get_volume_capabilities - volume capabilities | 1645 | * _scsih_get_volume_capabilities - volume capabilities |
| 1599 | * @ioc: per adapter object | 1646 | * @ioc: per adapter object |
| 1600 | * @sas_device: the raid_device object | 1647 | * @sas_device: the raid_device object |
| 1648 | * | ||
| 1649 | * Returns 0 for success, else 1 | ||
| 1601 | */ | 1650 | */ |
| 1602 | static void | 1651 | static int |
| 1603 | _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, | 1652 | _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, |
| 1604 | struct _raid_device *raid_device) | 1653 | struct _raid_device *raid_device) |
| 1605 | { | 1654 | { |
| @@ -1612,9 +1661,10 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, | |||
| 1612 | 1661 | ||
| 1613 | if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle, | 1662 | if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle, |
| 1614 | &num_pds)) || !num_pds) { | 1663 | &num_pds)) || !num_pds) { |
| 1615 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 1664 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT |
| 1616 | ioc->name, __FILE__, __LINE__, __func__); | 1665 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, |
| 1617 | return; | 1666 | __func__)); |
| 1667 | return 1; | ||
| 1618 | } | 1668 | } |
| 1619 | 1669 | ||
| 1620 | raid_device->num_pds = num_pds; | 1670 | raid_device->num_pds = num_pds; |
| @@ -1622,17 +1672,19 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, | |||
| 1622 | sizeof(Mpi2RaidVol0PhysDisk_t)); | 1672 | sizeof(Mpi2RaidVol0PhysDisk_t)); |
| 1623 | vol_pg0 = kzalloc(sz, GFP_KERNEL); | 1673 | vol_pg0 = kzalloc(sz, GFP_KERNEL); |
| 1624 | if (!vol_pg0) { | 1674 | if (!vol_pg0) { |
| 1625 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 1675 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT |
| 1626 | ioc->name, __FILE__, __LINE__, __func__); | 1676 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, |
| 1627 | return; | 1677 | __func__)); |
| 1678 | return 1; | ||
| 1628 | } | 1679 | } |
| 1629 | 1680 | ||
| 1630 | if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, | 1681 | if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, |
| 1631 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { | 1682 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { |
| 1632 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 1683 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT |
| 1633 | ioc->name, __FILE__, __LINE__, __func__); | 1684 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, |
| 1685 | __func__)); | ||
| 1634 | kfree(vol_pg0); | 1686 | kfree(vol_pg0); |
| 1635 | return; | 1687 | return 1; |
| 1636 | } | 1688 | } |
| 1637 | 1689 | ||
| 1638 | raid_device->volume_type = vol_pg0->VolumeType; | 1690 | raid_device->volume_type = vol_pg0->VolumeType; |
| @@ -1652,6 +1704,7 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, | |||
| 1652 | } | 1704 | } |
| 1653 | 1705 | ||
| 1654 | kfree(vol_pg0); | 1706 | kfree(vol_pg0); |
| 1707 | return 0; | ||
| 1655 | } | 1708 | } |
| 1656 | /** | 1709 | /** |
| 1657 | * _scsih_disable_ddio - Disable direct I/O for all the volumes | 1710 | * _scsih_disable_ddio - Disable direct I/O for all the volumes |
| @@ -1922,13 +1975,20 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
| 1922 | sas_target_priv_data->handle); | 1975 | sas_target_priv_data->handle); |
| 1923 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | 1976 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
| 1924 | if (!raid_device) { | 1977 | if (!raid_device) { |
| 1925 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 1978 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT |
| 1926 | ioc->name, __FILE__, __LINE__, __func__); | 1979 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, |
| 1927 | return 0; | 1980 | __LINE__, __func__)); |
| 1981 | return 1; | ||
| 1928 | } | 1982 | } |
| 1929 | 1983 | ||
| 1930 | _scsih_get_volume_capabilities(ioc, raid_device); | 1984 | _scsih_get_volume_capabilities(ioc, raid_device); |
| 1931 | 1985 | ||
| 1986 | if (_scsih_get_volume_capabilities(ioc, raid_device)) { | ||
| 1987 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT | ||
| 1988 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, | ||
| 1989 | __LINE__, __func__)); | ||
| 1990 | return 1; | ||
| 1991 | } | ||
| 1932 | /* | 1992 | /* |
| 1933 | * WARPDRIVE: Initialize the required data for Direct IO | 1993 | * WARPDRIVE: Initialize the required data for Direct IO |
| 1934 | */ | 1994 | */ |
| @@ -2002,11 +2062,22 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
| 2002 | if (sas_device) { | 2062 | if (sas_device) { |
| 2003 | if (sas_target_priv_data->flags & | 2063 | if (sas_target_priv_data->flags & |
| 2004 | MPT_TARGET_FLAGS_RAID_COMPONENT) { | 2064 | MPT_TARGET_FLAGS_RAID_COMPONENT) { |
| 2005 | mpt2sas_config_get_volume_handle(ioc, | 2065 | if (mpt2sas_config_get_volume_handle(ioc, |
| 2006 | sas_device->handle, &sas_device->volume_handle); | 2066 | sas_device->handle, &sas_device->volume_handle)) { |
| 2007 | mpt2sas_config_get_volume_wwid(ioc, | 2067 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT |
| 2068 | "failure at %s:%d/%s()!\n", ioc->name, | ||
| 2069 | __FILE__, __LINE__, __func__)); | ||
| 2070 | return 1; | ||
| 2071 | } | ||
| 2072 | if (sas_device->volume_handle && | ||
| 2073 | mpt2sas_config_get_volume_wwid(ioc, | ||
| 2008 | sas_device->volume_handle, | 2074 | sas_device->volume_handle, |
| 2009 | &sas_device->volume_wwid); | 2075 | &sas_device->volume_wwid)) { |
| 2076 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT | ||
| 2077 | "failure at %s:%d/%s()!\n", ioc->name, | ||
| 2078 | __FILE__, __LINE__, __func__)); | ||
| 2079 | return 1; | ||
| 2080 | } | ||
| 2010 | } | 2081 | } |
| 2011 | if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { | 2082 | if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { |
| 2012 | qdepth = MPT2SAS_SAS_QUEUE_DEPTH; | 2083 | qdepth = MPT2SAS_SAS_QUEUE_DEPTH; |
| @@ -2035,6 +2106,11 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
| 2035 | 2106 | ||
| 2036 | if (!ssp_target) | 2107 | if (!ssp_target) |
| 2037 | _scsih_display_sata_capabilities(ioc, sas_device, sdev); | 2108 | _scsih_display_sata_capabilities(ioc, sas_device, sdev); |
| 2109 | } else { | ||
| 2110 | dfailprintk(ioc, printk(MPT2SAS_WARN_FMT | ||
| 2111 | "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, | ||
| 2112 | __func__)); | ||
| 2113 | return 1; | ||
| 2038 | } | 2114 | } |
| 2039 | 2115 | ||
| 2040 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); | 2116 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); |
| @@ -2714,22 +2790,38 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work | |||
| 2714 | 2790 | ||
| 2715 | 2791 | ||
| 2716 | /** | 2792 | /** |
| 2717 | * _scsih_queue_rescan - queue a topology rescan from user context | 2793 | * _scsih_error_recovery_delete_devices - remove devices not responding |
| 2718 | * @ioc: per adapter object | 2794 | * @ioc: per adapter object |
| 2719 | * | 2795 | * |
| 2720 | * Return nothing. | 2796 | * Return nothing. |
| 2721 | */ | 2797 | */ |
| 2722 | static void | 2798 | static void |
| 2723 | _scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc) | 2799 | _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc) |
| 2724 | { | 2800 | { |
| 2725 | struct fw_event_work *fw_event; | 2801 | struct fw_event_work *fw_event; |
| 2726 | 2802 | ||
| 2727 | if (ioc->wait_for_port_enable_to_complete) | 2803 | if (ioc->is_driver_loading) |
| 2728 | return; | 2804 | return; |
| 2805 | fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; | ||
| 2806 | fw_event->ioc = ioc; | ||
| 2807 | _scsih_fw_event_add(ioc, fw_event); | ||
| 2808 | } | ||
| 2809 | |||
| 2810 | /** | ||
| 2811 | * mpt2sas_port_enable_complete - port enable completed (fake event) | ||
| 2812 | * @ioc: per adapter object | ||
| 2813 | * | ||
| 2814 | * Return nothing. | ||
| 2815 | */ | ||
| 2816 | void | ||
| 2817 | mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc) | ||
| 2818 | { | ||
| 2819 | struct fw_event_work *fw_event; | ||
| 2820 | |||
| 2729 | fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); | 2821 | fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); |
| 2730 | if (!fw_event) | 2822 | if (!fw_event) |
| 2731 | return; | 2823 | return; |
| 2732 | fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET; | 2824 | fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE; |
| 2733 | fw_event->ioc = ioc; | 2825 | fw_event->ioc = ioc; |
| 2734 | _scsih_fw_event_add(ioc, fw_event); | 2826 | _scsih_fw_event_add(ioc, fw_event); |
| 2735 | } | 2827 | } |
| @@ -2977,14 +3069,27 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2977 | Mpi2SCSITaskManagementRequest_t *mpi_request; | 3069 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
| 2978 | u16 smid; | 3070 | u16 smid; |
| 2979 | struct _sas_device *sas_device; | 3071 | struct _sas_device *sas_device; |
| 2980 | struct MPT2SAS_TARGET *sas_target_priv_data; | 3072 | struct MPT2SAS_TARGET *sas_target_priv_data = NULL; |
| 3073 | u64 sas_address = 0; | ||
| 2981 | unsigned long flags; | 3074 | unsigned long flags; |
| 2982 | struct _tr_list *delayed_tr; | 3075 | struct _tr_list *delayed_tr; |
| 3076 | u32 ioc_state; | ||
| 2983 | 3077 | ||
| 2984 | if (ioc->shost_recovery || ioc->remove_host || | 3078 | if (ioc->remove_host) { |
| 2985 | ioc->pci_error_recovery) { | 3079 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been " |
| 2986 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in " | 3080 | "removed: handle(0x%04x)\n", __func__, ioc->name, handle)); |
| 2987 | "progress!\n", __func__, ioc->name)); | 3081 | return; |
| 3082 | } else if (ioc->pci_error_recovery) { | ||
| 3083 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci " | ||
| 3084 | "error recovery: handle(0x%04x)\n", __func__, ioc->name, | ||
| 3085 | handle)); | ||
| 3086 | return; | ||
| 3087 | } | ||
| 3088 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); | ||
| 3089 | if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { | ||
| 3090 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not " | ||
| 3091 | "operational: handle(0x%04x)\n", __func__, ioc->name, | ||
| 3092 | handle)); | ||
| 2988 | return; | 3093 | return; |
| 2989 | } | 3094 | } |
| 2990 | 3095 | ||
| @@ -2998,13 +3103,18 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2998 | sas_device->starget->hostdata) { | 3103 | sas_device->starget->hostdata) { |
| 2999 | sas_target_priv_data = sas_device->starget->hostdata; | 3104 | sas_target_priv_data = sas_device->starget->hostdata; |
| 3000 | sas_target_priv_data->deleted = 1; | 3105 | sas_target_priv_data->deleted = 1; |
| 3001 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | 3106 | sas_address = sas_device->sas_address; |
| 3002 | "setting delete flag: handle(0x%04x), " | ||
| 3003 | "sas_addr(0x%016llx)\n", ioc->name, handle, | ||
| 3004 | (unsigned long long) sas_device->sas_address)); | ||
| 3005 | } | 3107 | } |
| 3006 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 3108 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
| 3007 | 3109 | ||
| 3110 | if (sas_target_priv_data) { | ||
| 3111 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: " | ||
| 3112 | "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, | ||
| 3113 | (unsigned long long)sas_address)); | ||
| 3114 | _scsih_ublock_io_device(ioc, handle); | ||
| 3115 | sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE; | ||
| 3116 | } | ||
| 3117 | |||
| 3008 | smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); | 3118 | smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); |
| 3009 | if (!smid) { | 3119 | if (!smid) { |
| 3010 | delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); | 3120 | delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); |
| @@ -3185,11 +3295,21 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
| 3185 | mpt2sas_base_get_reply_virt_addr(ioc, reply); | 3295 | mpt2sas_base_get_reply_virt_addr(ioc, reply); |
| 3186 | Mpi2SasIoUnitControlRequest_t *mpi_request; | 3296 | Mpi2SasIoUnitControlRequest_t *mpi_request; |
| 3187 | u16 smid_sas_ctrl; | 3297 | u16 smid_sas_ctrl; |
| 3298 | u32 ioc_state; | ||
| 3188 | 3299 | ||
| 3189 | if (ioc->shost_recovery || ioc->remove_host || | 3300 | if (ioc->remove_host) { |
| 3190 | ioc->pci_error_recovery) { | 3301 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been " |
| 3191 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in " | 3302 | "removed\n", __func__, ioc->name)); |
| 3192 | "progress!\n", __func__, ioc->name)); | 3303 | return 1; |
| 3304 | } else if (ioc->pci_error_recovery) { | ||
| 3305 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci " | ||
| 3306 | "error recovery\n", __func__, ioc->name)); | ||
| 3307 | return 1; | ||
| 3308 | } | ||
| 3309 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); | ||
| 3310 | if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { | ||
| 3311 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not " | ||
| 3312 | "operational\n", __func__, ioc->name)); | ||
| 3193 | return 1; | 3313 | return 1; |
| 3194 | } | 3314 | } |
| 3195 | 3315 | ||
| @@ -5099,7 +5219,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) | |||
| 5099 | /* get device name */ | 5219 | /* get device name */ |
| 5100 | sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); | 5220 | sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); |
| 5101 | 5221 | ||
| 5102 | if (ioc->wait_for_port_enable_to_complete) | 5222 | if (ioc->wait_for_discovery_to_complete) |
| 5103 | _scsih_sas_device_init_add(ioc, sas_device); | 5223 | _scsih_sas_device_init_add(ioc, sas_device); |
| 5104 | else | 5224 | else |
| 5105 | _scsih_sas_device_add(ioc, sas_device); | 5225 | _scsih_sas_device_add(ioc, sas_device); |
| @@ -5135,6 +5255,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, | |||
| 5135 | if (sas_device_backup.starget && sas_device_backup.starget->hostdata) { | 5255 | if (sas_device_backup.starget && sas_device_backup.starget->hostdata) { |
| 5136 | sas_target_priv_data = sas_device_backup.starget->hostdata; | 5256 | sas_target_priv_data = sas_device_backup.starget->hostdata; |
| 5137 | sas_target_priv_data->deleted = 1; | 5257 | sas_target_priv_data->deleted = 1; |
| 5258 | _scsih_ublock_io_device(ioc, sas_device_backup.handle); | ||
| 5259 | sas_target_priv_data->handle = | ||
| 5260 | MPT2SAS_INVALID_DEVICE_HANDLE; | ||
| 5138 | } | 5261 | } |
| 5139 | 5262 | ||
| 5140 | _scsih_ublock_io_device(ioc, sas_device_backup.handle); | 5263 | _scsih_ublock_io_device(ioc, sas_device_backup.handle); |
| @@ -5288,7 +5411,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5288 | _scsih_sas_topology_change_event_debug(ioc, event_data); | 5411 | _scsih_sas_topology_change_event_debug(ioc, event_data); |
| 5289 | #endif | 5412 | #endif |
| 5290 | 5413 | ||
| 5291 | if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) | 5414 | if (ioc->remove_host || ioc->pci_error_recovery) |
| 5292 | return; | 5415 | return; |
| 5293 | 5416 | ||
| 5294 | if (!ioc->sas_hba.num_phys) | 5417 | if (!ioc->sas_hba.num_phys) |
| @@ -5349,6 +5472,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5349 | switch (reason_code) { | 5472 | switch (reason_code) { |
| 5350 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: | 5473 | case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: |
| 5351 | 5474 | ||
| 5475 | if (ioc->shost_recovery) | ||
| 5476 | break; | ||
| 5477 | |||
| 5352 | if (link_rate == prev_link_rate) | 5478 | if (link_rate == prev_link_rate) |
| 5353 | break; | 5479 | break; |
| 5354 | 5480 | ||
| @@ -5362,6 +5488,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5362 | break; | 5488 | break; |
| 5363 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: | 5489 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
| 5364 | 5490 | ||
| 5491 | if (ioc->shost_recovery) | ||
| 5492 | break; | ||
| 5493 | |||
| 5365 | mpt2sas_transport_update_links(ioc, sas_address, | 5494 | mpt2sas_transport_update_links(ioc, sas_address, |
| 5366 | handle, phy_number, link_rate); | 5495 | handle, phy_number, link_rate); |
| 5367 | 5496 | ||
| @@ -5622,7 +5751,7 @@ broadcast_aen_retry: | |||
| 5622 | termination_count = 0; | 5751 | termination_count = 0; |
| 5623 | query_count = 0; | 5752 | query_count = 0; |
| 5624 | for (smid = 1; smid <= ioc->scsiio_depth; smid++) { | 5753 | for (smid = 1; smid <= ioc->scsiio_depth; smid++) { |
| 5625 | if (ioc->ioc_reset_in_progress_status) | 5754 | if (ioc->shost_recovery) |
| 5626 | goto out; | 5755 | goto out; |
| 5627 | scmd = _scsih_scsi_lookup_get(ioc, smid); | 5756 | scmd = _scsih_scsi_lookup_get(ioc, smid); |
| 5628 | if (!scmd) | 5757 | if (!scmd) |
| @@ -5644,7 +5773,7 @@ broadcast_aen_retry: | |||
| 5644 | lun = sas_device_priv_data->lun; | 5773 | lun = sas_device_priv_data->lun; |
| 5645 | query_count++; | 5774 | query_count++; |
| 5646 | 5775 | ||
| 5647 | if (ioc->ioc_reset_in_progress_status) | 5776 | if (ioc->shost_recovery) |
| 5648 | goto out; | 5777 | goto out; |
| 5649 | 5778 | ||
| 5650 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 5779 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
| @@ -5686,7 +5815,7 @@ broadcast_aen_retry: | |||
| 5686 | goto broadcast_aen_retry; | 5815 | goto broadcast_aen_retry; |
| 5687 | } | 5816 | } |
| 5688 | 5817 | ||
| 5689 | if (ioc->ioc_reset_in_progress_status) | 5818 | if (ioc->shost_recovery) |
| 5690 | goto out_no_lock; | 5819 | goto out_no_lock; |
| 5691 | 5820 | ||
| 5692 | r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, | 5821 | r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, |
| @@ -5725,7 +5854,7 @@ broadcast_aen_retry: | |||
| 5725 | ioc->name, __func__, query_count, termination_count)); | 5854 | ioc->name, __func__, query_count, termination_count)); |
| 5726 | 5855 | ||
| 5727 | ioc->broadcast_aen_busy = 0; | 5856 | ioc->broadcast_aen_busy = 0; |
| 5728 | if (!ioc->ioc_reset_in_progress_status) | 5857 | if (!ioc->shost_recovery) |
| 5729 | _scsih_ublock_io_all_device(ioc); | 5858 | _scsih_ublock_io_all_device(ioc); |
| 5730 | mutex_unlock(&ioc->tm_cmds.mutex); | 5859 | mutex_unlock(&ioc->tm_cmds.mutex); |
| 5731 | } | 5860 | } |
| @@ -5789,8 +5918,11 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) | |||
| 5789 | static void | 5918 | static void |
| 5790 | _scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach) | 5919 | _scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach) |
| 5791 | { | 5920 | { |
| 5792 | struct MPT2SAS_TARGET *sas_target_priv_data = starget->hostdata; | 5921 | struct MPT2SAS_TARGET *sas_target_priv_data; |
| 5793 | 5922 | ||
| 5923 | if (starget == NULL) | ||
| 5924 | return; | ||
| 5925 | sas_target_priv_data = starget->hostdata; | ||
| 5794 | if (no_uld_attach) | 5926 | if (no_uld_attach) |
| 5795 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT; | 5927 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT; |
| 5796 | else | 5928 | else |
| @@ -5845,7 +5977,7 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc, | |||
| 5845 | raid_device->handle = handle; | 5977 | raid_device->handle = handle; |
| 5846 | raid_device->wwid = wwid; | 5978 | raid_device->wwid = wwid; |
| 5847 | _scsih_raid_device_add(ioc, raid_device); | 5979 | _scsih_raid_device_add(ioc, raid_device); |
| 5848 | if (!ioc->wait_for_port_enable_to_complete) { | 5980 | if (!ioc->wait_for_discovery_to_complete) { |
| 5849 | rc = scsi_add_device(ioc->shost, RAID_CHANNEL, | 5981 | rc = scsi_add_device(ioc->shost, RAID_CHANNEL, |
| 5850 | raid_device->id, 0); | 5982 | raid_device->id, 0); |
| 5851 | if (rc) | 5983 | if (rc) |
| @@ -6127,6 +6259,10 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6127 | _scsih_sas_ir_config_change_event_debug(ioc, event_data); | 6259 | _scsih_sas_ir_config_change_event_debug(ioc, event_data); |
| 6128 | 6260 | ||
| 6129 | #endif | 6261 | #endif |
| 6262 | |||
| 6263 | if (ioc->shost_recovery) | ||
| 6264 | return; | ||
| 6265 | |||
| 6130 | foreign_config = (le32_to_cpu(event_data->Flags) & | 6266 | foreign_config = (le32_to_cpu(event_data->Flags) & |
| 6131 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; | 6267 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; |
| 6132 | 6268 | ||
| @@ -6185,6 +6321,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6185 | int rc; | 6321 | int rc; |
| 6186 | Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; | 6322 | Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; |
| 6187 | 6323 | ||
| 6324 | if (ioc->shost_recovery) | ||
| 6325 | return; | ||
| 6326 | |||
| 6188 | if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) | 6327 | if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) |
| 6189 | return; | 6328 | return; |
| 6190 | 6329 | ||
| @@ -6267,6 +6406,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6267 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; | 6406 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; |
| 6268 | u64 sas_address; | 6407 | u64 sas_address; |
| 6269 | 6408 | ||
| 6409 | if (ioc->shost_recovery) | ||
| 6410 | return; | ||
| 6411 | |||
| 6270 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) | 6412 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) |
| 6271 | return; | 6413 | return; |
| 6272 | 6414 | ||
| @@ -6510,10 +6652,10 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6510 | u32 device_info; | 6652 | u32 device_info; |
| 6511 | u16 slot; | 6653 | u16 slot; |
| 6512 | 6654 | ||
| 6513 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); | 6655 | printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name); |
| 6514 | 6656 | ||
| 6515 | if (list_empty(&ioc->sas_device_list)) | 6657 | if (list_empty(&ioc->sas_device_list)) |
| 6516 | return; | 6658 | goto out; |
| 6517 | 6659 | ||
| 6518 | handle = 0xFFFF; | 6660 | handle = 0xFFFF; |
| 6519 | while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, | 6661 | while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, |
| @@ -6532,6 +6674,9 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6532 | _scsih_mark_responding_sas_device(ioc, sas_address, slot, | 6674 | _scsih_mark_responding_sas_device(ioc, sas_address, slot, |
| 6533 | handle); | 6675 | handle); |
| 6534 | } | 6676 | } |
| 6677 | out: | ||
| 6678 | printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n", | ||
| 6679 | ioc->name); | ||
| 6535 | } | 6680 | } |
| 6536 | 6681 | ||
| 6537 | /** | 6682 | /** |
| @@ -6607,10 +6752,14 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6607 | u16 handle; | 6752 | u16 handle; |
| 6608 | u8 phys_disk_num; | 6753 | u8 phys_disk_num; |
| 6609 | 6754 | ||
| 6610 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); | 6755 | if (!ioc->ir_firmware) |
| 6756 | return; | ||
| 6757 | |||
| 6758 | printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n", | ||
| 6759 | ioc->name); | ||
| 6611 | 6760 | ||
| 6612 | if (list_empty(&ioc->raid_device_list)) | 6761 | if (list_empty(&ioc->raid_device_list)) |
| 6613 | return; | 6762 | goto out; |
| 6614 | 6763 | ||
| 6615 | handle = 0xFFFF; | 6764 | handle = 0xFFFF; |
| 6616 | while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, | 6765 | while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, |
| @@ -6649,6 +6798,9 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6649 | set_bit(handle, ioc->pd_handles); | 6798 | set_bit(handle, ioc->pd_handles); |
| 6650 | } | 6799 | } |
| 6651 | } | 6800 | } |
| 6801 | out: | ||
| 6802 | printk(MPT2SAS_INFO_FMT "search for responding raid volumes: " | ||
| 6803 | "complete\n", ioc->name); | ||
| 6652 | } | 6804 | } |
| 6653 | 6805 | ||
| 6654 | /** | 6806 | /** |
| @@ -6708,10 +6860,10 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) | |||
| 6708 | u64 sas_address; | 6860 | u64 sas_address; |
| 6709 | u16 handle; | 6861 | u16 handle; |
| 6710 | 6862 | ||
| 6711 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); | 6863 | printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name); |
| 6712 | 6864 | ||
| 6713 | if (list_empty(&ioc->sas_expander_list)) | 6865 | if (list_empty(&ioc->sas_expander_list)) |
| 6714 | return; | 6866 | goto out; |
| 6715 | 6867 | ||
| 6716 | handle = 0xFFFF; | 6868 | handle = 0xFFFF; |
| 6717 | while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, | 6869 | while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, |
| @@ -6730,6 +6882,8 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) | |||
| 6730 | _scsih_mark_responding_expander(ioc, sas_address, handle); | 6882 | _scsih_mark_responding_expander(ioc, sas_address, handle); |
| 6731 | } | 6883 | } |
| 6732 | 6884 | ||
| 6885 | out: | ||
| 6886 | printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name); | ||
| 6733 | } | 6887 | } |
| 6734 | 6888 | ||
| 6735 | /** | 6889 | /** |
| @@ -6745,6 +6899,8 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6745 | struct _sas_node *sas_expander; | 6899 | struct _sas_node *sas_expander; |
| 6746 | struct _raid_device *raid_device, *raid_device_next; | 6900 | struct _raid_device *raid_device, *raid_device_next; |
| 6747 | 6901 | ||
| 6902 | printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", | ||
| 6903 | ioc->name); | ||
| 6748 | 6904 | ||
| 6749 | list_for_each_entry_safe(sas_device, sas_device_next, | 6905 | list_for_each_entry_safe(sas_device, sas_device_next, |
| 6750 | &ioc->sas_device_list, list) { | 6906 | &ioc->sas_device_list, list) { |
| @@ -6764,6 +6920,9 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6764 | _scsih_remove_device(ioc, sas_device); | 6920 | _scsih_remove_device(ioc, sas_device); |
| 6765 | } | 6921 | } |
| 6766 | 6922 | ||
| 6923 | if (!ioc->ir_firmware) | ||
| 6924 | goto retry_expander_search; | ||
| 6925 | |||
| 6767 | list_for_each_entry_safe(raid_device, raid_device_next, | 6926 | list_for_each_entry_safe(raid_device, raid_device_next, |
| 6768 | &ioc->raid_device_list, list) { | 6927 | &ioc->raid_device_list, list) { |
| 6769 | if (raid_device->responding) { | 6928 | if (raid_device->responding) { |
| @@ -6790,52 +6949,170 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 6790 | mpt2sas_expander_remove(ioc, sas_expander->sas_address); | 6949 | mpt2sas_expander_remove(ioc, sas_expander->sas_address); |
| 6791 | goto retry_expander_search; | 6950 | goto retry_expander_search; |
| 6792 | } | 6951 | } |
| 6952 | printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n", | ||
| 6953 | ioc->name); | ||
| 6954 | /* unblock devices */ | ||
| 6955 | _scsih_ublock_io_all_device(ioc); | ||
| 6956 | } | ||
| 6957 | |||
| 6958 | static void | ||
| 6959 | _scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc, | ||
| 6960 | struct _sas_node *sas_expander, u16 handle) | ||
| 6961 | { | ||
| 6962 | Mpi2ExpanderPage1_t expander_pg1; | ||
| 6963 | Mpi2ConfigReply_t mpi_reply; | ||
| 6964 | int i; | ||
| 6965 | |||
| 6966 | for (i = 0 ; i < sas_expander->num_phys ; i++) { | ||
| 6967 | if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply, | ||
| 6968 | &expander_pg1, i, handle))) { | ||
| 6969 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 6970 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 6971 | return; | ||
| 6972 | } | ||
| 6973 | |||
| 6974 | mpt2sas_transport_update_links(ioc, sas_expander->sas_address, | ||
| 6975 | le16_to_cpu(expander_pg1.AttachedDevHandle), i, | ||
| 6976 | expander_pg1.NegotiatedLinkRate >> 4); | ||
| 6977 | } | ||
| 6793 | } | 6978 | } |
| 6794 | 6979 | ||
| 6795 | /** | 6980 | /** |
| 6796 | * _scsih_hide_unhide_sas_devices - add/remove device to/from OS | 6981 | * _scsih_scan_for_devices_after_reset - scan for devices after host reset |
| 6797 | * @ioc: per adapter object | 6982 | * @ioc: per adapter object |
| 6798 | * | 6983 | * |
| 6799 | * Return nothing. | 6984 | * Return nothing. |
| 6800 | */ | 6985 | */ |
| 6801 | static void | 6986 | static void |
| 6802 | _scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc) | 6987 | _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) |
| 6803 | { | 6988 | { |
| 6804 | struct _sas_device *sas_device, *sas_device_next; | 6989 | Mpi2ExpanderPage0_t expander_pg0; |
| 6990 | Mpi2SasDevicePage0_t sas_device_pg0; | ||
| 6991 | Mpi2RaidVolPage1_t volume_pg1; | ||
| 6992 | Mpi2RaidVolPage0_t volume_pg0; | ||
| 6993 | Mpi2RaidPhysDiskPage0_t pd_pg0; | ||
| 6994 | Mpi2EventIrConfigElement_t element; | ||
| 6995 | Mpi2ConfigReply_t mpi_reply; | ||
| 6996 | u8 phys_disk_num; | ||
| 6997 | u16 ioc_status; | ||
| 6998 | u16 handle, parent_handle; | ||
| 6999 | u64 sas_address; | ||
| 7000 | struct _sas_device *sas_device; | ||
| 7001 | struct _sas_node *expander_device; | ||
| 7002 | static struct _raid_device *raid_device; | ||
| 6805 | 7003 | ||
| 6806 | if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag != | 7004 | printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name); |
| 6807 | MFG_PAGE10_HIDE_IF_VOL_PRESENT) | ||
| 6808 | return; | ||
| 6809 | 7005 | ||
| 6810 | if (ioc->hide_drives) { | 7006 | _scsih_sas_host_refresh(ioc); |
| 6811 | if (_scsih_get_num_volumes(ioc)) | 7007 | |
| 6812 | return; | 7008 | /* expanders */ |
| 6813 | ioc->hide_drives = 0; | 7009 | handle = 0xFFFF; |
| 6814 | list_for_each_entry_safe(sas_device, sas_device_next, | 7010 | while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, |
| 6815 | &ioc->sas_device_list, list) { | 7011 | MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { |
| 6816 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, | 7012 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
| 6817 | sas_device->sas_address_parent)) { | 7013 | MPI2_IOCSTATUS_MASK; |
| 6818 | _scsih_sas_device_remove(ioc, sas_device); | 7014 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) |
| 6819 | } else if (!sas_device->starget) { | 7015 | break; |
| 6820 | mpt2sas_transport_port_remove(ioc, | 7016 | handle = le16_to_cpu(expander_pg0.DevHandle); |
| 6821 | sas_device->sas_address, | 7017 | expander_device = mpt2sas_scsih_expander_find_by_sas_address( |
| 6822 | sas_device->sas_address_parent); | 7018 | ioc, le64_to_cpu(expander_pg0.SASAddress)); |
| 6823 | _scsih_sas_device_remove(ioc, sas_device); | 7019 | if (expander_device) |
| 6824 | } | 7020 | _scsih_refresh_expander_links(ioc, expander_device, |
| 7021 | handle); | ||
| 7022 | else | ||
| 7023 | _scsih_expander_add(ioc, handle); | ||
| 7024 | } | ||
| 7025 | |||
| 7026 | if (!ioc->ir_firmware) | ||
| 7027 | goto skip_to_sas; | ||
| 7028 | |||
| 7029 | /* phys disk */ | ||
| 7030 | phys_disk_num = 0xFF; | ||
| 7031 | while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, | ||
| 7032 | &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, | ||
| 7033 | phys_disk_num))) { | ||
| 7034 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
| 7035 | MPI2_IOCSTATUS_MASK; | ||
| 7036 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) | ||
| 7037 | break; | ||
| 7038 | phys_disk_num = pd_pg0.PhysDiskNum; | ||
| 7039 | handle = le16_to_cpu(pd_pg0.DevHandle); | ||
| 7040 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | ||
| 7041 | if (sas_device) | ||
| 7042 | continue; | ||
| 7043 | if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, | ||
| 7044 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, | ||
| 7045 | handle) != 0) | ||
| 7046 | continue; | ||
| 7047 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); | ||
| 7048 | if (!_scsih_get_sas_address(ioc, parent_handle, | ||
| 7049 | &sas_address)) { | ||
| 7050 | mpt2sas_transport_update_links(ioc, sas_address, | ||
| 7051 | handle, sas_device_pg0.PhyNum, | ||
| 7052 | MPI2_SAS_NEG_LINK_RATE_1_5); | ||
| 7053 | set_bit(handle, ioc->pd_handles); | ||
| 7054 | _scsih_add_device(ioc, handle, 0, 1); | ||
| 6825 | } | 7055 | } |
| 6826 | } else { | 7056 | } |
| 6827 | if (!_scsih_get_num_volumes(ioc)) | 7057 | |
| 6828 | return; | 7058 | /* volumes */ |
| 6829 | ioc->hide_drives = 1; | 7059 | handle = 0xFFFF; |
| 6830 | list_for_each_entry_safe(sas_device, sas_device_next, | 7060 | while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, |
| 6831 | &ioc->sas_device_list, list) { | 7061 | &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { |
| 6832 | mpt2sas_transport_port_remove(ioc, | 7062 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & |
| 6833 | sas_device->sas_address, | 7063 | MPI2_IOCSTATUS_MASK; |
| 6834 | sas_device->sas_address_parent); | 7064 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) |
| 7065 | break; | ||
| 7066 | handle = le16_to_cpu(volume_pg1.DevHandle); | ||
| 7067 | raid_device = _scsih_raid_device_find_by_wwid(ioc, | ||
| 7068 | le64_to_cpu(volume_pg1.WWID)); | ||
| 7069 | if (raid_device) | ||
| 7070 | continue; | ||
| 7071 | if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, | ||
| 7072 | &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, | ||
| 7073 | sizeof(Mpi2RaidVolPage0_t))) | ||
| 7074 | continue; | ||
| 7075 | if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || | ||
| 7076 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || | ||
| 7077 | volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { | ||
| 7078 | memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); | ||
| 7079 | element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; | ||
| 7080 | element.VolDevHandle = volume_pg1.DevHandle; | ||
| 7081 | _scsih_sas_volume_add(ioc, &element); | ||
| 6835 | } | 7082 | } |
| 6836 | } | 7083 | } |
| 7084 | |||
| 7085 | skip_to_sas: | ||
| 7086 | |||
| 7087 | /* sas devices */ | ||
| 7088 | handle = 0xFFFF; | ||
| 7089 | while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, | ||
| 7090 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, | ||
| 7091 | handle))) { | ||
| 7092 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
| 7093 | MPI2_IOCSTATUS_MASK; | ||
| 7094 | if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) | ||
| 7095 | break; | ||
| 7096 | handle = le16_to_cpu(sas_device_pg0.DevHandle); | ||
| 7097 | if (!(_scsih_is_end_device( | ||
| 7098 | le32_to_cpu(sas_device_pg0.DeviceInfo)))) | ||
| 7099 | continue; | ||
| 7100 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
| 7101 | le64_to_cpu(sas_device_pg0.SASAddress)); | ||
| 7102 | if (sas_device) | ||
| 7103 | continue; | ||
| 7104 | parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); | ||
| 7105 | if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { | ||
| 7106 | mpt2sas_transport_update_links(ioc, sas_address, handle, | ||
| 7107 | sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | ||
| 7108 | _scsih_add_device(ioc, handle, 0, 0); | ||
| 7109 | } | ||
| 7110 | } | ||
| 7111 | |||
| 7112 | printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name); | ||
| 6837 | } | 7113 | } |
| 6838 | 7114 | ||
| 7115 | |||
| 6839 | /** | 7116 | /** |
| 6840 | * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) | 7117 | * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) |
| 6841 | * @ioc: per adapter object | 7118 | * @ioc: per adapter object |
| @@ -6871,7 +7148,6 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) | |||
| 6871 | } | 7148 | } |
| 6872 | _scsih_fw_event_cleanup_queue(ioc); | 7149 | _scsih_fw_event_cleanup_queue(ioc); |
| 6873 | _scsih_flush_running_cmds(ioc); | 7150 | _scsih_flush_running_cmds(ioc); |
| 6874 | _scsih_queue_rescan(ioc); | ||
| 6875 | break; | 7151 | break; |
| 6876 | case MPT2_IOC_DONE_RESET: | 7152 | case MPT2_IOC_DONE_RESET: |
| 6877 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " | 7153 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " |
| @@ -6881,6 +7157,13 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) | |||
| 6881 | _scsih_search_responding_sas_devices(ioc); | 7157 | _scsih_search_responding_sas_devices(ioc); |
| 6882 | _scsih_search_responding_raid_devices(ioc); | 7158 | _scsih_search_responding_raid_devices(ioc); |
| 6883 | _scsih_search_responding_expanders(ioc); | 7159 | _scsih_search_responding_expanders(ioc); |
| 7160 | if (!ioc->is_driver_loading) { | ||
| 7161 | _scsih_prep_device_scan(ioc); | ||
| 7162 | _scsih_search_responding_sas_devices(ioc); | ||
| 7163 | _scsih_search_responding_raid_devices(ioc); | ||
| 7164 | _scsih_search_responding_expanders(ioc); | ||
| 7165 | _scsih_error_recovery_delete_devices(ioc); | ||
| 7166 | } | ||
| 6884 | break; | 7167 | break; |
| 6885 | } | 7168 | } |
| 6886 | } | 7169 | } |
| @@ -6898,7 +7181,6 @@ _firmware_event_work(struct work_struct *work) | |||
| 6898 | { | 7181 | { |
| 6899 | struct fw_event_work *fw_event = container_of(work, | 7182 | struct fw_event_work *fw_event = container_of(work, |
| 6900 | struct fw_event_work, delayed_work.work); | 7183 | struct fw_event_work, delayed_work.work); |
| 6901 | unsigned long flags; | ||
| 6902 | struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; | 7184 | struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; |
| 6903 | 7185 | ||
| 6904 | /* the queue is being flushed so ignore this event */ | 7186 | /* the queue is being flushed so ignore this event */ |
| @@ -6908,23 +7190,21 @@ _firmware_event_work(struct work_struct *work) | |||
| 6908 | return; | 7190 | return; |
| 6909 | } | 7191 | } |
| 6910 | 7192 | ||
| 6911 | if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) { | 7193 | switch (fw_event->event) { |
| 6912 | _scsih_fw_event_free(ioc, fw_event); | 7194 | case MPT2SAS_REMOVE_UNRESPONDING_DEVICES: |
| 6913 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 7195 | while (scsi_host_in_recovery(ioc->shost)) |
| 6914 | if (ioc->shost_recovery) { | 7196 | ssleep(1); |
| 6915 | init_completion(&ioc->shost_recovery_done); | ||
| 6916 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, | ||
| 6917 | flags); | ||
| 6918 | wait_for_completion(&ioc->shost_recovery_done); | ||
| 6919 | } else | ||
| 6920 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, | ||
| 6921 | flags); | ||
| 6922 | _scsih_remove_unresponding_sas_devices(ioc); | 7197 | _scsih_remove_unresponding_sas_devices(ioc); |
| 6923 | _scsih_hide_unhide_sas_devices(ioc); | 7198 | _scsih_scan_for_devices_after_reset(ioc); |
| 6924 | return; | 7199 | break; |
| 6925 | } | 7200 | case MPT2SAS_PORT_ENABLE_COMPLETE: |
| 7201 | ioc->start_scan = 0; | ||
| 6926 | 7202 | ||
| 6927 | switch (fw_event->event) { | 7203 | |
| 7204 | |||
| 7205 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete " | ||
| 7206 | "from worker thread\n", ioc->name)); | ||
| 7207 | break; | ||
| 6928 | case MPT2SAS_TURN_ON_FAULT_LED: | 7208 | case MPT2SAS_TURN_ON_FAULT_LED: |
| 6929 | _scsih_turn_on_fault_led(ioc, fw_event->device_handle); | 7209 | _scsih_turn_on_fault_led(ioc, fw_event->device_handle); |
| 6930 | break; | 7210 | break; |
| @@ -7121,6 +7401,8 @@ static struct scsi_host_template scsih_driver_template = { | |||
| 7121 | .slave_configure = _scsih_slave_configure, | 7401 | .slave_configure = _scsih_slave_configure, |
| 7122 | .target_destroy = _scsih_target_destroy, | 7402 | .target_destroy = _scsih_target_destroy, |
| 7123 | .slave_destroy = _scsih_slave_destroy, | 7403 | .slave_destroy = _scsih_slave_destroy, |
| 7404 | .scan_finished = _scsih_scan_finished, | ||
| 7405 | .scan_start = _scsih_scan_start, | ||
| 7124 | .change_queue_depth = _scsih_change_queue_depth, | 7406 | .change_queue_depth = _scsih_change_queue_depth, |
| 7125 | .change_queue_type = _scsih_change_queue_type, | 7407 | .change_queue_type = _scsih_change_queue_type, |
| 7126 | .eh_abort_handler = _scsih_abort, | 7408 | .eh_abort_handler = _scsih_abort, |
| @@ -7381,7 +7663,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 7381 | unsigned long flags; | 7663 | unsigned long flags; |
| 7382 | int rc; | 7664 | int rc; |
| 7383 | 7665 | ||
| 7666 | /* no BIOS, return immediately */ | ||
| 7667 | if (!ioc->bios_pg3.BiosVersion) | ||
| 7668 | return; | ||
| 7669 | |||
| 7384 | device = NULL; | 7670 | device = NULL; |
| 7671 | is_raid = 0; | ||
| 7385 | if (ioc->req_boot_device.device) { | 7672 | if (ioc->req_boot_device.device) { |
| 7386 | device = ioc->req_boot_device.device; | 7673 | device = ioc->req_boot_device.device; |
| 7387 | is_raid = ioc->req_boot_device.is_raid; | 7674 | is_raid = ioc->req_boot_device.is_raid; |
| @@ -7417,8 +7704,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 7417 | sas_device->sas_address_parent)) { | 7704 | sas_device->sas_address_parent)) { |
| 7418 | _scsih_sas_device_remove(ioc, sas_device); | 7705 | _scsih_sas_device_remove(ioc, sas_device); |
| 7419 | } else if (!sas_device->starget) { | 7706 | } else if (!sas_device->starget) { |
| 7420 | mpt2sas_transport_port_remove(ioc, sas_address, | 7707 | if (!ioc->is_driver_loading) |
| 7421 | sas_address_parent); | 7708 | mpt2sas_transport_port_remove(ioc, sas_address, |
| 7709 | sas_address_parent); | ||
| 7422 | _scsih_sas_device_remove(ioc, sas_device); | 7710 | _scsih_sas_device_remove(ioc, sas_device); |
| 7423 | } | 7711 | } |
| 7424 | } | 7712 | } |
| @@ -7462,22 +7750,28 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) | |||
| 7462 | /* SAS Device List */ | 7750 | /* SAS Device List */ |
| 7463 | list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, | 7751 | list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, |
| 7464 | list) { | 7752 | list) { |
| 7465 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 7466 | list_move_tail(&sas_device->list, &ioc->sas_device_list); | ||
| 7467 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 7468 | 7753 | ||
| 7469 | if (ioc->hide_drives) | 7754 | if (ioc->hide_drives) |
| 7470 | continue; | 7755 | continue; |
| 7471 | 7756 | ||
| 7472 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, | 7757 | if (!mpt2sas_transport_port_add(ioc, sas_device->handle, |
| 7473 | sas_device->sas_address_parent)) { | 7758 | sas_device->sas_address_parent)) { |
| 7474 | _scsih_sas_device_remove(ioc, sas_device); | 7759 | list_del(&sas_device->list); |
| 7760 | kfree(sas_device); | ||
| 7761 | continue; | ||
| 7475 | } else if (!sas_device->starget) { | 7762 | } else if (!sas_device->starget) { |
| 7476 | mpt2sas_transport_port_remove(ioc, | 7763 | if (!ioc->is_driver_loading) |
| 7477 | sas_device->sas_address, | 7764 | mpt2sas_transport_port_remove(ioc, |
| 7478 | sas_device->sas_address_parent); | 7765 | sas_device->sas_address, |
| 7479 | _scsih_sas_device_remove(ioc, sas_device); | 7766 | sas_device->sas_address_parent); |
| 7767 | list_del(&sas_device->list); | ||
| 7768 | kfree(sas_device); | ||
| 7769 | continue; | ||
| 7770 | |||
| 7480 | } | 7771 | } |
| 7772 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
| 7773 | list_move_tail(&sas_device->list, &ioc->sas_device_list); | ||
| 7774 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
| 7481 | } | 7775 | } |
| 7482 | } | 7776 | } |
| 7483 | 7777 | ||
| @@ -7490,9 +7784,7 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) | |||
| 7490 | static void | 7784 | static void |
| 7491 | _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) | 7785 | _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) |
| 7492 | { | 7786 | { |
| 7493 | u16 volume_mapping_flags = | 7787 | u16 volume_mapping_flags; |
| 7494 | le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & | ||
| 7495 | MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; | ||
| 7496 | 7788 | ||
| 7497 | if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) | 7789 | if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) |
| 7498 | return; /* return when IOC doesn't support initiator mode */ | 7790 | return; /* return when IOC doesn't support initiator mode */ |
| @@ -7500,18 +7792,93 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) | |||
| 7500 | _scsih_probe_boot_devices(ioc); | 7792 | _scsih_probe_boot_devices(ioc); |
| 7501 | 7793 | ||
| 7502 | if (ioc->ir_firmware) { | 7794 | if (ioc->ir_firmware) { |
| 7503 | if ((volume_mapping_flags & | 7795 | volume_mapping_flags = |
| 7504 | MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) { | 7796 | le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & |
| 7505 | _scsih_probe_sas(ioc); | 7797 | MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; |
| 7798 | if (volume_mapping_flags == | ||
| 7799 | MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { | ||
| 7506 | _scsih_probe_raid(ioc); | 7800 | _scsih_probe_raid(ioc); |
| 7801 | _scsih_probe_sas(ioc); | ||
| 7507 | } else { | 7802 | } else { |
| 7508 | _scsih_probe_raid(ioc); | ||
| 7509 | _scsih_probe_sas(ioc); | 7803 | _scsih_probe_sas(ioc); |
| 7804 | _scsih_probe_raid(ioc); | ||
| 7510 | } | 7805 | } |
| 7511 | } else | 7806 | } else |
| 7512 | _scsih_probe_sas(ioc); | 7807 | _scsih_probe_sas(ioc); |
| 7513 | } | 7808 | } |
| 7514 | 7809 | ||
| 7810 | |||
| 7811 | /** | ||
| 7812 | * _scsih_scan_start - scsi lld callback for .scan_start | ||
| 7813 | * @shost: SCSI host pointer | ||
| 7814 | * | ||
| 7815 | * The shost has the ability to discover targets on its own instead | ||
| 7816 | * of scanning the entire bus. In our implementation, we will kick off | ||
| 7817 | * firmware discovery. | ||
| 7818 | */ | ||
| 7819 | static void | ||
| 7820 | _scsih_scan_start(struct Scsi_Host *shost) | ||
| 7821 | { | ||
| 7822 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 7823 | int rc; | ||
| 7824 | |||
| 7825 | if (diag_buffer_enable != -1 && diag_buffer_enable != 0) | ||
| 7826 | mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); | ||
| 7827 | |||
| 7828 | ioc->start_scan = 1; | ||
| 7829 | rc = mpt2sas_port_enable(ioc); | ||
| 7830 | |||
| 7831 | if (rc != 0) | ||
| 7832 | printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name); | ||
| 7833 | } | ||
| 7834 | |||
| 7835 | /** | ||
| 7836 | * _scsih_scan_finished - scsi lld callback for .scan_finished | ||
| 7837 | * @shost: SCSI host pointer | ||
| 7838 | * @time: elapsed time of the scan in jiffies | ||
| 7839 | * | ||
| 7840 | * This function will be called periodically until it returns 1 with the | ||
| 7841 | * scsi_host and the elapsed time of the scan in jiffies. In our implementation, | ||
| 7842 | * we wait for firmware discovery to complete, then return 1. | ||
| 7843 | */ | ||
| 7844 | static int | ||
| 7845 | _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) | ||
| 7846 | { | ||
| 7847 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | ||
| 7848 | |||
| 7849 | if (time >= (300 * HZ)) { | ||
| 7850 | ioc->base_cmds.status = MPT2_CMD_NOT_USED; | ||
| 7851 | printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout " | ||
| 7852 | "(timeout=300s)\n", ioc->name); | ||
| 7853 | ioc->is_driver_loading = 0; | ||
| 7854 | return 1; | ||
| 7855 | } | ||
| 7856 | |||
| 7857 | if (ioc->start_scan) | ||
| 7858 | return 0; | ||
| 7859 | |||
| 7860 | if (ioc->start_scan_failed) { | ||
| 7861 | printk(MPT2SAS_INFO_FMT "port enable: FAILED with " | ||
| 7862 | "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed); | ||
| 7863 | ioc->is_driver_loading = 0; | ||
| 7864 | ioc->wait_for_discovery_to_complete = 0; | ||
| 7865 | ioc->remove_host = 1; | ||
| 7866 | return 1; | ||
| 7867 | } | ||
| 7868 | |||
| 7869 | printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name); | ||
| 7870 | ioc->base_cmds.status = MPT2_CMD_NOT_USED; | ||
| 7871 | |||
| 7872 | if (ioc->wait_for_discovery_to_complete) { | ||
| 7873 | ioc->wait_for_discovery_to_complete = 0; | ||
| 7874 | _scsih_probe_devices(ioc); | ||
| 7875 | } | ||
| 7876 | mpt2sas_base_start_watchdog(ioc); | ||
| 7877 | ioc->is_driver_loading = 0; | ||
| 7878 | return 1; | ||
| 7879 | } | ||
| 7880 | |||
| 7881 | |||
| 7515 | /** | 7882 | /** |
| 7516 | * _scsih_probe - attach and add scsi host | 7883 | * _scsih_probe - attach and add scsi host |
| 7517 | * @pdev: PCI device struct | 7884 | * @pdev: PCI device struct |
| @@ -7548,6 +7915,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 7548 | ioc->tm_cb_idx = tm_cb_idx; | 7915 | ioc->tm_cb_idx = tm_cb_idx; |
| 7549 | ioc->ctl_cb_idx = ctl_cb_idx; | 7916 | ioc->ctl_cb_idx = ctl_cb_idx; |
| 7550 | ioc->base_cb_idx = base_cb_idx; | 7917 | ioc->base_cb_idx = base_cb_idx; |
| 7918 | ioc->port_enable_cb_idx = port_enable_cb_idx; | ||
| 7551 | ioc->transport_cb_idx = transport_cb_idx; | 7919 | ioc->transport_cb_idx = transport_cb_idx; |
| 7552 | ioc->scsih_cb_idx = scsih_cb_idx; | 7920 | ioc->scsih_cb_idx = scsih_cb_idx; |
| 7553 | ioc->config_cb_idx = config_cb_idx; | 7921 | ioc->config_cb_idx = config_cb_idx; |
| @@ -7620,14 +7988,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 7620 | goto out_thread_fail; | 7988 | goto out_thread_fail; |
| 7621 | } | 7989 | } |
| 7622 | 7990 | ||
| 7623 | ioc->wait_for_port_enable_to_complete = 1; | 7991 | ioc->is_driver_loading = 1; |
| 7624 | if ((mpt2sas_base_attach(ioc))) { | 7992 | if ((mpt2sas_base_attach(ioc))) { |
| 7625 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 7993 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
| 7626 | ioc->name, __FILE__, __LINE__, __func__); | 7994 | ioc->name, __FILE__, __LINE__, __func__); |
| 7627 | goto out_attach_fail; | 7995 | goto out_attach_fail; |
| 7628 | } | 7996 | } |
| 7629 | 7997 | ||
| 7630 | ioc->wait_for_port_enable_to_complete = 0; | 7998 | scsi_scan_host(shost); |
| 7631 | if (ioc->is_warpdrive) { | 7999 | if (ioc->is_warpdrive) { |
| 7632 | if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) | 8000 | if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) |
| 7633 | ioc->hide_drives = 0; | 8001 | ioc->hide_drives = 0; |
| @@ -7650,6 +8018,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 7650 | out_thread_fail: | 8018 | out_thread_fail: |
| 7651 | list_del(&ioc->list); | 8019 | list_del(&ioc->list); |
| 7652 | scsi_remove_host(shost); | 8020 | scsi_remove_host(shost); |
| 8021 | scsi_host_put(shost); | ||
| 7653 | out_add_shost_fail: | 8022 | out_add_shost_fail: |
| 7654 | return -ENODEV; | 8023 | return -ENODEV; |
| 7655 | } | 8024 | } |
| @@ -7896,6 +8265,8 @@ _scsih_init(void) | |||
| 7896 | 8265 | ||
| 7897 | /* base internal commands callback handler */ | 8266 | /* base internal commands callback handler */ |
| 7898 | base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); | 8267 | base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); |
| 8268 | port_enable_cb_idx = mpt2sas_base_register_callback_handler( | ||
| 8269 | mpt2sas_port_enable_done); | ||
| 7899 | 8270 | ||
| 7900 | /* transport internal commands callback handler */ | 8271 | /* transport internal commands callback handler */ |
| 7901 | transport_cb_idx = mpt2sas_base_register_callback_handler( | 8272 | transport_cb_idx = mpt2sas_base_register_callback_handler( |
| @@ -7950,6 +8321,7 @@ _scsih_exit(void) | |||
| 7950 | mpt2sas_base_release_callback_handler(scsi_io_cb_idx); | 8321 | mpt2sas_base_release_callback_handler(scsi_io_cb_idx); |
| 7951 | mpt2sas_base_release_callback_handler(tm_cb_idx); | 8322 | mpt2sas_base_release_callback_handler(tm_cb_idx); |
| 7952 | mpt2sas_base_release_callback_handler(base_cb_idx); | 8323 | mpt2sas_base_release_callback_handler(base_cb_idx); |
| 8324 | mpt2sas_base_release_callback_handler(port_enable_cb_idx); | ||
| 7953 | mpt2sas_base_release_callback_handler(transport_cb_idx); | 8325 | mpt2sas_base_release_callback_handler(transport_cb_idx); |
| 7954 | mpt2sas_base_release_callback_handler(scsih_cb_idx); | 8326 | mpt2sas_base_release_callback_handler(scsih_cb_idx); |
| 7955 | mpt2sas_base_release_callback_handler(config_cb_idx); | 8327 | mpt2sas_base_release_callback_handler(config_cb_idx); |
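The mpt2sas hunks above retire the driver's internal rescan-after-reset event (MPT2SAS_RESCAN_AFTER_HOST_RESET) in favour of the midlayer's asynchronous scanning hooks: .scan_start kicks off firmware port enable and .scan_finished is polled until discovery completes, after which the probe path simply calls scsi_scan_host(). A minimal sketch of that callback pattern follows; apart from the two scsi_host_template fields and the 300-second limit taken from the hunk, every name here is illustrative and not part of the patch.

#include <linux/module.h>
#include <scsi/scsi_host.h>

static int example_discovery_done;	/* would live in per-adapter data */

static void example_scan_start(struct Scsi_Host *shost)
{
	/* kick off asynchronous discovery (port enable in the driver above) */
	example_discovery_done = 0;
	/* ... issue the firmware request here ... */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	if (time >= 300 * HZ)		/* give up after the same 300 s limit */
		return 1;
	return example_discovery_done;	/* non-zero once discovery has completed */
}

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.scan_start	= example_scan_start,
	.scan_finished	= example_scan_finished,
};

scsi_scan_host() drives these two callbacks, which is why the probe path above now calls scsi_scan_host(shost) where it previously waited on wait_for_port_enable_to_complete.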
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 621b5e072758..6f589195746c 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
| @@ -732,6 +732,16 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { | |||
| 732 | .class_mask = 0, | 732 | .class_mask = 0, |
| 733 | .driver_data = chip_9485, | 733 | .driver_data = chip_9485, |
| 734 | }, | 734 | }, |
| 735 | { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ | ||
| 736 | { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 737 | { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 738 | { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 739 | { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 740 | { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 741 | { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 742 | { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 743 | { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 744 | { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | ||
| 735 | 745 | ||
| 736 | { } /* terminate list */ | 746 | { } /* terminate list */ |
| 737 | }; | 747 | }; |
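The mvsas hunk above only extends the driver's PCI ID table with the OCZ controllers; PCI_VDEVICE(OCZ, ...) depends on the PCI_VENDOR_ID_OCZ definition added to include/linux/pci_ids.h at the end of this series. For reference, a hedged sketch of how such a table entry is typically declared and exported so the PCI core can bind the driver; the example_* names and the placeholder chip enum are illustrative, not taken from mvsas.

#include <linux/module.h>
#include <linux/pci.h>

enum example_chip { chip_9485 };		/* stands in for the mvsas chip ids */

static const struct pci_device_id example_pci_table[] = {
	/* PCI_VDEVICE() fills vendor/device and wildcards the subsystem ids */
	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485 },	/* OCZ RevoDrive3 */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);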
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index b86db84d6f32..5163edb925cb 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -4102,7 +4102,7 @@ static long pmcraid_chr_ioctl( | |||
| 4102 | struct pmcraid_ioctl_header *hdr = NULL; | 4102 | struct pmcraid_ioctl_header *hdr = NULL; |
| 4103 | int retval = -ENOTTY; | 4103 | int retval = -ENOTTY; |
| 4104 | 4104 | ||
| 4105 | hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header)); | 4105 | hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL); |
| 4106 | 4106 | ||
| 4107 | if (!hdr) { | 4107 | if (!hdr) { |
| 4108 | pmcraid_err("failed to allocate memory for ioctl header\n"); | 4108 | pmcraid_err("failed to allocate memory for ioctl header\n"); |
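The pmcraid change is a simple argument-order fix: kmalloc() takes the size first and the GFP flags second, so the old call passed GFP_KERNEL as the allocation size and the structure size as the flags. A minimal sketch of the corrected pattern, using a stand-in structure rather than the driver's pmcraid_ioctl_header:

#include <linux/slab.h>
#include <linux/types.h>

struct example_hdr {			/* stands in for pmcraid_ioctl_header */
	u32 signature;
};

static struct example_hdr *example_alloc(void)
{
	/* size first, GFP flags second -- the order the hunk above restores */
	return kmalloc(sizeof(struct example_hdr), GFP_KERNEL);
}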
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3474e86e98ab..2516adf1aeea 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -2279,7 +2279,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
| 2279 | ha = rsp->hw; | 2279 | ha = rsp->hw; |
| 2280 | 2280 | ||
| 2281 | /* Clear the interrupt, if enabled, for this response queue */ | 2281 | /* Clear the interrupt, if enabled, for this response queue */ |
| 2282 | if (rsp->options & ~BIT_6) { | 2282 | if (!ha->flags.disable_msix_handshake) { |
| 2283 | reg = &ha->iobase->isp24; | 2283 | reg = &ha->iobase->isp24; |
| 2284 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2284 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 2285 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 2285 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fc3f168decb4..b4d43ae76132 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1698,6 +1698,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
| 1698 | 1698 | ||
| 1699 | void scsi_free_queue(struct request_queue *q) | 1699 | void scsi_free_queue(struct request_queue *q) |
| 1700 | { | 1700 | { |
| 1701 | unsigned long flags; | ||
| 1702 | |||
| 1703 | WARN_ON(q->queuedata); | ||
| 1704 | |||
| 1705 | /* cause scsi_request_fn() to kill all non-finished requests */ | ||
| 1706 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 1707 | q->request_fn(q); | ||
| 1708 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 1709 | |||
| 1701 | blk_cleanup_queue(q); | 1710 | blk_cleanup_queue(q); |
| 1702 | } | 1711 | } |
| 1703 | 1712 | ||
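The scsi_free_queue() change above runs the queue's request_fn once under the queue lock before blk_cleanup_queue(), so requests still queued against a device that never came up are completed rather than leaked; the scsi_scan.c hunk that follows adds the matching scsi_free_queue() call to the device-probe error path. A condensed sketch of that teardown order, a sketch only, with the locking names taken from the hunk:

#include <linux/blkdev.h>

static void example_free_queue(struct request_queue *q)
{
	unsigned long flags;

	/* let the request_fn see the dying queue and kill unfinished requests */
	spin_lock_irqsave(q->queue_lock, flags);
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	blk_cleanup_queue(q);
}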
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 44e8ca398efa..72273a0e5666 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -322,6 +322,7 @@ out_device_destroy: | |||
| 322 | scsi_device_set_state(sdev, SDEV_DEL); | 322 | scsi_device_set_state(sdev, SDEV_DEL); |
| 323 | transport_destroy_device(&sdev->sdev_gendev); | 323 | transport_destroy_device(&sdev->sdev_gendev); |
| 324 | put_device(&sdev->sdev_dev); | 324 | put_device(&sdev->sdev_dev); |
| 325 | scsi_free_queue(sdev->request_queue); | ||
| 325 | put_device(&sdev->sdev_gendev); | 326 | put_device(&sdev->sdev_gendev); |
| 326 | out: | 327 | out: |
| 327 | if (display_failure_msg) | 328 | if (display_failure_msg) |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 1bcd65a509e6..96029e6d027f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
| @@ -520,7 +520,7 @@ fail_host_msg: | |||
| 520 | /** | 520 | /** |
| 521 | * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests | 521 | * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests |
| 522 | * @shost: shost for iscsi_host | 522 | * @shost: shost for iscsi_host |
| 523 | * @cls_host: iscsi_cls_host adding the structures to | 523 | * @ihost: iscsi_cls_host adding the structures to |
| 524 | */ | 524 | */ |
| 525 | static int | 525 | static int |
| 526 | iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) | 526 | iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a7942e5c8be8..fa3a5918009c 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -2590,18 +2590,16 @@ static int sd_probe(struct device *dev) | |||
| 2590 | spin_unlock(&sd_index_lock); | 2590 | spin_unlock(&sd_index_lock); |
| 2591 | } while (error == -EAGAIN); | 2591 | } while (error == -EAGAIN); |
| 2592 | 2592 | ||
| 2593 | if (error) | 2593 | if (error) { |
| 2594 | sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); | ||
| 2594 | goto out_put; | 2595 | goto out_put; |
| 2595 | |||
| 2596 | if (index >= SD_MAX_DISKS) { | ||
| 2597 | error = -ENODEV; | ||
| 2598 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n"); | ||
| 2599 | goto out_free_index; | ||
| 2600 | } | 2596 | } |
| 2601 | 2597 | ||
| 2602 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); | 2598 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); |
| 2603 | if (error) | 2599 | if (error) { |
| 2600 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); | ||
| 2604 | goto out_free_index; | 2601 | goto out_free_index; |
| 2602 | } | ||
| 2605 | 2603 | ||
| 2606 | sdkp->device = sdp; | 2604 | sdkp->device = sdp; |
| 2607 | sdkp->driver = &sd_template; | 2605 | sdkp->driver = &sd_template; |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 6ad798bfd52a..4163f2910e3d 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -9,12 +9,6 @@ | |||
| 9 | #define SD_MAJORS 16 | 9 | #define SD_MAJORS 16 |
| 10 | 10 | ||
| 11 | /* | 11 | /* |
| 12 | * This is limited by the naming scheme enforced in sd_probe, | ||
| 13 | * add another character to it if you really need more disks. | ||
| 14 | */ | ||
| 15 | #define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26) | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Time out in seconds for disks and Magneto-opticals (which are slower). | 12 | * Time out in seconds for disks and Magneto-opticals (which are slower). |
| 19 | */ | 13 | */ |
| 20 | #define SD_TIMEOUT (30 * HZ) | 14 | #define SD_TIMEOUT (30 * HZ) |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 1871b8ae83ae..9b28f39bac26 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -462,14 +462,16 @@ static void st_scsi_execute_end(struct request *req, int uptodate) | |||
| 462 | { | 462 | { |
| 463 | struct st_request *SRpnt = req->end_io_data; | 463 | struct st_request *SRpnt = req->end_io_data; |
| 464 | struct scsi_tape *STp = SRpnt->stp; | 464 | struct scsi_tape *STp = SRpnt->stp; |
| 465 | struct bio *tmp; | ||
| 465 | 466 | ||
| 466 | STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; | 467 | STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; |
| 467 | STp->buffer->cmdstat.residual = req->resid_len; | 468 | STp->buffer->cmdstat.residual = req->resid_len; |
| 468 | 469 | ||
| 470 | tmp = SRpnt->bio; | ||
| 469 | if (SRpnt->waiting) | 471 | if (SRpnt->waiting) |
| 470 | complete(SRpnt->waiting); | 472 | complete(SRpnt->waiting); |
| 471 | 473 | ||
| 472 | blk_rq_unmap_user(SRpnt->bio); | 474 | blk_rq_unmap_user(tmp); |
| 473 | __blk_put_request(req->q, req); | 475 | __blk_put_request(req->q, req); |
| 474 | } | 476 | } |
| 475 | 477 | ||
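The st.c fix above addresses a use-after-free: complete(SRpnt->waiting) may wake a thread that frees the st_request, so the bio pointer has to be copied out before the completion is signalled and only the saved copy used afterwards. A generic sketch of the pattern, with a stand-in structure in place of the real struct st_request:

#include <linux/blkdev.h>
#include <linux/completion.h>

struct example_req {			/* stands in for struct st_request */
	struct bio *bio;
	struct completion *waiting;
};

static void example_end_io(struct request *req)
{
	struct example_req *sreq = req->end_io_data;
	struct bio *bio = sreq->bio;	/* copy out before waking the waiter */

	if (sreq->waiting)
		complete(sreq->waiting);	/* sreq may be freed from here on */

	blk_rq_unmap_user(bio);			/* safe: uses the saved pointer */
	__blk_put_request(req->q, req);
}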
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1679ff6931f9..3fdf251389de 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -2873,3 +2873,5 @@ | |||
| 2873 | 2873 | ||
| 2874 | #define PCI_VENDOR_ID_XEN 0x5853 | 2874 | #define PCI_VENDOR_ID_XEN 0x5853 |
| 2875 | #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 | 2875 | #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 |
| 2876 | |||
| 2877 | #define PCI_VENDOR_ID_OCZ 0x1b85 | ||
