author    David S. Miller <davem@davemloft.net>  2009-06-15 06:02:23 -0400
committer David S. Miller <davem@davemloft.net>  2009-06-15 06:02:23 -0400
commit    9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree      8d104ec2a459346b99413b0b77421ca7b9936c1a /drivers/message
parent    ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent    45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt |    6
-rw-r--r--  drivers/message/fusion/mptbase.c            | 1570
-rw-r--r--  drivers/message/fusion/mptbase.h            |  180
-rw-r--r--  drivers/message/fusion/mptctl.c             |  692
-rw-r--r--  drivers/message/fusion/mptdebug.h           |    3
-rw-r--r--  drivers/message/fusion/mptfc.c              |   15
-rw-r--r--  drivers/message/fusion/mptsas.c             | 3136
-rw-r--r--  drivers/message/fusion/mptsas.h             |   41
-rw-r--r--  drivers/message/fusion/mptscsih.c           | 1329
-rw-r--r--  drivers/message/fusion/mptscsih.h           |    7
-rw-r--r--  drivers/message/fusion/mptspi.c             |   71
-rw-r--r--  drivers/message/i2o/i2o_block.c             |   43
12 files changed, 4602 insertions, 2491 deletions
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 693e4b511354..fa9249b4971a 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -130,7 +130,7 @@ mpi_ioc.h
  * 08-08-01 01.02.01 Original release for v1.2 work.
  * New format for FWVersion and ProductId in
  * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
- * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
  * related structure and defines.
  * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
  * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -190,7 +190,7 @@ mpi_ioc.h
  * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
  * Added MaxInitiators field to PortFacts reply.
  * Added SAS Device Status Change ReasonCode for
- * asynchronous notificaiton.
+ * asynchronous notification.
  * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
  * data structure.
  * Added new ImageType values for FWDownload and FWUpload
@@ -623,7 +623,7 @@ mpi_fc.h
  * 11-02-00 01.01.01 Original release for post 1.0 work
  * 12-04-00 01.01.02 Added messages for Common Transport Send and
  * Primitive Send.
- * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
  * and modified the FcPrimitiveSend flags.
  * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
  * field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e034..0df065275cd3 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
148 148
149static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
150 149
151/* 150/*
152 * Driver Callback Index's 151 * Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
159 * Forward protos... 158 * Forward protos...
160 */ 159 */
161static irqreturn_t mpt_interrupt(int irq, void *bus_id); 160static irqreturn_t mpt_interrupt(int irq, void *bus_id);
162static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); 161static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
162 MPT_FRAME_HDR *reply);
163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, 163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
164 u32 *req, int replyBytes, u16 *u16reply, int maxwait, 164 u32 *req, int replyBytes, u16 *u16reply, int maxwait,
165 int sleepFlag); 165 int sleepFlag);
@@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); 192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
193static void mpt_timer_expired(unsigned long data);
194static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); 193static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
195static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 194static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
195 int sleepFlag);
196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag); 197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init); 198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
207#endif 207#endif
208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); 208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
209 209
210//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 210static int ProcessEventNotification(MPT_ADAPTER *ioc,
211static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); 211 EventNotificationReply_t *evReply, int *evHandlers);
212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); 214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -277,6 +277,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
277} 277}
278 278
279/** 279/**
280 * mpt_is_discovery_complete - determine if discovery has completed
281 * @ioc: per adatper instance
282 *
283 * Returns 1 when discovery completed, else zero.
284 */
285static int
286mpt_is_discovery_complete(MPT_ADAPTER *ioc)
287{
288 ConfigExtendedPageHeader_t hdr;
289 CONFIGPARMS cfg;
290 SasIOUnitPage0_t *buffer;
291 dma_addr_t dma_handle;
292 int rc = 0;
293
294 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
295 memset(&cfg, 0, sizeof(CONFIGPARMS));
296 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
297 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
298 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
299 cfg.cfghdr.ehdr = &hdr;
300 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
301
302 if ((mpt_config(ioc, &cfg)))
303 goto out;
304 if (!hdr.ExtPageLength)
305 goto out;
306
307 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
308 &dma_handle);
309 if (!buffer)
310 goto out;
311
312 cfg.physAddr = dma_handle;
313 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
314
315 if ((mpt_config(ioc, &cfg)))
316 goto out_free_consistent;
317
318 if (!(buffer->PhyData[0].PortFlags &
319 MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
320 rc = 1;
321
322 out_free_consistent:
323 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
324 buffer, dma_handle);
325 out:
326 return rc;
327}
328
329/**
280 * mpt_fault_reset_work - work performed on workq after ioc fault 330 * mpt_fault_reset_work - work performed on workq after ioc fault
281 * @work: input argument, used to derive ioc 331 * @work: input argument, used to derive ioc
282 * 332 *
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
290 int rc; 340 int rc;
291 unsigned long flags; 341 unsigned long flags;
292 342
293 if (ioc->diagPending || !ioc->active) 343 if (ioc->ioc_reset_in_progress || !ioc->active)
294 goto out; 344 goto out;
295 345
296 ioc_raw_state = mpt_GetIocState(ioc, 0); 346 ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
307 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " 357 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
308 "reset (%04xh)\n", ioc->name, ioc_raw_state & 358 "reset (%04xh)\n", ioc->name, ioc_raw_state &
309 MPI_DOORBELL_DATA_MASK); 359 MPI_DOORBELL_DATA_MASK);
360 } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
361 if ((mpt_is_discovery_complete(ioc))) {
362 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
363 "discovery_quiesce_io flag\n", ioc->name));
364 ioc->sas_discovery_quiesce_io = 0;
365 }
310 } 366 }
311 367
312 out: 368 out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
317 ioc = ioc->alt_ioc; 373 ioc = ioc->alt_ioc;
318 374
319 /* rearm the timer */ 375 /* rearm the timer */
320 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 376 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
321 if (ioc->reset_work_q) 377 if (ioc->reset_work_q)
322 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, 378 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
323 msecs_to_jiffies(MPT_POLLING_INTERVAL)); 379 msecs_to_jiffies(MPT_POLLING_INTERVAL));
324 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 380 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
325} 381}
326 382
327 383
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
501 557
502/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 558/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
503/** 559/**
504 * mpt_base_reply - MPT base driver's callback routine 560 * mptbase_reply - MPT base driver's callback routine
505 * @ioc: Pointer to MPT_ADAPTER structure 561 * @ioc: Pointer to MPT_ADAPTER structure
506 * @mf: Pointer to original MPT request frame 562 * @req: Pointer to original MPT request frame
507 * @reply: Pointer to MPT reply frame (NULL if TurboReply) 563 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
508 * 564 *
509 * MPT base driver's callback routine; all base driver 565 * MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
514 * should be freed, or 0 if it shouldn't. 570 * should be freed, or 0 if it shouldn't.
515 */ 571 */
516static int 572static int
517mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) 573mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
518{ 574{
575 EventNotificationReply_t *pEventReply;
576 u8 event;
577 int evHandlers;
519 int freereq = 1; 578 int freereq = 1;
520 u8 func;
521 579
522 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name)); 580 switch (reply->u.hdr.Function) {
523#ifdef CONFIG_FUSION_LOGGING 581 case MPI_FUNCTION_EVENT_NOTIFICATION:
524 if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) && 582 pEventReply = (EventNotificationReply_t *)reply;
525 !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { 583 evHandlers = 0;
526 dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n", 584 ProcessEventNotification(ioc, pEventReply, &evHandlers);
527 ioc->name, mf)); 585 event = le32_to_cpu(pEventReply->Event) & 0xFF;
528 DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf); 586 if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
529 }
530#endif
531
532 func = reply->u.hdr.Function;
533 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
534 ioc->name, func));
535
536 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
537 EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
538 int evHandlers = 0;
539 int results;
540
541 results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
542 if (results != evHandlers) {
543 /* CHECKME! Any special handling needed here? */
544 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
545 ioc->name, evHandlers, results));
546 }
547
548 /*
549 * Hmmm... It seems that EventNotificationReply is an exception
550 * to the rule of one reply per request.
551 */
552 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
553 freereq = 0; 587 freereq = 0;
554 } else { 588 if (event != MPI_EVENT_EVENT_CHANGE)
555 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 589 break;
556 ioc->name, pEvReply)); 590 case MPI_FUNCTION_CONFIG:
557 } 591 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
558 592 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
559#ifdef CONFIG_PROC_FS 593 if (reply) {
560// LogEvent(ioc, pEvReply); 594 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
561#endif 595 memcpy(ioc->mptbase_cmds.reply, reply,
562 596 min(MPT_DEFAULT_FRAME_SIZE,
563 } else if (func == MPI_FUNCTION_EVENT_ACK) { 597 4 * reply->u.reply.MsgLength));
564 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
565 ioc->name));
566 } else if (func == MPI_FUNCTION_CONFIG) {
567 CONFIGPARMS *pCfg;
568 unsigned long flags;
569
570 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
571 ioc->name, mf, reply));
572
573 pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
574
575 if (pCfg) {
576 /* disable timer and remove from linked list */
577 del_timer(&pCfg->timer);
578
579 spin_lock_irqsave(&ioc->FreeQlock, flags);
580 list_del(&pCfg->linkage);
581 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
582
583 /*
584 * If IOC Status is SUCCESS, save the header
585 * and set the status code to GOOD.
586 */
587 pCfg->status = MPT_CONFIG_ERROR;
588 if (reply) {
589 ConfigReply_t *pReply = (ConfigReply_t *)reply;
590 u16 status;
591
592 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
593 dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n",
594 ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
595
596 pCfg->status = status;
597 if (status == MPI_IOCSTATUS_SUCCESS) {
598 if ((pReply->Header.PageType &
599 MPI_CONFIG_PAGETYPE_MASK) ==
600 MPI_CONFIG_PAGETYPE_EXTENDED) {
601 pCfg->cfghdr.ehdr->ExtPageLength =
602 le16_to_cpu(pReply->ExtPageLength);
603 pCfg->cfghdr.ehdr->ExtPageType =
604 pReply->ExtPageType;
605 }
606 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
607
608 /* If this is a regular header, save PageLength. */
609 /* LMP Do this better so not using a reserved field! */
610 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
611 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
612 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
613 }
614 }
615
616 /*
617 * Wake up the original calling thread
618 */
619 pCfg->wait_done = 1;
620 wake_up(&mpt_waitq);
621 } 598 }
622 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) { 599 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
623 /* we should be always getting a reply frame */ 600 ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
624 memcpy(ioc->persist_reply_frame, reply, 601 complete(&ioc->mptbase_cmds.done);
625 min(MPT_DEFAULT_FRAME_SIZE, 602 } else
626 4*reply->u.reply.MsgLength)); 603 freereq = 0;
627 del_timer(&ioc->persist_timer); 604 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
628 ioc->persist_wait_done = 1; 605 freereq = 1;
629 wake_up(&mpt_waitq); 606 break;
630 } else { 607 case MPI_FUNCTION_EVENT_ACK:
631 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 608 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
632 ioc->name, func); 609 "EventAck reply received\n", ioc->name));
610 break;
611 default:
612 printk(MYIOC_s_ERR_FMT
613 "Unexpected msg function (=%02Xh) reply received!\n",
614 ioc->name, reply->u.hdr.Function);
615 break;
633 } 616 }
634 617
635 /* 618 /*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
988 971
989 /* Put Request back on FreeQ! */ 972 /* Put Request back on FreeQ! */
990 spin_lock_irqsave(&ioc->FreeQlock, flags); 973 spin_lock_irqsave(&ioc->FreeQlock, flags);
991 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */ 974 if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
975 goto out;
976 /* signature to know if this mf is freed */
977 mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
992 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 978 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
993#ifdef MFCNT 979#ifdef MFCNT
994 ioc->mfcnt--; 980 ioc->mfcnt--;
995#endif 981#endif
982 out:
996 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 983 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
997} 984}
998 985
999/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1000/** 987/**
1001 * mpt_add_sge - Place a simple SGE at address pAddr. 988 * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
1002 * @pAddr: virtual address for SGE 989 * @pAddr: virtual address for SGE
1003 * @flagslength: SGE flags and data transfer length 990 * @flagslength: SGE flags and data transfer length
1004 * @dma_addr: Physical address 991 * @dma_addr: Physical address
@@ -1006,23 +993,116 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1006 * This routine places a MPT request frame back on the MPT adapter's 993 * This routine places a MPT request frame back on the MPT adapter's
1007 * FreeQ. 994 * FreeQ.
1008 */ 995 */
1009void 996static void
1010mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) 997mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1011{ 998{
1012 if (sizeof(dma_addr_t) == sizeof(u64)) { 999 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
1013 SGESimple64_t *pSge = (SGESimple64_t *) pAddr; 1000 pSge->FlagsLength = cpu_to_le32(flagslength);
1001 pSge->Address = cpu_to_le32(dma_addr);
1002}
1003
1004/**
1005 * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
1006 * @pAddr: virtual address for SGE
1007 * @flagslength: SGE flags and data transfer length
1008 * @dma_addr: Physical address
1009 *
1010 * This routine places a MPT request frame back on the MPT adapter's
1011 * FreeQ.
1012 **/
1013static void
1014mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1015{
1016 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1017 pSge->Address.Low = cpu_to_le32
1018 (lower_32_bits((unsigned long)(dma_addr)));
1019 pSge->Address.High = cpu_to_le32
1020 (upper_32_bits((unsigned long)dma_addr));
1021 pSge->FlagsLength = cpu_to_le32
1022 ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1023}
1024
1025/**
1026 * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
1027 * @pAddr: virtual address for SGE
1028 * @flagslength: SGE flags and data transfer length
1029 * @dma_addr: Physical address
1030 *
1031 * This routine places a MPT request frame back on the MPT adapter's
1032 * FreeQ.
1033 **/
1034static void
1035mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1036{
1037 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1038 u32 tmp;
1039
1040 pSge->Address.Low = cpu_to_le32
1041 (lower_32_bits((unsigned long)(dma_addr)));
1042 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1043
1044 /*
1045 * 1078 errata workaround for the 36GB limitation
1046 */
1047 if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
1048 flagslength |=
1049 MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
1050 tmp |= (1<<31);
1051 if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
1052 printk(KERN_DEBUG "1078 P0M2 addressing for "
1053 "addr = 0x%llx len = %d\n",
1054 (unsigned long long)dma_addr,
1055 MPI_SGE_LENGTH(flagslength));
1056 }
1057
1058 pSge->Address.High = cpu_to_le32(tmp);
1059 pSge->FlagsLength = cpu_to_le32(
1060 (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1061}
1062
1063/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1064/**
1065 * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
1066 * @pAddr: virtual address for SGE
1067 * @next: nextChainOffset value (u32's)
1068 * @length: length of next SGL segment
1069 * @dma_addr: Physical address
1070 *
1071 */
1072static void
1073mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1074{
1075 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
1076 pChain->Length = cpu_to_le16(length);
1077 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1078 pChain->NextChainOffset = next;
1079 pChain->Address = cpu_to_le32(dma_addr);
1080}
1081
1082/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1083/**
1084 * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
1085 * @pAddr: virtual address for SGE
1086 * @next: nextChainOffset value (u32's)
1087 * @length: length of next SGL segment
1088 * @dma_addr: Physical address
1089 *
1090 */
1091static void
1092mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1093{
1094 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
1014 u32 tmp = dma_addr & 0xFFFFFFFF; 1095 u32 tmp = dma_addr & 0xFFFFFFFF;
1015 1096
1016 pSge->FlagsLength = cpu_to_le32(flagslength); 1097 pChain->Length = cpu_to_le16(length);
1017 pSge->Address.Low = cpu_to_le32(tmp); 1098 pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
1018 tmp = (u32) ((u64)dma_addr >> 32); 1099 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
1019 pSge->Address.High = cpu_to_le32(tmp);
1020 1100
1021 } else { 1101 pChain->NextChainOffset = next;
1022 SGESimple32_t *pSge = (SGESimple32_t *) pAddr; 1102
1023 pSge->FlagsLength = cpu_to_le32(flagslength); 1103 pChain->Address.Low = cpu_to_le32(tmp);
1024 pSge->Address = cpu_to_le32(dma_addr); 1104 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1025 } 1105 pChain->Address.High = cpu_to_le32(tmp);
1026} 1106}
1027 1107
1028/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1108/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1225,7 +1305,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1225 } 1305 }
1226 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; 1306 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1227 flags_length |= ioc->HostPageBuffer_sz; 1307 flags_length |= ioc->HostPageBuffer_sz;
1228 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma); 1308 ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1229 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE; 1309 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1230 1310
1231return 0; 1311return 0;
@@ -1534,21 +1614,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1534 1614
1535 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1615 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1536 1616
1537 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1617 if (sizeof(dma_addr_t) > 4) {
1538 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1618 const uint64_t required_mask = dma_get_required_mask
1539 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1619 (&pdev->dev);
1540 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1620 if (required_mask > DMA_BIT_MASK(32)
1541 ioc->name)); 1621 && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1542 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 1622 && !pci_set_consistent_dma_mask(pdev,
1543 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 1623 DMA_BIT_MASK(64))) {
1544 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1624 ioc->dma_mask = DMA_BIT_MASK(64);
1545 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1625 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1546 ioc->name)); 1626 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1627 ioc->name));
1628 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1629 && !pci_set_consistent_dma_mask(pdev,
1630 DMA_BIT_MASK(32))) {
1631 ioc->dma_mask = DMA_BIT_MASK(32);
1632 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1633 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1634 ioc->name));
1635 } else {
1636 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1637 ioc->name, pci_name(pdev));
1638 return r;
1639 }
1547 } else { 1640 } else {
1548 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", 1641 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1549 ioc->name, pci_name(pdev)); 1642 && !pci_set_consistent_dma_mask(pdev,
1550 pci_release_selected_regions(pdev, ioc->bars); 1643 DMA_BIT_MASK(32))) {
1551 return r; 1644 ioc->dma_mask = DMA_BIT_MASK(32);
1645 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1646 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1647 ioc->name));
1648 } else {
1649 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1650 ioc->name, pci_name(pdev));
1651 return r;
1652 }
1552 } 1653 }
1553 1654
1554 mem_phys = msize = 0; 1655 mem_phys = msize = 0;
@@ -1632,6 +1733,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1632 1733
1633 ioc->id = mpt_ids++; 1734 ioc->id = mpt_ids++;
1634 sprintf(ioc->name, "ioc%d", ioc->id); 1735 sprintf(ioc->name, "ioc%d", ioc->id);
1736 dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1635 1737
1636 /* 1738 /*
1637 * set initial debug level 1739 * set initial debug level
@@ -1650,14 +1752,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1650 return r; 1752 return r;
1651 } 1753 }
1652 1754
1755 /*
1756 * Setting up proper handlers for scatter gather handling
1757 */
1758 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
1759 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
1760 ioc->add_sge = &mpt_add_sge_64bit_1078;
1761 else
1762 ioc->add_sge = &mpt_add_sge_64bit;
1763 ioc->add_chain = &mpt_add_chain_64bit;
1764 ioc->sg_addr_size = 8;
1765 } else {
1766 ioc->add_sge = &mpt_add_sge;
1767 ioc->add_chain = &mpt_add_chain;
1768 ioc->sg_addr_size = 4;
1769 }
1770 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
1771
1653 ioc->alloc_total = sizeof(MPT_ADAPTER); 1772 ioc->alloc_total = sizeof(MPT_ADAPTER);
1654 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ 1773 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1655 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 1774 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1656 1775
1657 ioc->pcidev = pdev; 1776 ioc->pcidev = pdev;
1658 ioc->diagPending = 0; 1777
1659 spin_lock_init(&ioc->diagLock); 1778 spin_lock_init(&ioc->taskmgmt_lock);
1660 spin_lock_init(&ioc->initializing_hba_lock); 1779 mutex_init(&ioc->internal_cmds.mutex);
1780 init_completion(&ioc->internal_cmds.done);
1781 mutex_init(&ioc->mptbase_cmds.mutex);
1782 init_completion(&ioc->mptbase_cmds.done);
1783 mutex_init(&ioc->taskmgmt_cmds.mutex);
1784 init_completion(&ioc->taskmgmt_cmds.done);
1661 1785
1662 /* Initialize the event logging. 1786 /* Initialize the event logging.
1663 */ 1787 */
@@ -1670,16 +1794,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1670 ioc->mfcnt = 0; 1794 ioc->mfcnt = 0;
1671#endif 1795#endif
1672 1796
1797 ioc->sh = NULL;
1673 ioc->cached_fw = NULL; 1798 ioc->cached_fw = NULL;
1674 1799
1675 /* Initilize SCSI Config Data structure 1800 /* Initilize SCSI Config Data structure
1676 */ 1801 */
1677 memset(&ioc->spi_data, 0, sizeof(SpiCfgData)); 1802 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1678 1803
1679 /* Initialize the running configQ head.
1680 */
1681 INIT_LIST_HEAD(&ioc->configQ);
1682
1683 /* Initialize the fc rport list head. 1804 /* Initialize the fc rport list head.
1684 */ 1805 */
1685 INIT_LIST_HEAD(&ioc->fc_rports); 1806 INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1811,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1690 1811
1691 /* Initialize workqueue */ 1812 /* Initialize workqueue */
1692 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); 1813 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1693 spin_lock_init(&ioc->fault_reset_work_lock);
1694 1814
1695 snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name), 1815 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1696 "mpt_poll_%d", ioc->id); 1816 "mpt_poll_%d", ioc->id);
1697 ioc->reset_work_q = 1817 ioc->reset_work_q =
1698 create_singlethread_workqueue(ioc->reset_work_q_name); 1818 create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1887,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1767 case MPI_MANUFACTPAGE_DEVID_SAS1064: 1887 case MPI_MANUFACTPAGE_DEVID_SAS1064:
1768 case MPI_MANUFACTPAGE_DEVID_SAS1068: 1888 case MPI_MANUFACTPAGE_DEVID_SAS1068:
1769 ioc->errata_flag_1064 = 1; 1889 ioc->errata_flag_1064 = 1;
1890 ioc->bus_type = SAS;
1891 break;
1770 1892
1771 case MPI_MANUFACTPAGE_DEVID_SAS1064E: 1893 case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1772 case MPI_MANUFACTPAGE_DEVID_SAS1068E: 1894 case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1773 case MPI_MANUFACTPAGE_DEVID_SAS1078: 1895 case MPI_MANUFACTPAGE_DEVID_SAS1078:
1774 ioc->bus_type = SAS; 1896 ioc->bus_type = SAS;
1897 break;
1775 } 1898 }
1776 1899
1777 1900
@@ -1813,6 +1936,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1813 */ 1936 */
1814 mpt_detect_bound_ports(ioc, pdev); 1937 mpt_detect_bound_ports(ioc, pdev);
1815 1938
1939 INIT_LIST_HEAD(&ioc->fw_event_list);
1940 spin_lock_init(&ioc->fw_event_lock);
1941 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
1942 ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
1943
1816 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 1944 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
1817 CAN_SLEEP)) != 0){ 1945 CAN_SLEEP)) != 0){
1818 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n", 1946 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2013,18 @@ mpt_detach(struct pci_dev *pdev)
1885 /* 2013 /*
1886 * Stop polling ioc for fault condition 2014 * Stop polling ioc for fault condition
1887 */ 2015 */
1888 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 2016 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1889 wq = ioc->reset_work_q; 2017 wq = ioc->reset_work_q;
1890 ioc->reset_work_q = NULL; 2018 ioc->reset_work_q = NULL;
1891 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 2019 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1892 cancel_delayed_work(&ioc->fault_reset_work); 2020 cancel_delayed_work(&ioc->fault_reset_work);
1893 destroy_workqueue(wq); 2021 destroy_workqueue(wq);
1894 2022
2023 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2024 wq = ioc->fw_event_q;
2025 ioc->fw_event_q = NULL;
2026 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2027 destroy_workqueue(wq);
1895 2028
1896 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); 2029 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
1897 remove_proc_entry(pname, NULL); 2030 remove_proc_entry(pname, NULL);
@@ -1994,6 +2127,21 @@ mpt_resume(struct pci_dev *pdev)
1994 if (err) 2127 if (err)
1995 return err; 2128 return err;
1996 2129
2130 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
2131 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
2132 ioc->add_sge = &mpt_add_sge_64bit_1078;
2133 else
2134 ioc->add_sge = &mpt_add_sge_64bit;
2135 ioc->add_chain = &mpt_add_chain_64bit;
2136 ioc->sg_addr_size = 8;
2137 } else {
2138
2139 ioc->add_sge = &mpt_add_sge;
2140 ioc->add_chain = &mpt_add_chain;
2141 ioc->sg_addr_size = 4;
2142 }
2143 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
2144
1997 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", 2145 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
1998 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), 2146 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
1999 CHIPREG_READ32(&ioc->chip->Doorbell)); 2147 CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2239,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2091 ioc->active = 0; 2239 ioc->active = 0;
2092 2240
2093 if (ioc->alt_ioc) { 2241 if (ioc->alt_ioc) {
2094 if (ioc->alt_ioc->active) 2242 if (ioc->alt_ioc->active ||
2243 reason == MPT_HOSTEVENT_IOC_RECOVER) {
2095 reset_alt_ioc_active = 1; 2244 reset_alt_ioc_active = 1;
2096 2245 /* Disable alt-IOC's reply interrupts
2097 /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */ 2246 * (and FreeQ) for a bit
2098 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); 2247 **/
2099 ioc->alt_ioc->active = 0; 2248 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2249 0xFFFFFFFF);
2250 ioc->alt_ioc->active = 0;
2251 }
2100 } 2252 }
2101 2253
2102 hard = 1; 2254 hard = 1;
@@ -2117,9 +2269,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2117 } 2269 }
2118 2270
2119 } else { 2271 } else {
2120 printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name); 2272 printk(MYIOC_s_WARN_FMT
2273 "NOT READY WARNING!\n", ioc->name);
2121 } 2274 }
2122 return -1; 2275 ret = -1;
2276 goto out;
2123 } 2277 }
2124 2278
2125 /* hard_reset_done = 0 if a soft reset was performed 2279 /* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2283,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2129 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) 2283 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
2130 alt_ioc_ready = 1; 2284 alt_ioc_ready = 1;
2131 else 2285 else
2132 printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name); 2286 printk(MYIOC_s_WARN_FMT
2287 ": alt-ioc Not ready WARNING!\n",
2288 ioc->alt_ioc->name);
2133 } 2289 }
2134 2290
2135 for (ii=0; ii<5; ii++) { 2291 for (ii=0; ii<5; ii++) {
@@ -2150,7 +2306,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2150 if (alt_ioc_ready) { 2306 if (alt_ioc_ready) {
2151 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { 2307 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
2152 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2308 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2153 "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); 2309 "Initial Alt IocFacts failed rc=%x\n",
2310 ioc->name, rc));
2154 /* Retry - alt IOC was initialized once 2311 /* Retry - alt IOC was initialized once
2155 */ 2312 */
2156 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); 2313 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2351,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2194 IRQF_SHARED, ioc->name, ioc); 2351 IRQF_SHARED, ioc->name, ioc);
2195 if (rc < 0) { 2352 if (rc < 0) {
2196 printk(MYIOC_s_ERR_FMT "Unable to allocate " 2353 printk(MYIOC_s_ERR_FMT "Unable to allocate "
2197 "interrupt %d!\n", ioc->name, ioc->pcidev->irq); 2354 "interrupt %d!\n",
2355 ioc->name, ioc->pcidev->irq);
2198 if (ioc->msi_enable) 2356 if (ioc->msi_enable)
2199 pci_disable_msi(ioc->pcidev); 2357 pci_disable_msi(ioc->pcidev);
2200 return -EBUSY; 2358 ret = -EBUSY;
2359 goto out;
2201 } 2360 }
2202 irq_allocated = 1; 2361 irq_allocated = 1;
2203 ioc->pci_irq = ioc->pcidev->irq; 2362 ioc->pci_irq = ioc->pcidev->irq;
2204 pci_set_master(ioc->pcidev); /* ?? */ 2363 pci_set_master(ioc->pcidev); /* ?? */
2205 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2364 pci_set_drvdata(ioc->pcidev, ioc);
2206 "%d\n", ioc->name, ioc->pcidev->irq)); 2365 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2366 "installed at interrupt %d\n", ioc->name,
2367 ioc->pcidev->irq));
2207 } 2368 }
2208 } 2369 }
2209 2370
@@ -2212,17 +2373,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2212 * init as upper addresses are needed for init. 2373 * init as upper addresses are needed for init.
2213 * If fails, continue with alt-ioc processing 2374 * If fails, continue with alt-ioc processing
2214 */ 2375 */
2376 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
2377 ioc->name));
2215 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) 2378 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
2216 ret = -3; 2379 ret = -3;
2217 2380
2218 /* May need to check/upload firmware & data here! 2381 /* May need to check/upload firmware & data here!
2219 * If fails, continue with alt-ioc processing 2382 * If fails, continue with alt-ioc processing
2220 */ 2383 */
2384 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
2385 ioc->name));
2221 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) 2386 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
2222 ret = -4; 2387 ret = -4;
2223// NEW! 2388// NEW!
2224 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { 2389 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
2225 printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n", 2390 printk(MYIOC_s_WARN_FMT
2391 ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
2226 ioc->alt_ioc->name, rc); 2392 ioc->alt_ioc->name, rc);
2227 alt_ioc_ready = 0; 2393 alt_ioc_ready = 0;
2228 reset_alt_ioc_active = 0; 2394 reset_alt_ioc_active = 0;
@@ -2232,8 +2398,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2232 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { 2398 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
2233 alt_ioc_ready = 0; 2399 alt_ioc_ready = 0;
2234 reset_alt_ioc_active = 0; 2400 reset_alt_ioc_active = 0;
2235 printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n", 2401 printk(MYIOC_s_WARN_FMT
2236 ioc->alt_ioc->name, rc); 2402 ": alt-ioc: (%d) init failure WARNING!\n",
2403 ioc->alt_ioc->name, rc);
2237 } 2404 }
2238 } 2405 }
2239 2406
@@ -2269,28 +2436,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2269 } 2436 }
2270 } 2437 }
2271 2438
2439 /* Enable MPT base driver management of EventNotification
2440 * and EventAck handling.
2441 */
2442 if ((ret == 0) && (!ioc->facts.EventState)) {
2443 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2444 "SendEventNotification\n",
2445 ioc->name));
2446 ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
2447 }
2448
2449 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2450 rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
2451
2272 if (ret == 0) { 2452 if (ret == 0) {
2273 /* Enable! (reply interrupt) */ 2453 /* Enable! (reply interrupt) */
2274 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 2454 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
2275 ioc->active = 1; 2455 ioc->active = 1;
2276 } 2456 }
2277 2457 if (rc == 0) { /* alt ioc */
2278 if (reset_alt_ioc_active && ioc->alt_ioc) { 2458 if (reset_alt_ioc_active && ioc->alt_ioc) {
2279 /* (re)Enable alt-IOC! (reply interrupt) */ 2459 /* (re)Enable alt-IOC! (reply interrupt) */
2280 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n", 2460 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
2281 ioc->alt_ioc->name)); 2461 "reply irq re-enabled\n",
2282 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); 2462 ioc->alt_ioc->name));
2283 ioc->alt_ioc->active = 1; 2463 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2464 MPI_HIM_DIM);
2465 ioc->alt_ioc->active = 1;
2466 }
2284 } 2467 }
2285 2468
2286 /* Enable MPT base driver management of EventNotification
2287 * and EventAck handling.
2288 */
2289 if ((ret == 0) && (!ioc->facts.EventState))
2290 (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */
2291
2292 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2293 (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */
2294 2469
2295 /* Add additional "reason" check before call to GetLanConfigPages 2470 /* Add additional "reason" check before call to GetLanConfigPages
2296 * (combined with GetIoUnitPage2 call). This prevents a somewhat 2471 * (combined with GetIoUnitPage2 call). This prevents a somewhat
@@ -2306,8 +2481,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2306 mutex_init(&ioc->raid_data.inactive_list_mutex); 2481 mutex_init(&ioc->raid_data.inactive_list_mutex);
2307 INIT_LIST_HEAD(&ioc->raid_data.inactive_list); 2482 INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
2308 2483
2309 if (ioc->bus_type == SAS) { 2484 switch (ioc->bus_type) {
2310 2485
2486 case SAS:
2311 /* clear persistency table */ 2487 /* clear persistency table */
2312 if(ioc->facts.IOCExceptions & 2488 if(ioc->facts.IOCExceptions &
2313 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { 2489 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2497,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2321 */ 2497 */
2322 mpt_findImVolumes(ioc); 2498 mpt_findImVolumes(ioc);
2323 2499
2324 } else if (ioc->bus_type == FC) { 2500 /* Check, and possibly reset, the coalescing value
2325 if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && 2501 */
2502 mpt_read_ioc_pg_1(ioc);
2503
2504 break;
2505
2506 case FC:
2507 if ((ioc->pfacts[0].ProtocolFlags &
2508 MPI_PORTFACTS_PROTOCOL_LAN) &&
2326 (ioc->lan_cnfg_page0.Header.PageLength == 0)) { 2509 (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
2327 /* 2510 /*
2328 * Pre-fetch the ports LAN MAC address! 2511 * Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2514,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2331 (void) GetLanConfigPages(ioc); 2514 (void) GetLanConfigPages(ioc);
2332 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 2515 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
2333 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2516 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2334 "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", 2517 "LanAddr = %02X:%02X:%02X"
2335 ioc->name, a[5], a[4], a[3], a[2], a[1], a[0])); 2518 ":%02X:%02X:%02X\n",
2336 2519 ioc->name, a[5], a[4],
2520 a[3], a[2], a[1], a[0]));
2337 } 2521 }
2338 } else { 2522 break;
2523
2524 case SPI:
2339 /* Get NVRAM and adapter maximums from SPP 0 and 2 2525 /* Get NVRAM and adapter maximums from SPP 0 and 2
2340 */ 2526 */
2341 mpt_GetScsiPortSettings(ioc, 0); 2527 mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2540,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2354 mpt_read_ioc_pg_1(ioc); 2540 mpt_read_ioc_pg_1(ioc);
2355 2541
2356 mpt_read_ioc_pg_4(ioc); 2542 mpt_read_ioc_pg_4(ioc);
2543
2544 break;
2357 } 2545 }
2358 2546
2359 GetIoUnitPage2(ioc); 2547 GetIoUnitPage2(ioc);
@@ -2435,16 +2623,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
2435 if (_pcidev == peer) { 2623 if (_pcidev == peer) {
2436 /* Paranoia checks */ 2624 /* Paranoia checks */
2437 if (ioc->alt_ioc != NULL) { 2625 if (ioc->alt_ioc != NULL) {
2438 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2626 printk(MYIOC_s_WARN_FMT
2439 ioc->name, ioc->alt_ioc->name); 2627 "Oops, already bound (%s <==> %s)!\n",
2628 ioc->name, ioc->name, ioc->alt_ioc->name);
2440 break; 2629 break;
2441 } else if (ioc_srch->alt_ioc != NULL) { 2630 } else if (ioc_srch->alt_ioc != NULL) {
2442 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2631 printk(MYIOC_s_WARN_FMT
2443 ioc_srch->name, ioc_srch->alt_ioc->name); 2632 "Oops, already bound (%s <==> %s)!\n",
2633 ioc_srch->name, ioc_srch->name,
2634 ioc_srch->alt_ioc->name);
2444 break; 2635 break;
2445 } 2636 }
2446 dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n", 2637 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2447 ioc->name, ioc_srch->name)); 2638 "FOUND! binding %s <==> %s\n",
2639 ioc->name, ioc->name, ioc_srch->name));
2448 ioc_srch->alt_ioc = ioc; 2640 ioc_srch->alt_ioc = ioc;
2449 ioc->alt_ioc = ioc_srch; 2641 ioc->alt_ioc = ioc_srch;
2450 } 2642 }
@@ -2464,8 +2656,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2464 int ret; 2656 int ret;
2465 2657
2466 if (ioc->cached_fw != NULL) { 2658 if (ioc->cached_fw != NULL) {
2467 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " 2659 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2468 "adapter\n", __func__, ioc->name)); 2660 "%s: Pushing FW onto adapter\n", __func__, ioc->name));
2469 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) 2661 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2470 ioc->cached_fw, CAN_SLEEP)) < 0) { 2662 ioc->cached_fw, CAN_SLEEP)) < 0) {
2471 printk(MYIOC_s_WARN_FMT 2663 printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2666,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2474 } 2666 }
2475 } 2667 }
2476 2668
2669 /*
2670 * Put the controller into ready state (if its not already)
2671 */
2672 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
2673 if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
2674 CAN_SLEEP)) {
2675 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
2676 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
2677 "reset failed to put ioc in ready state!\n",
2678 ioc->name, __func__);
2679 } else
2680 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
2681 "failed!\n", ioc->name, __func__);
2682 }
2683
2684
2477 /* Disable adapter interrupts! */ 2685 /* Disable adapter interrupts! */
2686 synchronize_irq(ioc->pcidev->irq);
2478 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2687 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2479 ioc->active = 0; 2688 ioc->active = 0;
2689
2480 /* Clear any lingering interrupt */ 2690 /* Clear any lingering interrupt */
2481 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2691 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2692 CHIPREG_READ32(&ioc->chip->IntStatus);
2482 2693
2483 if (ioc->alloc != NULL) { 2694 if (ioc->alloc != NULL) {
2484 sz = ioc->alloc_sz; 2695 sz = ioc->alloc_sz;
@@ -2538,19 +2749,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2538 if((ret = mpt_host_page_access_control(ioc, 2749 if((ret = mpt_host_page_access_control(ioc,
2539 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { 2750 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2540 printk(MYIOC_s_ERR_FMT 2751 printk(MYIOC_s_ERR_FMT
2541 "host page buffers free failed (%d)!\n", 2752 ": %s: host page buffers free failed (%d)!\n",
2542 ioc->name, ret); 2753 ioc->name, __func__, ret);
2543 } 2754 }
2544 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n", 2755 dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2545 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); 2756 "HostPageBuffer free @ %p, sz=%d bytes\n",
2757 ioc->name, ioc->HostPageBuffer,
2758 ioc->HostPageBuffer_sz));
2546 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, 2759 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2547 ioc->HostPageBuffer, ioc->HostPageBuffer_dma); 2760 ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
2548 ioc->HostPageBuffer = NULL; 2761 ioc->HostPageBuffer = NULL;
2549 ioc->HostPageBuffer_sz = 0; 2762 ioc->HostPageBuffer_sz = 0;
2550 ioc->alloc_total -= ioc->HostPageBuffer_sz; 2763 ioc->alloc_total -= ioc->HostPageBuffer_sz;
2551 } 2764 }
2552}
2553 2765
2766 pci_set_drvdata(ioc->pcidev, NULL);
2767}
2554/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2768/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2555/** 2769/**
2556 * mpt_adapter_dispose - Free all resources associated with an MPT adapter 2770 * mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2904,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2690 } 2904 }
2691 2905
2692 /* Is it already READY? */ 2906 /* Is it already READY? */
2693 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) 2907 if (!statefault &&
2908 ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
2909 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2910 "IOC is in READY state\n", ioc->name));
2694 return 0; 2911 return 0;
2912 }
2695 2913
2696 /* 2914 /*
2697 * Check to see if IOC is in FAULT state. 2915 * Check to see if IOC is in FAULT state.
@@ -2764,8 +2982,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2764 2982
2765 ii++; cntdn--; 2983 ii++; cntdn--;
2766 if (!cntdn) { 2984 if (!cntdn) {
2767 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 2985 printk(MYIOC_s_ERR_FMT
2768 ioc->name, (int)((ii+5)/HZ)); 2986 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
2987 ioc->name, ioc_state, (int)((ii+5)/HZ));
2769 return -ETIME; 2988 return -ETIME;
2770 } 2989 }
2771 2990
@@ -2778,9 +2997,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2778 } 2997 }
2779 2998
2780 if (statefault < 3) { 2999 if (statefault < 3) {
2781 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", 3000 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
2782 ioc->name, 3001 statefault == 1 ? "stuck handshake" : "IOC FAULT");
2783 statefault==1 ? "stuck handshake" : "IOC FAULT");
2784 } 3002 }
2785 3003
2786 return hard_reset_done; 3004 return hard_reset_done;
@@ -2833,8 +3051,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2833 3051
2834 /* IOC *must* NOT be in RESET state! */ 3052 /* IOC *must* NOT be in RESET state! */
2835 if (ioc->last_state == MPI_IOC_STATE_RESET) { 3053 if (ioc->last_state == MPI_IOC_STATE_RESET) {
2836 printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n", 3054 printk(KERN_ERR MYNAM
2837 ioc->name, ioc->last_state ); 3055 ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
3056 ioc->name, ioc->last_state);
2838 return -44; 3057 return -44;
2839 } 3058 }
2840 3059
@@ -2896,7 +3115,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2896 * Old: u16{Major(4),Minor(4),SubMinor(8)} 3115 * Old: u16{Major(4),Minor(4),SubMinor(8)}
2897 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} 3116 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
2898 */ 3117 */
2899 if (facts->MsgVersion < 0x0102) { 3118 if (facts->MsgVersion < MPI_VERSION_01_02) {
2900 /* 3119 /*
2901 * Handle old FC f/w style, convert to new... 3120 * Handle old FC f/w style, convert to new...
2902 */ 3121 */
@@ -2908,9 +3127,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2908 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); 3127 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
2909 3128
2910 facts->ProductID = le16_to_cpu(facts->ProductID); 3129 facts->ProductID = le16_to_cpu(facts->ProductID);
3130
2911 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) 3131 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2912 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) 3132 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
2913 ioc->ir_firmware = 1; 3133 ioc->ir_firmware = 1;
3134
2914 facts->CurrentHostMfaHighAddr = 3135 facts->CurrentHostMfaHighAddr =
2915 le32_to_cpu(facts->CurrentHostMfaHighAddr); 3136 le32_to_cpu(facts->CurrentHostMfaHighAddr);
2916 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); 3137 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3147,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2926 * to 14 in MPI-1.01.0x. 3147 * to 14 in MPI-1.01.0x.
2927 */ 3148 */
2928 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && 3149 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
2929 facts->MsgVersion > 0x0100) { 3150 facts->MsgVersion > MPI_VERSION_01_00) {
2930 facts->FWImageSize = le32_to_cpu(facts->FWImageSize); 3151 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
2931 } 3152 }
2932 3153
@@ -3108,6 +3329,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3108 3329
3109 ioc_init.MaxDevices = (U8)ioc->devices_per_bus; 3330 ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3110 ioc_init.MaxBuses = (U8)ioc->number_of_buses; 3331 ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3332
3111 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", 3333 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3112 ioc->name, ioc->facts.MsgVersion)); 3334 ioc->name, ioc->facts.MsgVersion));
3113 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { 3335 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3344,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3122 } 3344 }
3123 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 3345 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3124 3346
3125 if (sizeof(dma_addr_t) == sizeof(u64)) { 3347 if (ioc->sg_addr_size == sizeof(u64)) {
3126 /* Save the upper 32-bits of the request 3348 /* Save the upper 32-bits of the request
3127 * (reply) and sense buffers. 3349 * (reply) and sense buffers.
3128 */ 3350 */
@@ -3325,11 +3547,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3325 FWUpload_t *prequest; 3547 FWUpload_t *prequest;
3326 FWUploadReply_t *preply; 3548 FWUploadReply_t *preply;
3327 FWUploadTCSGE_t *ptcsge; 3549 FWUploadTCSGE_t *ptcsge;
3328 int sgeoffset;
3329 u32 flagsLength; 3550 u32 flagsLength;
3330 int ii, sz, reply_sz; 3551 int ii, sz, reply_sz;
3331 int cmdStatus; 3552 int cmdStatus;
3332 3553 int request_size;
3333 /* If the image size is 0, we are done. 3554 /* If the image size is 0, we are done.
3334 */ 3555 */
3335 if ((sz = ioc->facts.FWImageSize) == 0) 3556 if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3585,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3364 ptcsge->ImageSize = cpu_to_le32(sz); 3585 ptcsge->ImageSize = cpu_to_le32(sz);
3365 ptcsge++; 3586 ptcsge++;
3366 3587
3367 sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
3368
3369 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; 3588 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3370 mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); 3589 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3371 3590 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3372 sgeoffset += sizeof(u32) + sizeof(dma_addr_t); 3591 ioc->SGE_size;
3373 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", 3592 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3374 ioc->name, prequest, sgeoffset)); 3593 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3594 ioc->facts.FWImageSize, request_size));
3375 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); 3595 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3376 3596
3377 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, 3597 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3378 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); 3598 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3379 3599
3380 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); 3600 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3601 "rc=%x \n", ioc->name, ii));
3381 3602
3382 cmdStatus = -EFAULT; 3603 cmdStatus = -EFAULT;
3383 if (ii == 0) { 3604 if (ii == 0) {
3384 /* Handshake transfer was complete and successful. 3605 /* Handshake transfer was complete and successful.
3385 * Check the Reply Frame. 3606 * Check the Reply Frame.
3386 */ 3607 */
3387 int status, transfer_sz; 3608 int status;
3388 status = le16_to_cpu(preply->IOCStatus); 3609 status = le16_to_cpu(preply->IOCStatus) &
3389 if (status == MPI_IOCSTATUS_SUCCESS) { 3610 MPI_IOCSTATUS_MASK;
3390 transfer_sz = le32_to_cpu(preply->ActualImageSize); 3611 if (status == MPI_IOCSTATUS_SUCCESS &&
3391 if (transfer_sz == sz) 3612 ioc->facts.FWImageSize ==
3613 le32_to_cpu(preply->ActualImageSize))
3392 cmdStatus = 0; 3614 cmdStatus = 0;
3393 }
3394 } 3615 }
3395 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", 3616 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
3396 ioc->name, cmdStatus)); 3617 ioc->name, cmdStatus));
3397 3618
3398 3619
3399 if (cmdStatus) { 3620 if (cmdStatus) {
3400 3621 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
3401 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", 3622 "freeing image \n", ioc->name));
3402 ioc->name));
3403 mpt_free_fw_memory(ioc); 3623 mpt_free_fw_memory(ioc);
3404 } 3624 }
3405 kfree(prequest); 3625 kfree(prequest);
@@ -3723,6 +3943,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3723 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3943 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3724 3944
3725 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3945 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3946
3947 if (!ignore)
3948 return 0;
3949
3726 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3950 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3727 "address=%p\n", ioc->name, __func__, 3951 "address=%p\n", ioc->name, __func__,
3728 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3952 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3964,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3740 "looking for READY STATE: doorbell=%x" 3964 "looking for READY STATE: doorbell=%x"
3741 " count=%d\n", 3965 " count=%d\n",
3742 ioc->name, doorbell, count)); 3966 ioc->name, doorbell, count));
3967
3743 if (doorbell == MPI_IOC_STATE_READY) { 3968 if (doorbell == MPI_IOC_STATE_READY) {
3744 return 1; 3969 return 1;
3745 } 3970 }
@@ -3890,6 +4115,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3890 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4115 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3891 doorbell &= MPI_IOC_STATE_MASK; 4116 doorbell &= MPI_IOC_STATE_MASK;
3892 4117
4118 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4119 "looking for READY STATE: doorbell=%x"
4120 " count=%d\n", ioc->name, doorbell, count));
4121
3893 if (doorbell == MPI_IOC_STATE_READY) { 4122 if (doorbell == MPI_IOC_STATE_READY) {
3894 break; 4123 break;
3895 } 4124 }
@@ -3901,6 +4130,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3901 mdelay (1000); 4130 mdelay (1000);
3902 } 4131 }
3903 } 4132 }
4133
4134 if (doorbell != MPI_IOC_STATE_READY)
4135 printk(MYIOC_s_ERR_FMT "Failed to come READY "
4136 "after reset! IocState=%x", ioc->name,
4137 doorbell);
3904 } 4138 }
3905 } 4139 }
3906 4140
@@ -4019,8 +4253,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
4019 if (sleepFlag != CAN_SLEEP) 4253 if (sleepFlag != CAN_SLEEP)
4020 count *= 10; 4254 count *= 10;
4021 4255
4022 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 4256 printk(MYIOC_s_ERR_FMT
4023 ioc->name, (int)((count+5)/HZ)); 4257 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
4258 ioc->name, state, (int)((count+5)/HZ));
4024 return -ETIME; 4259 return -ETIME;
4025 } 4260 }
4026 4261
@@ -4090,24 +4325,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
4090 * num_sge = num sge in request frame + last chain buffer 4325 * num_sge = num sge in request frame + last chain buffer
4091 * scale = num sge per chain buffer if no chain element 4326 * scale = num sge per chain buffer if no chain element
4092 */ 4327 */
4093 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4328 scale = ioc->req_sz / ioc->SGE_size;
4094 if (sizeof(dma_addr_t) == sizeof(u64)) 4329 if (ioc->sg_addr_size == sizeof(u64))
4095 num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4330 num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
4096 else 4331 else
4097 num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4332 num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
4098 4333
4099 if (sizeof(dma_addr_t) == sizeof(u64)) { 4334 if (ioc->sg_addr_size == sizeof(u64)) {
4100 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4335 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
4101 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4336 (ioc->req_sz - 60) / ioc->SGE_size;
4102 } else { 4337 } else {
4103 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4338 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
4104 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4339 scale + (ioc->req_sz - 64) / ioc->SGE_size;
4105 } 4340 }
4106 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", 4341 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
4107 ioc->name, num_sge, numSGE)); 4342 ioc->name, num_sge, numSGE));
4108 4343
4109 if ( numSGE > MPT_SCSI_SG_DEPTH ) 4344 if (ioc->bus_type == FC) {
4110 numSGE = MPT_SCSI_SG_DEPTH; 4345 if (numSGE > MPT_SCSI_FC_SG_DEPTH)
4346 numSGE = MPT_SCSI_FC_SG_DEPTH;
4347 } else {
4348 if (numSGE > MPT_SCSI_SG_DEPTH)
4349 numSGE = MPT_SCSI_SG_DEPTH;
4350 }
4111 4351
4112 num_chain = 1; 4352 num_chain = 1;
4113 while (numSGE - num_sge > 0) { 4353 while (numSGE - num_sge > 0) {
@@ -4161,12 +4401,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4161 dma_addr_t alloc_dma; 4401 dma_addr_t alloc_dma;
4162 u8 *mem; 4402 u8 *mem;
4163 int i, reply_sz, sz, total_size, num_chain; 4403 int i, reply_sz, sz, total_size, num_chain;
4404 u64 dma_mask;
4405
4406 dma_mask = 0;
4164 4407
4165 /* Prime reply FIFO... */ 4408 /* Prime reply FIFO... */
4166 4409
4167 if (ioc->reply_frames == NULL) { 4410 if (ioc->reply_frames == NULL) {
4168 if ( (num_chain = initChainBuffers(ioc)) < 0) 4411 if ( (num_chain = initChainBuffers(ioc)) < 0)
4169 return -1; 4412 return -1;
4413 /*
4414 * 1078 errata workaround for the 36GB limitation
4415 */
4416 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4417 ioc->dma_mask > DMA_35BIT_MASK) {
4418 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4419 && !pci_set_consistent_dma_mask(ioc->pcidev,
4420 DMA_BIT_MASK(32))) {
4421 dma_mask = DMA_35BIT_MASK;
4422 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4423 "setting 35 bit addressing for "
4424 "Request/Reply/Chain and Sense Buffers\n",
4425 ioc->name));
4426 } else {
 4427 /* Resetting DMA mask to 64 bit */
4428 pci_set_dma_mask(ioc->pcidev,
4429 DMA_BIT_MASK(64));
4430 pci_set_consistent_dma_mask(ioc->pcidev,
4431 DMA_BIT_MASK(64));
4432
4433 printk(MYIOC_s_ERR_FMT
4434 "failed setting 35 bit addressing for "
4435 "Request/Reply/Chain and Sense Buffers\n",
4436 ioc->name);
4437 return -1;
4438 }
4439 }
4170 4440
4171 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); 4441 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
4172 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", 4442 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
@@ -4305,9 +4575,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4305 alloc_dma += ioc->reply_sz; 4575 alloc_dma += ioc->reply_sz;
4306 } 4576 }
4307 4577
4578 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4579 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4580 ioc->dma_mask))
4581 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4582 "restoring 64 bit addressing\n", ioc->name));
4583
4308 return 0; 4584 return 0;
4309 4585
4310out_fail: 4586out_fail:
4587
4311 if (ioc->alloc != NULL) { 4588 if (ioc->alloc != NULL) {
4312 sz = ioc->alloc_sz; 4589 sz = ioc->alloc_sz;
4313 pci_free_consistent(ioc->pcidev, 4590 pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4601,13 @@ out_fail:
4324 ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 4601 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
4325 ioc->sense_buf_pool = NULL; 4602 ioc->sense_buf_pool = NULL;
4326 } 4603 }
4604
4605 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4606 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4607 DMA_BIT_MASK(64)))
4608 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4609 "restoring 64 bit addressing\n", ioc->name));
4610
4327 return -1; 4611 return -1;
4328} 4612}
4329 4613
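The PrimeIocFifos hunks above add the 1078 errata handling: before carving out the Request/Reply/Chain and Sense buffers, the DMA masks are dropped to 32 bits so the coherent allocations land below the errata boundary, and the adapter's normal 64-bit mask is restored once the buffers are in place (on success and on the out_fail path alike). A minimal sketch of that allocate-under-a-narrower-mask pattern, assuming a hypothetical my_alloc_fifos() standing in for the actual allocations (this is not the driver code itself):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

static int my_alloc_fifos(struct pci_dev *pdev);	/* hypothetical */

static int my_alloc_fifos_for_1078(struct pci_dev *pdev, u64 normal_mask)
{
	u64 lowered = 0;
	int rc;

	if (normal_mask > DMA_35BIT_MASK) {
		/* Narrow the masks so coherent buffers are allocated low. */
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			/* Could not narrow; put the wide masks back and bail. */
			pci_set_dma_mask(pdev, normal_mask);
			pci_set_consistent_dma_mask(pdev, normal_mask);
			return -ENODEV;
		}
		lowered = DMA_35BIT_MASK;
	}

	rc = my_alloc_fifos(pdev);	/* the long-lived coherent buffers */

	if (lowered) {
		/* Restore the adapter's normal addressing for ordinary I/O. */
		pci_set_dma_mask(pdev, normal_mask);
		pci_set_consistent_dma_mask(pdev, normal_mask);
	}
	return rc;
}

Restoring the wide mask afterwards keeps ordinary I/O mappings unrestricted; only the long-lived Request/Reply/Chain and Sense buffers need to obey the errata.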
@@ -4759,7 +5043,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4759 SasIoUnitControlReply_t *sasIoUnitCntrReply; 5043 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4760 MPT_FRAME_HDR *mf = NULL; 5044 MPT_FRAME_HDR *mf = NULL;
4761 MPIHeader_t *mpi_hdr; 5045 MPIHeader_t *mpi_hdr;
5046 int ret = 0;
5047 unsigned long timeleft;
5048
5049 mutex_lock(&ioc->mptbase_cmds.mutex);
4762 5050
5051 /* init the internal cmd struct */
5052 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
5053 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
4763 5054
 4764 /* ensure garbage is not sent to fw */ 5055 /* ensure garbage is not sent to fw */
4765 switch(persist_opcode) { 5056 switch(persist_opcode) {
@@ -4769,17 +5060,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4769 break; 5060 break;
4770 5061
4771 default: 5062 default:
4772 return -1; 5063 ret = -1;
4773 break; 5064 goto out;
4774 } 5065 }
4775 5066
4776 printk("%s: persist_opcode=%x\n",__func__, persist_opcode); 5067 printk(KERN_DEBUG "%s: persist_opcode=%x\n",
5068 __func__, persist_opcode);
4777 5069
4778 /* Get a MF for this command. 5070 /* Get a MF for this command.
4779 */ 5071 */
4780 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5072 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4781 printk("%s: no msg frames!\n",__func__); 5073 printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
4782 return -1; 5074 ret = -1;
5075 goto out;
4783 } 5076 }
4784 5077
4785 mpi_hdr = (MPIHeader_t *) mf; 5078 mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5082,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4789 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; 5082 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4790 sasIoUnitCntrReq->Operation = persist_opcode; 5083 sasIoUnitCntrReq->Operation = persist_opcode;
4791 5084
4792 init_timer(&ioc->persist_timer);
4793 ioc->persist_timer.data = (unsigned long) ioc;
4794 ioc->persist_timer.function = mpt_timer_expired;
4795 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4796 ioc->persist_wait_done=0;
4797 add_timer(&ioc->persist_timer);
4798 mpt_put_msg_frame(mpt_base_index, ioc, mf); 5085 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4799 wait_event(mpt_waitq, ioc->persist_wait_done); 5086 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
5087 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
5088 ret = -ETIME;
5089 printk(KERN_DEBUG "%s: failed\n", __func__);
5090 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5091 goto out;
5092 if (!timeleft) {
5093 printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
5094 ioc->name, __func__);
5095 mpt_HardResetHandler(ioc, CAN_SLEEP);
5096 mpt_free_msg_frame(ioc, mf);
5097 }
5098 goto out;
5099 }
5100
5101 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
5102 ret = -1;
5103 goto out;
5104 }
4800 5105
4801 sasIoUnitCntrReply = 5106 sasIoUnitCntrReply =
4802 (SasIoUnitControlReply_t *)ioc->persist_reply_frame; 5107 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
4803 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 5108 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4804 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 5109 printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4805 __func__, 5110 __func__, sasIoUnitCntrReply->IOCStatus,
4806 sasIoUnitCntrReply->IOCStatus,
4807 sasIoUnitCntrReply->IOCLogInfo); 5111 sasIoUnitCntrReply->IOCLogInfo);
4808 return -1; 5112 printk(KERN_DEBUG "%s: failed\n", __func__);
4809 } 5113 ret = -1;
5114 } else
5115 printk(KERN_DEBUG "%s: success\n", __func__);
5116 out:
4810 5117
4811 printk("%s: success\n",__func__); 5118 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
4812 return 0; 5119 mutex_unlock(&ioc->mptbase_cmds.mutex);
5120 return ret;
4813} 5121}
4814 5122
4815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5123/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5394,17 +5702,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
5394 * -ENOMEM if pci_alloc failed 5702 * -ENOMEM if pci_alloc failed
5395 **/ 5703 **/
5396int 5704int
5397mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) 5705mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
5706 RaidPhysDiskPage0_t *phys_disk)
5398{ 5707{
5399 CONFIGPARMS cfg; 5708 CONFIGPARMS cfg;
5400 ConfigPageHeader_t hdr; 5709 ConfigPageHeader_t hdr;
5401 dma_addr_t dma_handle; 5710 dma_addr_t dma_handle;
5402 pRaidPhysDiskPage0_t buffer = NULL; 5711 pRaidPhysDiskPage0_t buffer = NULL;
5403 int rc; 5712 int rc;
5404 5713
5405 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5714 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5406 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5715 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5716 memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
5407 5717
5718 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
5408 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5719 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5409 cfg.cfghdr.hdr = &hdr; 5720 cfg.cfghdr.hdr = &hdr;
5410 cfg.physAddr = -1; 5721 cfg.physAddr = -1;
@@ -5451,6 +5762,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
5451} 5762}
5452 5763
5453/** 5764/**
 5765 * mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys_disk_num
 5766 * @ioc: Pointer to an Adapter Structure
5767 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5768 *
5769 * Return:
 5770 * returns the number of paths
5771 **/
5772int
5773mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
5774{
5775 CONFIGPARMS cfg;
5776 ConfigPageHeader_t hdr;
5777 dma_addr_t dma_handle;
5778 pRaidPhysDiskPage1_t buffer = NULL;
5779 int rc;
5780
5781 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5782 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5783
5784 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5785 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5786 hdr.PageNumber = 1;
5787 cfg.cfghdr.hdr = &hdr;
5788 cfg.physAddr = -1;
5789 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5790
5791 if (mpt_config(ioc, &cfg) != 0) {
5792 rc = 0;
5793 goto out;
5794 }
5795
5796 if (!hdr.PageLength) {
5797 rc = 0;
5798 goto out;
5799 }
5800
5801 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5802 &dma_handle);
5803
5804 if (!buffer) {
5805 rc = 0;
5806 goto out;
5807 }
5808
5809 cfg.physAddr = dma_handle;
5810 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5811 cfg.pageAddr = phys_disk_num;
5812
5813 if (mpt_config(ioc, &cfg) != 0) {
5814 rc = 0;
5815 goto out;
5816 }
5817
5818 rc = buffer->NumPhysDiskPaths;
5819 out:
5820
5821 if (buffer)
5822 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5823 dma_handle);
5824
5825 return rc;
5826}
5827EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
5828
5829/**
5830 * mpt_raid_phys_disk_pg1 - returns phys disk page 1
 5831 * @ioc: Pointer to an Adapter Structure
5832 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5833 * @phys_disk: requested payload data returned
5834 *
5835 * Return:
5836 * 0 on success
5837 * -EFAULT if read of config page header fails or data pointer not NULL
5838 * -ENOMEM if pci_alloc failed
5839 **/
5840int
5841mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
5842 RaidPhysDiskPage1_t *phys_disk)
5843{
5844 CONFIGPARMS cfg;
5845 ConfigPageHeader_t hdr;
5846 dma_addr_t dma_handle;
5847 pRaidPhysDiskPage1_t buffer = NULL;
5848 int rc;
5849 int i;
5850 __le64 sas_address;
5851
5852 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5853 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5854 rc = 0;
5855
5856 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5857 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5858 hdr.PageNumber = 1;
5859 cfg.cfghdr.hdr = &hdr;
5860 cfg.physAddr = -1;
5861 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5862
5863 if (mpt_config(ioc, &cfg) != 0) {
5864 rc = -EFAULT;
5865 goto out;
5866 }
5867
5868 if (!hdr.PageLength) {
5869 rc = -EFAULT;
5870 goto out;
5871 }
5872
5873 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5874 &dma_handle);
5875
5876 if (!buffer) {
5877 rc = -ENOMEM;
5878 goto out;
5879 }
5880
5881 cfg.physAddr = dma_handle;
5882 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5883 cfg.pageAddr = phys_disk_num;
5884
5885 if (mpt_config(ioc, &cfg) != 0) {
5886 rc = -EFAULT;
5887 goto out;
5888 }
5889
5890 phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
5891 phys_disk->PhysDiskNum = phys_disk_num;
5892 for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
5893 phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
5894 phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
5895 phys_disk->Path[i].OwnerIdentifier =
5896 buffer->Path[i].OwnerIdentifier;
5897 phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
5898 memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
5899 sas_address = le64_to_cpu(sas_address);
5900 memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
5901 memcpy(&sas_address,
5902 &buffer->Path[i].OwnerWWID, sizeof(__le64));
5903 sas_address = le64_to_cpu(sas_address);
5904 memcpy(&phys_disk->Path[i].OwnerWWID,
5905 &sas_address, sizeof(__le64));
5906 }
5907
5908 out:
5909
5910 if (buffer)
5911 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5912 dma_handle);
5913
5914 return rc;
5915}
5916EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
5917
5918
5919/**
5454 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5920 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
 5455 * @ioc: Pointer to an Adapter Structure 5921 * @ioc: Pointer to an Adapter Structure
5456 * 5922 *
@@ -5775,30 +6241,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
5775 * SendEventNotification - Send EventNotification (on or off) request to adapter 6241 * SendEventNotification - Send EventNotification (on or off) request to adapter
5776 * @ioc: Pointer to MPT_ADAPTER structure 6242 * @ioc: Pointer to MPT_ADAPTER structure
5777 * @EvSwitch: Event switch flags 6243 * @EvSwitch: Event switch flags
6244 * @sleepFlag: Specifies whether the process can sleep
5778 */ 6245 */
5779static int 6246static int
5780SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) 6247SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
5781{ 6248{
5782 EventNotification_t *evnp; 6249 EventNotification_t evn;
6250 MPIDefaultReply_t reply_buf;
5783 6251
5784 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); 6252 memset(&evn, 0, sizeof(EventNotification_t));
5785 if (evnp == NULL) { 6253 memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
5786 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
5787 ioc->name));
5788 return 0;
5789 }
5790 memset(evnp, 0, sizeof(*evnp));
5791
5792 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
5793 6254
5794 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 6255 evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
5795 evnp->ChainOffset = 0; 6256 evn.Switch = EvSwitch;
5796 evnp->MsgFlags = 0; 6257 evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
5797 evnp->Switch = EvSwitch;
5798 6258
5799 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); 6259 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6260 "Sending EventNotification (%d) request %p\n",
6261 ioc->name, EvSwitch, &evn));
5800 6262
5801 return 0; 6263 return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
6264 (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
6265 sleepFlag);
5802} 6266}
5803 6267
5804/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6268/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5814,7 +6278,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5814 6278
5815 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6279 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5816 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 6280 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5817 ioc->name,__func__)); 6281 ioc->name, __func__));
5818 return -1; 6282 return -1;
5819 } 6283 }
5820 6284
@@ -5851,12 +6315,19 @@ int
5851mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 6315mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5852{ 6316{
5853 Config_t *pReq; 6317 Config_t *pReq;
6318 ConfigReply_t *pReply;
5854 ConfigExtendedPageHeader_t *pExtHdr = NULL; 6319 ConfigExtendedPageHeader_t *pExtHdr = NULL;
5855 MPT_FRAME_HDR *mf; 6320 MPT_FRAME_HDR *mf;
5856 unsigned long flags; 6321 int ii;
5857 int ii, rc;
5858 int flagsLength; 6322 int flagsLength;
5859 int in_isr; 6323 long timeout;
6324 int ret;
6325 u8 page_type = 0, extend_page;
6326 unsigned long timeleft;
6327 unsigned long flags;
6328 int in_isr;
6329 u8 issue_hard_reset = 0;
6330 u8 retry_count = 0;
5860 6331
5861 /* Prevent calling wait_event() (below), if caller happens 6332 /* Prevent calling wait_event() (below), if caller happens
5862 * to be in ISR context, because that is fatal! 6333 * to be in ISR context, because that is fatal!
@@ -5866,15 +6337,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5866 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", 6337 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
5867 ioc->name)); 6338 ioc->name));
5868 return -EPERM; 6339 return -EPERM;
6340 }
6341
6342 /* don't send a config page during diag reset */
6343 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6344 if (ioc->ioc_reset_in_progress) {
6345 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6346 "%s: busy with host reset\n", ioc->name, __func__));
6347 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6348 return -EBUSY;
6349 }
6350 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6351
6352 /* don't send if no chance of success */
6353 if (!ioc->active ||
6354 mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
6355 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6356 "%s: ioc not operational, %d, %xh\n",
6357 ioc->name, __func__, ioc->active,
6358 mpt_GetIocState(ioc, 0)));
6359 return -EFAULT;
5869 } 6360 }
5870 6361
6362 retry_config:
6363 mutex_lock(&ioc->mptbase_cmds.mutex);
6364 /* init the internal cmd struct */
6365 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
6366 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
6367
5871 /* Get and Populate a free Frame 6368 /* Get and Populate a free Frame
5872 */ 6369 */
5873 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6370 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5874 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", 6371 dcprintk(ioc, printk(MYIOC_s_WARN_FMT
5875 ioc->name)); 6372 "mpt_config: no msg frames!\n", ioc->name));
5876 return -EAGAIN; 6373 ret = -EAGAIN;
6374 goto out;
5877 } 6375 }
6376
5878 pReq = (Config_t *)mf; 6377 pReq = (Config_t *)mf;
5879 pReq->Action = pCfg->action; 6378 pReq->Action = pCfg->action;
5880 pReq->Reserved = 0; 6379 pReq->Reserved = 0;
@@ -5900,7 +6399,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5900 pReq->ExtPageType = pExtHdr->ExtPageType; 6399 pReq->ExtPageType = pExtHdr->ExtPageType;
5901 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 6400 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
5902 6401
5903 /* Page Length must be treated as a reserved field for the extended header. */ 6402 /* Page Length must be treated as a reserved field for the
6403 * extended header.
6404 */
5904 pReq->Header.PageLength = 0; 6405 pReq->Header.PageLength = 0;
5905 } 6406 }
5906 6407
@@ -5913,78 +6414,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5913 else 6414 else
5914 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 6415 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
5915 6416
5916 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { 6417 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
6418 MPI_CONFIG_PAGETYPE_EXTENDED) {
5917 flagsLength |= pExtHdr->ExtPageLength * 4; 6419 flagsLength |= pExtHdr->ExtPageLength * 4;
5918 6420 page_type = pReq->ExtPageType;
5919 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6421 extend_page = 1;
5920 ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); 6422 } else {
5921 }
5922 else {
5923 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; 6423 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
5924 6424 page_type = pReq->Header.PageType;
5925 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6425 extend_page = 0;
5926 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
5927 } 6426 }
5928 6427
5929 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 6428 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5930 6429 "Sending Config request type 0x%x, page 0x%x and action %d\n",
5931 /* Append pCfg pointer to end of mf 6430 ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
5932 */
5933 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5934
5935 /* Initalize the timer
5936 */
5937 init_timer_on_stack(&pCfg->timer);
5938 pCfg->timer.data = (unsigned long) ioc;
5939 pCfg->timer.function = mpt_timer_expired;
5940 pCfg->wait_done = 0;
5941
5942 /* Set the timer; ensure 10 second minimum */
5943 if (pCfg->timeout < 10)
5944 pCfg->timer.expires = jiffies + HZ*10;
5945 else
5946 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5947
5948 /* Add to end of Q, set timer and then issue this command */
5949 spin_lock_irqsave(&ioc->FreeQlock, flags);
5950 list_add_tail(&pCfg->linkage, &ioc->configQ);
5951 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5952 6431
5953 add_timer(&pCfg->timer); 6432 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
6433 timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
5954 mpt_put_msg_frame(mpt_base_index, ioc, mf); 6434 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5955 wait_event(mpt_waitq, pCfg->wait_done); 6435 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
6436 timeout);
6437 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
6438 ret = -ETIME;
6439 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6440 "Failed Sending Config request type 0x%x, page 0x%x,"
6441 " action %d, status %xh, time left %ld\n\n",
6442 ioc->name, page_type, pReq->Header.PageNumber,
6443 pReq->Action, ioc->mptbase_cmds.status, timeleft));
6444 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
6445 goto out;
6446 if (!timeleft)
6447 issue_hard_reset = 1;
6448 goto out;
6449 }
5956 6450
5957 /* mf has been freed - do not access */ 6451 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
6452 ret = -1;
6453 goto out;
6454 }
6455 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
6456 ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
6457 if (ret == MPI_IOCSTATUS_SUCCESS) {
6458 if (extend_page) {
6459 pCfg->cfghdr.ehdr->ExtPageLength =
6460 le16_to_cpu(pReply->ExtPageLength);
6461 pCfg->cfghdr.ehdr->ExtPageType =
6462 pReply->ExtPageType;
6463 }
6464 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
6465 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
6466 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
6467 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
5958 6468
5959 rc = pCfg->status; 6469 }
5960 6470
5961 return rc; 6471 if (retry_count)
5962} 6472 printk(MYIOC_s_INFO_FMT "Retry completed "
6473 "ret=0x%x timeleft=%ld\n",
6474 ioc->name, ret, timeleft);
5963 6475
5964/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6476 dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
5965/** 6477 ret, le32_to_cpu(pReply->IOCLogInfo)));
5966 * mpt_timer_expired - Callback for timer process.
5967 * Used only internal config functionality.
5968 * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
5969 */
5970static void
5971mpt_timer_expired(unsigned long data)
5972{
5973 MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
5974
5975 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
5976 6478
5977 /* Perform a FW reload */ 6479out:
5978 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
5979 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
5980 6480
5981 /* No more processing. 6481 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
5982 * Hard reset clean-up will wake up 6482 mutex_unlock(&ioc->mptbase_cmds.mutex);
5983 * process and free all resources. 6483 if (issue_hard_reset) {
5984 */ 6484 issue_hard_reset = 0;
5985 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); 6485 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
6486 ioc->name, __func__);
6487 mpt_HardResetHandler(ioc, CAN_SLEEP);
6488 mpt_free_msg_frame(ioc, mf);
6489 /* attempt one retry for a timed out command */
6490 if (!retry_count) {
6491 printk(MYIOC_s_INFO_FMT
6492 "Attempting Retry Config request"
6493 " type 0x%x, page 0x%x,"
6494 " action %d\n", ioc->name, page_type,
6495 pCfg->cfghdr.hdr->PageNumber, pCfg->action);
6496 retry_count++;
6497 goto retry_config;
6498 }
6499 }
6500 return ret;
5986 6501
5987 return;
5988} 6502}
5989 6503
5990/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6504/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,41 +6512,34 @@ mpt_timer_expired(unsigned long data)
5998static int 6512static int
5999mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 6513mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
6000{ 6514{
6001 CONFIGPARMS *pCfg; 6515 switch (reset_phase) {
6002 unsigned long flags; 6516 case MPT_IOC_SETUP_RESET:
6003 6517 ioc->taskmgmt_quiesce_io = 1;
6004 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6518 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6005 ": IOC %s_reset routed to MPT base driver!\n", 6519 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
6006 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 6520 break;
6007 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); 6521 case MPT_IOC_PRE_RESET:
6008 6522 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6009 if (reset_phase == MPT_IOC_SETUP_RESET) { 6523 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
6010 ; 6524 break;
6011 } else if (reset_phase == MPT_IOC_PRE_RESET) { 6525 case MPT_IOC_POST_RESET:
6012 /* If the internal config Q is not empty - 6526 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6013 * delete timer. MF resources will be freed when 6527 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
6014 * the FIFO's are primed. 6528/* wake up mptbase_cmds */
6015 */ 6529 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
6016 spin_lock_irqsave(&ioc->FreeQlock, flags); 6530 ioc->mptbase_cmds.status |=
6017 list_for_each_entry(pCfg, &ioc->configQ, linkage) 6531 MPT_MGMT_STATUS_DID_IOCRESET;
6018 del_timer(&pCfg->timer); 6532 complete(&ioc->mptbase_cmds.done);
6019 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
6020
6021 } else {
6022 CONFIGPARMS *pNext;
6023
6024 /* Search the configQ for internal commands.
6025 * Flush the Q, and wake up all suspended threads.
6026 */
6027 spin_lock_irqsave(&ioc->FreeQlock, flags);
6028 list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
6029 list_del(&pCfg->linkage);
6030
6031 pCfg->status = MPT_CONFIG_ERROR;
6032 pCfg->wait_done = 1;
6033 wake_up(&mpt_waitq);
6034 } 6533 }
6035 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 6534/* wake up taskmgmt_cmds */
6535 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
6536 ioc->taskmgmt_cmds.status |=
6537 MPT_MGMT_STATUS_DID_IOCRESET;
6538 complete(&ioc->taskmgmt_cmds.done);
6539 }
6540 break;
6541 default:
6542 break;
6036 } 6543 }
6037 6544
6038 return 1; /* currently means nothing really */ 6545 return 1; /* currently means nothing really */
@@ -6344,6 +6851,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6344 6851
6345 *size = y; 6852 *size = y;
6346} 6853}
6854/**
 6855 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
6856 * @ioc: Pointer to MPT_ADAPTER structure
6857 *
6858 * Returns 0 for SUCCESS or -1 if FAILED.
6859 *
 6860 * If -1 is returned, then it was not possible to set the flags
6861 **/
6862int
6863mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6864{
6865 unsigned long flags;
6866 int retval;
6867
6868 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6869 if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
6870 (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
6871 retval = -1;
6872 goto out;
6873 }
6874 retval = 0;
6875 ioc->taskmgmt_in_progress = 1;
6876 ioc->taskmgmt_quiesce_io = 1;
6877 if (ioc->alt_ioc) {
6878 ioc->alt_ioc->taskmgmt_in_progress = 1;
6879 ioc->alt_ioc->taskmgmt_quiesce_io = 1;
6880 }
6881 out:
6882 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6883 return retval;
6884}
6885EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
6886
6887/**
 6888 * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
6889 * @ioc: Pointer to MPT_ADAPTER structure
6890 *
6891 **/
6892void
6893mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6894{
6895 unsigned long flags;
6896
6897 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6898 ioc->taskmgmt_in_progress = 0;
6899 ioc->taskmgmt_quiesce_io = 0;
6900 if (ioc->alt_ioc) {
6901 ioc->alt_ioc->taskmgmt_in_progress = 0;
6902 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
6903 }
6904 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6905}
6906EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
6347 6907
6348 6908
6349/** 6909/**
@@ -6397,7 +6957,9 @@ int
6397mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 6957mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6398{ 6958{
6399 int rc; 6959 int rc;
6960 u8 cb_idx;
6400 unsigned long flags; 6961 unsigned long flags;
6962 unsigned long time_count;
6401 6963
6402 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); 6964 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
6403#ifdef MFCNT 6965#ifdef MFCNT
@@ -6410,14 +6972,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6410 /* Reset the adapter. Prevent more than 1 call to 6972 /* Reset the adapter. Prevent more than 1 call to
6411 * mpt_do_ioc_recovery at any instant in time. 6973 * mpt_do_ioc_recovery at any instant in time.
6412 */ 6974 */
6413 spin_lock_irqsave(&ioc->diagLock, flags); 6975 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6414 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ 6976 if (ioc->ioc_reset_in_progress) {
6415 spin_unlock_irqrestore(&ioc->diagLock, flags); 6977 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6416 return 0; 6978 return 0;
6417 } else {
6418 ioc->diagPending = 1;
6419 } 6979 }
6420 spin_unlock_irqrestore(&ioc->diagLock, flags); 6980 ioc->ioc_reset_in_progress = 1;
6981 if (ioc->alt_ioc)
6982 ioc->alt_ioc->ioc_reset_in_progress = 1;
6983 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6421 6984
6422 /* FIXME: If do_ioc_recovery fails, repeat.... 6985 /* FIXME: If do_ioc_recovery fails, repeat....
6423 */ 6986 */
@@ -6427,47 +6990,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6427 * Prevents timeouts occurring during a diagnostic reset...very bad. 6990 * Prevents timeouts occurring during a diagnostic reset...very bad.
6428 * For all other protocol drivers, this is a no-op. 6991 * For all other protocol drivers, this is a no-op.
6429 */ 6992 */
6430 { 6993 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6431 u8 cb_idx; 6994 if (MptResetHandlers[cb_idx]) {
6432 int r = 0; 6995 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6433 6996 if (ioc->alt_ioc)
6434 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6997 mpt_signal_reset(cb_idx, ioc->alt_ioc,
6435 if (MptResetHandlers[cb_idx]) { 6998 MPT_IOC_SETUP_RESET);
6436 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
6437 ioc->name, cb_idx));
6438 r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6439 if (ioc->alt_ioc) {
6440 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
6441 ioc->name, ioc->alt_ioc->name, cb_idx));
6442 r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
6443 }
6444 }
6445 } 6999 }
6446 } 7000 }
6447 7001
6448 if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { 7002 time_count = jiffies;
6449 printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); 7003 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
7004 if (rc != 0) {
7005 printk(KERN_WARNING MYNAM
7006 ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
7007 } else {
7008 if (ioc->hard_resets < -1)
7009 ioc->hard_resets++;
6450 } 7010 }
6451 ioc->reload_fw = 0;
6452 if (ioc->alt_ioc)
6453 ioc->alt_ioc->reload_fw = 0;
6454 7011
6455 spin_lock_irqsave(&ioc->diagLock, flags); 7012 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6456 ioc->diagPending = 0; 7013 ioc->ioc_reset_in_progress = 0;
6457 if (ioc->alt_ioc) 7014 ioc->taskmgmt_quiesce_io = 0;
6458 ioc->alt_ioc->diagPending = 0; 7015 ioc->taskmgmt_in_progress = 0;
6459 spin_unlock_irqrestore(&ioc->diagLock, flags); 7016 if (ioc->alt_ioc) {
7017 ioc->alt_ioc->ioc_reset_in_progress = 0;
7018 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
7019 ioc->alt_ioc->taskmgmt_in_progress = 0;
7020 }
7021 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6460 7022
6461 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); 7023 dtmprintk(ioc,
7024 printk(MYIOC_s_DEBUG_FMT
7025 "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
7026 jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
7027 "SUCCESS" : "FAILED")));
6462 7028
6463 return rc; 7029 return rc;
6464} 7030}
6465 7031
6466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7032#ifdef CONFIG_FUSION_LOGGING
6467static void 7033static void
6468EventDescriptionStr(u8 event, u32 evData0, char *evStr) 7034mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
6469{ 7035{
6470 char *ds = NULL; 7036 char *ds = NULL;
7037 u32 evData0;
7038 int ii;
7039 u8 event;
7040 char *evStr = ioc->evStr;
7041
7042 event = le32_to_cpu(pEventReply->Event) & 0xFF;
7043 evData0 = le32_to_cpu(pEventReply->Data[0]);
6471 7044
6472 switch(event) { 7045 switch(event) {
6473 case MPI_EVENT_NONE: 7046 case MPI_EVENT_NONE:
@@ -6501,9 +7074,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6501 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 7074 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
6502 ds = "Loop State(LIP) Change"; 7075 ds = "Loop State(LIP) Change";
6503 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 7076 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
6504 ds = "Loop State(LPE) Change"; /* ??? */ 7077 ds = "Loop State(LPE) Change";
6505 else 7078 else
6506 ds = "Loop State(LPB) Change"; /* ??? */ 7079 ds = "Loop State(LPB) Change";
6507 break; 7080 break;
6508 case MPI_EVENT_LOGOUT: 7081 case MPI_EVENT_LOGOUT:
6509 ds = "Logout"; 7082 ds = "Logout";
@@ -6703,28 +7276,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6703 } 7276 }
6704 case MPI_EVENT_IR2: 7277 case MPI_EVENT_IR2:
6705 { 7278 {
7279 u8 id = (u8)(evData0);
7280 u8 channel = (u8)(evData0 >> 8);
7281 u8 phys_num = (u8)(evData0 >> 24);
6706 u8 ReasonCode = (u8)(evData0 >> 16); 7282 u8 ReasonCode = (u8)(evData0 >> 16);
7283
6707 switch (ReasonCode) { 7284 switch (ReasonCode) {
6708 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: 7285 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
6709 ds = "IR2: LD State Changed"; 7286 snprintf(evStr, EVENT_DESCR_STR_SZ,
7287 "IR2: LD State Changed: "
7288 "id=%d channel=%d phys_num=%d",
7289 id, channel, phys_num);
6710 break; 7290 break;
6711 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: 7291 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
6712 ds = "IR2: PD State Changed"; 7292 snprintf(evStr, EVENT_DESCR_STR_SZ,
7293 "IR2: PD State Changed "
7294 "id=%d channel=%d phys_num=%d",
7295 id, channel, phys_num);
6713 break; 7296 break;
6714 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: 7297 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
6715 ds = "IR2: Bad Block Table Full"; 7298 snprintf(evStr, EVENT_DESCR_STR_SZ,
7299 "IR2: Bad Block Table Full: "
7300 "id=%d channel=%d phys_num=%d",
7301 id, channel, phys_num);
6716 break; 7302 break;
6717 case MPI_EVENT_IR2_RC_PD_INSERTED: 7303 case MPI_EVENT_IR2_RC_PD_INSERTED:
6718 ds = "IR2: PD Inserted"; 7304 snprintf(evStr, EVENT_DESCR_STR_SZ,
7305 "IR2: PD Inserted: "
7306 "id=%d channel=%d phys_num=%d",
7307 id, channel, phys_num);
6719 break; 7308 break;
6720 case MPI_EVENT_IR2_RC_PD_REMOVED: 7309 case MPI_EVENT_IR2_RC_PD_REMOVED:
6721 ds = "IR2: PD Removed"; 7310 snprintf(evStr, EVENT_DESCR_STR_SZ,
7311 "IR2: PD Removed: "
7312 "id=%d channel=%d phys_num=%d",
7313 id, channel, phys_num);
6722 break; 7314 break;
6723 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: 7315 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
6724 ds = "IR2: Foreign CFG Detected"; 7316 snprintf(evStr, EVENT_DESCR_STR_SZ,
7317 "IR2: Foreign CFG Detected: "
7318 "id=%d channel=%d phys_num=%d",
7319 id, channel, phys_num);
6725 break; 7320 break;
6726 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: 7321 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
6727 ds = "IR2: Rebuild Medium Error"; 7322 snprintf(evStr, EVENT_DESCR_STR_SZ,
7323 "IR2: Rebuild Medium Error: "
7324 "id=%d channel=%d phys_num=%d",
7325 id, channel, phys_num);
7326 break;
7327 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
7328 snprintf(evStr, EVENT_DESCR_STR_SZ,
7329 "IR2: Dual Port Added: "
7330 "id=%d channel=%d phys_num=%d",
7331 id, channel, phys_num);
7332 break;
7333 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
7334 snprintf(evStr, EVENT_DESCR_STR_SZ,
7335 "IR2: Dual Port Removed: "
7336 "id=%d channel=%d phys_num=%d",
7337 id, channel, phys_num);
6728 break; 7338 break;
6729 default: 7339 default:
6730 ds = "IR2"; 7340 ds = "IR2";
@@ -6760,13 +7370,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6760 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 7370 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
6761 { 7371 {
6762 u8 reason = (u8)(evData0); 7372 u8 reason = (u8)(evData0);
6763 u8 port_num = (u8)(evData0 >> 8);
6764 u16 handle = le16_to_cpu(evData0 >> 16);
6765 7373
6766 snprintf(evStr, EVENT_DESCR_STR_SZ, 7374 switch (reason) {
6767 "SAS Initiator Device Status Change: reason=0x%02x " 7375 case MPI_EVENT_SAS_INIT_RC_ADDED:
6768 "port=%d handle=0x%04x", 7376 ds = "SAS Initiator Status Change: Added";
6769 reason, port_num, handle); 7377 break;
7378 case MPI_EVENT_SAS_INIT_RC_REMOVED:
7379 ds = "SAS Initiator Status Change: Deleted";
7380 break;
7381 default:
7382 ds = "SAS Initiator Status Change";
7383 break;
7384 }
6770 break; 7385 break;
6771 } 7386 }
6772 7387
@@ -6814,6 +7429,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6814 break; 7429 break;
6815 } 7430 }
6816 7431
7432 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
7433 {
7434 u8 reason = (u8)(evData0);
7435
7436 switch (reason) {
7437 case MPI_EVENT_SAS_EXP_RC_ADDED:
7438 ds = "Expander Status Change: Added";
7439 break;
7440 case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
7441 ds = "Expander Status Change: Deleted";
7442 break;
7443 default:
7444 ds = "Expander Status Change";
7445 break;
7446 }
7447 break;
7448 }
7449
6817 /* 7450 /*
6818 * MPT base "custom" events may be added here... 7451 * MPT base "custom" events may be added here...
6819 */ 7452 */
@@ -6823,8 +7456,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6823 } 7456 }
6824 if (ds) 7457 if (ds)
6825 strncpy(evStr, ds, EVENT_DESCR_STR_SZ); 7458 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
6826}
6827 7459
7460
7461 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
7462 "MPT event:(%02Xh) : %s\n",
7463 ioc->name, event, evStr));
7464
7465 devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
7466 ": Event data:\n"));
7467 for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
7468 devtverboseprintk(ioc, printk(" %08x",
7469 le32_to_cpu(pEventReply->Data[ii])));
7470 devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
7471}
7472#endif
6828/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7473/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6829/** 7474/**
6830 * ProcessEventNotification - Route EventNotificationReply to all event handlers 7475 * ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7486,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6841{ 7486{
6842 u16 evDataLen; 7487 u16 evDataLen;
6843 u32 evData0 = 0; 7488 u32 evData0 = 0;
6844// u32 evCtx;
6845 int ii; 7489 int ii;
6846 u8 cb_idx; 7490 u8 cb_idx;
6847 int r = 0; 7491 int r = 0;
6848 int handlers = 0; 7492 int handlers = 0;
6849 char evStr[EVENT_DESCR_STR_SZ];
6850 u8 event; 7493 u8 event;
6851 7494
6852 /* 7495 /*
6853 * Do platform normalization of values 7496 * Do platform normalization of values
6854 */ 7497 */
6855 event = le32_to_cpu(pEventReply->Event) & 0xFF; 7498 event = le32_to_cpu(pEventReply->Event) & 0xFF;
6856// evCtx = le32_to_cpu(pEventReply->EventContext);
6857 evDataLen = le16_to_cpu(pEventReply->EventDataLength); 7499 evDataLen = le16_to_cpu(pEventReply->EventDataLength);
6858 if (evDataLen) { 7500 if (evDataLen) {
6859 evData0 = le32_to_cpu(pEventReply->Data[0]); 7501 evData0 = le32_to_cpu(pEventReply->Data[0]);
6860 } 7502 }
6861 7503
6862 EventDescriptionStr(event, evData0, evStr);
6863 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
6864 ioc->name,
6865 event,
6866 evStr));
6867
6868#ifdef CONFIG_FUSION_LOGGING 7504#ifdef CONFIG_FUSION_LOGGING
6869 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7505 if (evDataLen)
6870 ": Event data:\n", ioc->name)); 7506 mpt_display_event_info(ioc, pEventReply);
6871 for (ii = 0; ii < evDataLen; ii++)
6872 devtverboseprintk(ioc, printk(" %08x",
6873 le32_to_cpu(pEventReply->Data[ii])));
6874 devtverboseprintk(ioc, printk("\n"));
6875#endif 7507#endif
6876 7508
6877 /* 7509 /*
@@ -6926,8 +7558,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6926 */ 7558 */
6927 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7559 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6928 if (MptEvHandlers[cb_idx]) { 7560 if (MptEvHandlers[cb_idx]) {
6929 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", 7561 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6930 ioc->name, cb_idx)); 7562 "Routing Event to event handler #%d\n",
7563 ioc->name, cb_idx));
6931 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); 7564 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
6932 handlers++; 7565 handlers++;
6933 } 7566 }
@@ -7011,8 +7644,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
7011 switch (info) { 7644 switch (info) {
7012 case 0x00010000: 7645 case 0x00010000:
7013 desc = "bug! MID not found"; 7646 desc = "bug! MID not found";
7014 if (ioc->reload_fw == 0)
7015 ioc->reload_fw++;
7016 break; 7647 break;
7017 7648
7018 case 0x00020000: 7649 case 0x00020000:
@@ -7613,7 +8244,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
7613EXPORT_SYMBOL(mpt_put_msg_frame); 8244EXPORT_SYMBOL(mpt_put_msg_frame);
7614EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); 8245EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
7615EXPORT_SYMBOL(mpt_free_msg_frame); 8246EXPORT_SYMBOL(mpt_free_msg_frame);
7616EXPORT_SYMBOL(mpt_add_sge);
7617EXPORT_SYMBOL(mpt_send_handshake_request); 8247EXPORT_SYMBOL(mpt_send_handshake_request);
7618EXPORT_SYMBOL(mpt_verify_adapter); 8248EXPORT_SYMBOL(mpt_verify_adapter);
7619EXPORT_SYMBOL(mpt_GetIocState); 8249EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8280,7 @@ fusion_init(void)
7650 /* Register ourselves (mptbase) in order to facilitate 8280 /* Register ourselves (mptbase) in order to facilitate
7651 * EventNotification handling. 8281 * EventNotification handling.
7652 */ 8282 */
7653 mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); 8283 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
7654 8284
7655 /* Register for hard reset handling callbacks. 8285 /* Register for hard reset handling callbacks.
7656 */ 8286 */
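A pattern recurs throughout the mptbase.c hunks above: internal requests (config pages, SAS persist operations) no longer arm private timers and sleep on the global mpt_waitq. Each path now takes a dedicated mutex, marks its MPT_MGMT tracker pending, posts the message frame, and blocks in wait_for_completion_timeout(), while the reply handler or the MPT_IOC_POST_RESET callback completes it. A minimal, self-contained sketch of that shape, using a simplified tracker (my_mgmt_cmd, my_post_request() and the MY_STATUS_* flags are illustrative stand-ins, not driver symbols):

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_STATUS_PENDING	0x04	/* request outstanding */
#define MY_STATUS_GOOD		0x02	/* reply arrived and looked sane */
#define MY_STATUS_DID_RESET	0x08	/* woken by an IOC reset, not a reply */

struct my_mgmt_cmd {			/* simplified stand-in for MPT_MGMT */
	struct mutex		mutex;
	struct completion	done;
	u8			status;
};

static void my_post_request(void);	/* hypothetical: puts the msg frame */

static void my_cmd_setup(struct my_mgmt_cmd *cmd)	/* once, at adapter init */
{
	mutex_init(&cmd->mutex);
	cmd->status = 0;
}

/* Issuer side: serialize, mark pending, post, then wait with a timeout. */
static int my_issue_cmd(struct my_mgmt_cmd *cmd, unsigned long timeout)
{
	int ret = 0;

	mutex_lock(&cmd->mutex);
	cmd->status = MY_STATUS_PENDING;
	init_completion(&cmd->done);

	my_post_request();

	if (!wait_for_completion_timeout(&cmd->done, timeout) ||
	    !(cmd->status & MY_STATUS_GOOD))
		ret = -ETIME;	/* caller decides whether to hard-reset */

	cmd->status = 0;
	mutex_unlock(&cmd->mutex);
	return ret;
}

/* Reply/reset side: only complete a command that is still pending. */
static void my_complete_cmd(struct my_mgmt_cmd *cmd, bool from_ioc_reset)
{
	if (!(cmd->status & MY_STATUS_PENDING))
		return;
	cmd->status |= from_ioc_reset ? MY_STATUS_DID_RESET : MY_STATUS_GOOD;
	complete(&cmd->done);
}

The MPT_MGMT structure and the MPT_MGMT_STATUS_* flags the driver actually uses are introduced in the mptbase.h diff that follows.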
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b3e981d2a506..1c8514dc31ca 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.07" 79#define MPT_LINUX_VERSION_COMMON "3.04.10"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
104#endif 104#endif
105 105
106#define MPT_NAME_LENGTH 32 106#define MPT_NAME_LENGTH 32
107#define MPT_KOBJ_NAME_LEN 20
107 108
108#define MPT_PROCFS_MPTBASEDIR "mpt" 109#define MPT_PROCFS_MPTBASEDIR "mpt"
109 /* chg it to "driver/fusion" ? */ 110 /* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
134 135
135#define MPT_COALESCING_TIMEOUT 0x10 136#define MPT_COALESCING_TIMEOUT 0x10
136 137
138
137/* 139/*
138 * SCSI transfer rate defines. 140 * SCSI transfer rate defines.
139 */ 141 */
@@ -161,10 +163,10 @@
161/* 163/*
162 * Set the MAX_SGE value based on user input. 164 * Set the MAX_SGE value based on user input.
163 */ 165 */
164#ifdef CONFIG_FUSION_MAX_SGE 166#ifdef CONFIG_FUSION_MAX_SGE
165#if CONFIG_FUSION_MAX_SGE < 16 167#if CONFIG_FUSION_MAX_SGE < 16
166#define MPT_SCSI_SG_DEPTH 16 168#define MPT_SCSI_SG_DEPTH 16
167#elif CONFIG_FUSION_MAX_SGE > 128 169#elif CONFIG_FUSION_MAX_SGE > 128
168#define MPT_SCSI_SG_DEPTH 128 170#define MPT_SCSI_SG_DEPTH 128
169#else 171#else
170#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE 172#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
173#define MPT_SCSI_SG_DEPTH 40 175#define MPT_SCSI_SG_DEPTH 40
174#endif 176#endif
175 177
178#ifdef CONFIG_FUSION_MAX_FC_SGE
179#if CONFIG_FUSION_MAX_FC_SGE < 16
180#define MPT_SCSI_FC_SG_DEPTH 16
181#elif CONFIG_FUSION_MAX_FC_SGE > 256
182#define MPT_SCSI_FC_SG_DEPTH 256
183#else
184#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
185#endif
186#else
187#define MPT_SCSI_FC_SG_DEPTH 40
188#endif
189
176/* debug print string length used for events and iocstatus */ 190/* debug print string length used for events and iocstatus */
177# define EVENT_DESCR_STR_SZ 100 191# define EVENT_DESCR_STR_SZ 100
178 192
@@ -431,38 +445,36 @@ do { \
431 * IOCTL structure and associated defines 445 * IOCTL structure and associated defines
432 */ 446 */
433 447
434#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
435#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
436#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
437#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
438#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
439#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
440#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
441
442#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ 448#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
443 449
444typedef struct _MPT_IOCTL { 450#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
445 struct _MPT_ADAPTER *ioc; 451#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
446 u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 452#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
447 u8 sense[MPT_SENSE_BUFFER_ALLOC]; 453#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
448 int wait_done; /* wake-up value for this ioc */ 454 on the current*/
449 u8 rsvd; 455#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
450 u8 status; /* current command status */ 456#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
451 u8 reset; /* 1 if bus reset allowed */ 457#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
452 u8 id; /* target for reset */ 458 complete routine */
453 struct mutex ioctl_mutex; 459
454} MPT_IOCTL; 460#define INITIALIZE_MGMT_STATUS(status) \
455 461 status = MPT_MGMT_STATUS_PENDING;
456#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ 462#define CLEAR_MGMT_STATUS(status) \
457#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ 463 status = 0;
458#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ 464#define CLEAR_MGMT_PENDING_STATUS(status) \
459 465 status &= ~MPT_MGMT_STATUS_PENDING;
460typedef struct _MPT_SAS_MGMT { 466#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
467 msg_context = value;
468
469typedef struct _MPT_MGMT {
461 struct mutex mutex; 470 struct mutex mutex;
462 struct completion done; 471 struct completion done;
463 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 472 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
473 u8 sense[MPT_SENSE_BUFFER_ALLOC];
464 u8 status; /* current command status */ 474 u8 status; /* current command status */
465}MPT_SAS_MGMT; 475 int completion_code;
476 u32 msg_context;
477} MPT_MGMT;
466 478
467/* 479/*
468 * Event Structure and define 480 * Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
564 u8 flags; 576 u8 flags;
565}; 577};
566 578
579typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
580typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
581 dma_addr_t dma_addr);
582
567/* 583/*
568 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 584 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
569 */ 585 */
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
573 int pci_irq; /* This irq */ 589 int pci_irq; /* This irq */
574 char name[MPT_NAME_LENGTH]; /* "iocN" */ 590 char name[MPT_NAME_LENGTH]; /* "iocN" */
575 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ 591 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
592#ifdef CONFIG_FUSION_LOGGING
593 /* used in mpt_display_event_info */
594 char evStr[EVENT_DESCR_STR_SZ];
595#endif
576 char board_name[16]; 596 char board_name[16];
577 char board_assembly[16]; 597 char board_assembly[16];
578 char board_tracer[16]; 598 char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
600 int reply_depth; /* Num Allocated reply frames */ 620 int reply_depth; /* Num Allocated reply frames */
601 int reply_sz; /* Reply frame size */ 621 int reply_sz; /* Reply frame size */
602 int num_chain; /* Number of chain buffers */ 622 int num_chain; /* Number of chain buffers */
623 MPT_ADD_SGE add_sge; /* Pointer to add_sge
624 function */
625 MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
626 function */
603 /* Pool of buffers for chaining. ReqToChain 627 /* Pool of buffers for chaining. ReqToChain
604 * and ChainToChain track index of chain buffers. 628 * and ChainToChain track index of chain buffers.
605 * ChainBuffer (DMA) virt/phys addresses. 629 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
640 RaidCfgData raid_data; /* Raid config. data */ 664 RaidCfgData raid_data; /* Raid config. data */
641 SasCfgData sas_data; /* Sas config. data */ 665 SasCfgData sas_data; /* Sas config. data */
642 FcCfgData fc_data; /* Fc config. data */ 666 FcCfgData fc_data; /* Fc config. data */
643 MPT_IOCTL *ioctl; /* ioctl data pointer */
644 struct proc_dir_entry *ioc_dentry; 667 struct proc_dir_entry *ioc_dentry;
645 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 668 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
646 spinlock_t diagLock; /* diagnostic reset lock */
647 int diagPending;
648 u32 biosVersion; /* BIOS version from IO Unit Page 2 */ 669 u32 biosVersion; /* BIOS version from IO Unit Page 2 */
649 int eventTypes; /* Event logging parameters */ 670 int eventTypes; /* Event logging parameters */
650 int eventContext; /* Next event context */ 671 int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
652 struct _mpt_ioctl_events *events; /* pointer to event log */ 673 struct _mpt_ioctl_events *events; /* pointer to event log */
653 u8 *cached_fw; /* Pointer to FW */ 674 u8 *cached_fw; /* Pointer to FW */
654 dma_addr_t cached_fw_dma; 675 dma_addr_t cached_fw_dma;
655 struct list_head configQ; /* linked list of config. requests */
656 int hs_reply_idx; 676 int hs_reply_idx;
657#ifndef MFCNT 677#ifndef MFCNT
658 u32 pad0; 678 u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
665 IOCFactsReply_t facts; 685 IOCFactsReply_t facts;
666 PortFactsReply_t pfacts[2]; 686 PortFactsReply_t pfacts[2];
667 FCPortPage0_t fc_port_page0[2]; 687 FCPortPage0_t fc_port_page0[2];
668 struct timer_list persist_timer; /* persist table timer */
669 int persist_wait_done; /* persist completion flag */
670 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
671 LANPage0_t lan_cnfg_page0; 688 LANPage0_t lan_cnfg_page0;
672 LANPage1_t lan_cnfg_page1; 689 LANPage1_t lan_cnfg_page1;
673 690
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
682 int aen_event_read_flag; /* flag to indicate event log was read*/ 699 int aen_event_read_flag; /* flag to indicate event log was read*/
683 u8 FirstWhoInit; 700 u8 FirstWhoInit;
684 u8 upload_fw; /* If set, do a fw upload */ 701 u8 upload_fw; /* If set, do a fw upload */
685 u8 reload_fw; /* Force a FW Reload on next reset */
686 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 702 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
687 u8 pad1[4]; 703 u8 pad1[4];
688 u8 DoneCtx; 704 u8 DoneCtx;
689 u8 TaskCtx; 705 u8 TaskCtx;
690 u8 InternalCtx; 706 u8 InternalCtx;
691 spinlock_t initializing_hba_lock;
692 int initializing_hba_lock_flag;
693 struct list_head list; 707 struct list_head list;
694 struct net_device *netdev; 708 struct net_device *netdev;
695 struct list_head sas_topology; 709 struct list_head sas_topology;
696 struct mutex sas_topology_mutex; 710 struct mutex sas_topology_mutex;
711
712 struct workqueue_struct *fw_event_q;
713 struct list_head fw_event_list;
714 spinlock_t fw_event_lock;
715 u8 fw_events_off; /* if '1', then ignore events */
716 char fw_event_q_name[MPT_KOBJ_NAME_LEN];
717
697 struct mutex sas_discovery_mutex; 718 struct mutex sas_discovery_mutex;
698 u8 sas_discovery_runtime; 719 u8 sas_discovery_runtime;
699 u8 sas_discovery_ignore_events; 720 u8 sas_discovery_ignore_events;
721
722 /* port_info object for the host */
723 struct mptsas_portinfo *hba_port_info;
724 u64 hba_port_sas_addr;
725 u16 hba_port_num_phy;
726 struct list_head sas_device_info_list;
727 struct mutex sas_device_info_mutex;
728 u8 old_sas_discovery_protocal;
729 u8 sas_discovery_quiesce_io;
700 int sas_index; /* index referencing */ 730
701 MPT_SAS_MGMT sas_mgmt; 731 MPT_MGMT sas_mgmt;
732 MPT_MGMT mptbase_cmds; /* for sending config pages */
733 MPT_MGMT internal_cmds;
734 MPT_MGMT taskmgmt_cmds;
735 MPT_MGMT ioctl_cmds;
736 spinlock_t taskmgmt_lock; /* diagnostic reset lock */
737 int taskmgmt_in_progress;
738 u8 taskmgmt_quiesce_io;
739 u8 ioc_reset_in_progress;
702 struct work_struct sas_persist_task; 740 struct work_struct sas_persist_task;
703 741
704 struct work_struct fc_setup_reset_work; 742 struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
707 u8 fc_link_speed[2]; 745 u8 fc_link_speed[2];
708 spinlock_t fc_rescan_work_lock; 746 spinlock_t fc_rescan_work_lock;
709 struct work_struct fc_rescan_work; 747 struct work_struct fc_rescan_work;
710 char fc_rescan_work_q_name[20]; 748 char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
711 struct workqueue_struct *fc_rescan_work_q; 749 struct workqueue_struct *fc_rescan_work_q;
750
751 /* driver forced bus resets count */
752 unsigned long hard_resets;
753 /* fw/external bus resets count */
754 unsigned long soft_resets;
755 /* cmd timeouts */
756 unsigned long timeouts;
757
712 struct scsi_cmnd **ScsiLookup; 758 struct scsi_cmnd **ScsiLookup;
713 spinlock_t scsi_lookup_lock; 759 spinlock_t scsi_lookup_lock;
714 760 u64 dma_mask;
715 char reset_work_q_name[20]; 761 u32 broadcast_aen_busy;
762 char reset_work_q_name[MPT_KOBJ_NAME_LEN];
716 struct workqueue_struct *reset_work_q; 763 struct workqueue_struct *reset_work_q;
717 struct delayed_work fault_reset_work; 764 struct delayed_work fault_reset_work;
718 spinlock_t fault_reset_work_lock; 765
766 u8 sg_addr_size;
767 u8 in_rescan;
768 u8 SGE_size;
719 769
720} MPT_ADAPTER; 770} MPT_ADAPTER;
721 771
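
The new fw_event_q/fw_event_list/fw_event_lock members give each adapter a dedicated, single-threaded workqueue for firmware events, with fw_events_off letting reset paths temporarily ignore them. A minimal sketch of how a consumer such as mptsas might queue an event onto it follows; the wrapper structure and function names are illustrative assumptions, not code from this patch, and the sketch assumes only mptbase.h plus <linux/workqueue.h>:

    /* hypothetical event wrapper; the real one lives in mptsas.h */
    struct fw_event_work_sketch {
            struct list_head    list;   /* linked on ioc->fw_event_list */
            struct delayed_work work;   /* runs on ioc->fw_event_q */
            MPT_ADAPTER         *ioc;
            u8                  event;  /* MPI_EVENT_* code */
    };

    static void fw_event_work_sketch_fn(struct work_struct *work)
    {
            /* the real handler dispatches on the saved MPI event type */
    }

    static void fw_event_add_sketch(MPT_ADAPTER *ioc,
                                    struct fw_event_work_sketch *fw_event)
    {
            unsigned long flags;

            if (ioc->fw_events_off)     /* events currently being ignored */
                    return;

            fw_event->ioc = ioc;
            INIT_DELAYED_WORK(&fw_event->work, fw_event_work_sketch_fn);
            spin_lock_irqsave(&ioc->fw_event_lock, flags);
            list_add_tail(&fw_event->list, &ioc->fw_event_list);
            queue_delayed_work(ioc->fw_event_q, &fw_event->work, 0);
            spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
    }
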
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
753 dma_addr_t Address; 803 dma_addr_t Address;
754} MptSge_t; 804} MptSge_t;
755 805
756#define mpt_addr_size() \
757 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
758 MPI_SGE_FLAGS_32_BIT_ADDRESSING)
759 806
760#define mpt_msg_flags() \ 807#define mpt_msg_flags(ioc) \
761 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ 808 (ioc->sg_addr_size == sizeof(u64)) ? \
762 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) 809 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
810 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
811
812#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
813 (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
763 814
764/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
765/* 816/*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
835 /* Pool of memory for holding SCpnts before doing 886 /* Pool of memory for holding SCpnts before doing
836 * OS callbacks. freeQ is the free pool. 887 * OS callbacks. freeQ is the free pool.
837 */ 888 */
838 u8 tmPending;
839 u8 resetPending;
840 u8 negoNvram; /* DV disabled, nego NVRAM */ 889 u8 negoNvram; /* DV disabled, nego NVRAM */
841 u8 pad1; 890 u8 pad1;
842 u8 tmState;
843 u8 rsvd[2]; 891 u8 rsvd[2];
844 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ 892 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
845 struct scsi_cmnd *abortSCpnt; 893 struct scsi_cmnd *abortSCpnt;
846 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ 894 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
847 unsigned long hard_resets; /* driver forced bus resets count */
848 unsigned long soft_resets; /* fw/external bus resets count */
849 unsigned long timeouts; /* cmd timeouts */
850 ushort sel_timeout[MPT_MAX_FC_DEVICES]; 895 ushort sel_timeout[MPT_MAX_FC_DEVICES];
851 char *info_kbuf; 896 char *info_kbuf;
852 wait_queue_head_t scandv_waitq;
853 int scandv_wait_done;
854 long last_queue_full; 897 long last_queue_full;
855 u16 tm_iocstatus; 898 u16 tm_iocstatus;
856 u16 spi_pending; 899 u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
870 * Generic structure passed to the base mpt_config function. 913 * Generic structure passed to the base mpt_config function.
871 */ 914 */
872typedef struct _x_config_parms { 915typedef struct _x_config_parms {
873 struct list_head linkage; /* linked list */
874 struct timer_list timer; /* timer function for this request */
875 union { 916 union {
876 ConfigExtendedPageHeader_t *ehdr; 917 ConfigExtendedPageHeader_t *ehdr;
877 ConfigPageHeader_t *hdr; 918 ConfigPageHeader_t *hdr;
878 } cfghdr; 919 } cfghdr;
879 dma_addr_t physAddr; 920 dma_addr_t physAddr;
880 int wait_done; /* wait for this request */
881 u32 pageAddr; /* properly formatted */ 921 u32 pageAddr; /* properly formatted */
922 u16 status;
882 u8 action; 923 u8 action;
883 u8 dir; 924 u8 dir;
884 u8 timeout; /* seconds */ 925 u8 timeout; /* seconds */
885 u8 pad1;
886 u16 status;
887 u16 pad2;
888} CONFIGPARMS; 926} CONFIGPARMS;
889 927
890/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 928/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
909extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 947extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
910extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 948extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
911extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 949extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
912extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
913 950
914extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); 951extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
915extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); 952extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
922extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 959extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 960extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); 961extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
962extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
963 pRaidPhysDiskPage1_t phys_disk);
964extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
965 u8 phys_disk_num);
966extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
967extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
925extern void mpt_halt_firmware(MPT_ADAPTER *ioc); 968extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
926 969
927 970
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
959#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) 1002#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
960#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) 1003#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
961#define MPT_SGE_FLAGS_DIRECTION (0x04000000) 1004#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
962#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
963#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) 1005#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
964 1006
965#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) 1007#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
972 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1014 MPT_SGE_FLAGS_END_OF_BUFFER | \
973 MPT_SGE_FLAGS_END_OF_LIST | \ 1015 MPT_SGE_FLAGS_END_OF_LIST | \
974 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1016 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
975 MPT_SGE_FLAGS_ADDRESSING | \
976 MPT_TRANSFER_IOC_TO_HOST) 1017 MPT_TRANSFER_IOC_TO_HOST)
977#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ 1018#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
978 (MPT_SGE_FLAGS_LAST_ELEMENT | \ 1019 (MPT_SGE_FLAGS_LAST_ELEMENT | \
979 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1020 MPT_SGE_FLAGS_END_OF_BUFFER | \
980 MPT_SGE_FLAGS_END_OF_LIST | \ 1021 MPT_SGE_FLAGS_END_OF_LIST | \
981 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1022 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
982 MPT_SGE_FLAGS_ADDRESSING | \
983 MPT_TRANSFER_HOST_TO_IOC) 1023 MPT_TRANSFER_HOST_TO_IOC)
984 1024
985/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1025/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
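
Taken together, the MPT_MGMT tracker, its mutex/completion pair and the *_MGMT_STATUS/SET_MGMT_MSG_CONTEXT macros replace the per-module wait_done flags and wait queues. A condensed sketch of the calling pattern the mptctl.c hunks below follow, assuming only what this header declares (the helper name is illustrative, and the real callers additionally loop on early wakeups and run mptctl_timeout_expired() on a genuine timeout):

    static int mgmt_send_and_wait_sketch(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
                                         u8 cb_idx, unsigned long timeout_secs)
    {
            unsigned long timeleft;
            int rc = 0;

            mutex_lock(&ioc->ioctl_cmds.mutex);     /* serialize this tracker */

            /* the reply handler matches on msg_context, then fills
             * ioctl_cmds.reply/sense and completes ioctl_cmds.done */
            INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
            SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
                mf->u.hdr.MsgContext);

            mpt_put_msg_frame(cb_idx, ioc, mf);

            timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
                timeout_secs * HZ);
            if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD))
                    /* on a real timeout (timeleft == 0) callers also invoke
                     * mptctl_timeout_expired(ioc, mf) */
                    rc = timeleft ? -EIO : -ETIME;
            else if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID))
                    rc = -ENODATA;  /* completed, but no reply frame captured */

            CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
            SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
            mutex_unlock(&ioc->ioctl_cmds.mutex);
            return rc;
    }
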
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c63817117c0a..9b2e2198aee9 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
85 85
86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; 86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
87static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
87 88
88static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 89static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
89 90
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
127 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); 128 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
128static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, 129static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
129 struct buflist *buflist, MPT_ADAPTER *ioc); 130 struct buflist *buflist, MPT_ADAPTER *ioc);
130static void mptctl_timeout_expired (MPT_IOCTL *ioctl); 131static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
131static int mptctl_bus_reset(MPT_IOCTL *ioctl);
132static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
133static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
134 132
135/* 133/*
136 * Reset Handler cleanup function 134 * Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
183 int rc = 0; 181 int rc = 0;
184 182
185 if (nonblock) { 183 if (nonblock) {
186 if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) 184 if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
187 rc = -EAGAIN; 185 rc = -EAGAIN;
188 } else { 186 } else {
189 if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) 187 if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
190 rc = -ERESTARTSYS; 188 rc = -ERESTARTSYS;
191 } 189 }
192 return rc; 190 return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
202static int 200static int
203mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) 201mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
204{ 202{
205 char *sense_data; 203 char *sense_data;
206 int sz, req_index; 204 int req_index;
207 u16 iocStatus; 205 int sz;
208 u8 cmd;
209 206
210 if (req) 207 if (!req)
211 cmd = req->u.hdr.Function; 208 return 0;
212 else
213 return 1;
214 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
215 "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply));
216
217 if (ioc->ioctl) {
218
219 if (reply==NULL) {
220
221 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
222 "Function=%x!\n", ioc->name, cmd));
223 209
224 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; 210 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
225 ioc->ioctl->reset &= ~MPTCTL_RESET_OK; 211 "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
212 req, reply));
226 213
227 /* We are done, issue wake up 214 /*
228 */ 215 * Handling continuation of the same reply. Processing the first
229 ioc->ioctl->wait_done = 1; 216 * reply, and eating the other replies that come later.
230 wake_up (&mptctl_wait); 217 */
231 return 1; 218 if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
219 goto out_continuation;
232 220
233 } 221 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
234 222
235 /* Copy the reply frame (which must exist 223 if (!reply)
236 * for non-SCSI I/O) to the IOC structure. 224 goto out;
237 */
238 memcpy(ioc->ioctl->ReplyFrame, reply,
239 min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
240 ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
241 225
242 /* Set the command status to GOOD if IOC Status is GOOD 226 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
243 * OR if SCSI I/O cmd and data underrun or recovered error. 227 sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
244 */ 228 memcpy(ioc->ioctl_cmds.reply, reply, sz);
245 iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
246 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248
249 if (iocStatus || reply->u.reply.IOCLogInfo)
250 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
251 "loginfo (0x%08X)\n", ioc->name,
252 iocStatus,
253 le32_to_cpu(reply->u.reply.IOCLogInfo)));
254
255 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
256 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
257
258 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
259 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
260 "\tscsi_status (0x%02x), scsi_state (0x%02x), "
261 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
262 reply->u.sreply.SCSIStatus,
263 reply->u.sreply.SCSIState,
264 le16_to_cpu(reply->u.sreply.TaskTag),
265 le32_to_cpu(reply->u.sreply.TransferCount)));
266
267 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
268
269 if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
270 (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
271 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
272 }
273 }
274 229
275 /* Copy the sense data - if present 230 if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
276 */ 231 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
277 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && 232 "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
278 (reply->u.sreply.SCSIState & 233 le16_to_cpu(reply->u.reply.IOCStatus),
279 MPI_SCSI_STATE_AUTOSENSE_VALID)){ 234 le32_to_cpu(reply->u.reply.IOCLogInfo)));
235
236 if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
237 (req->u.hdr.Function ==
238 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
239
240 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
241 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
242 "scsi_status (0x%02x), scsi_state (0x%02x), "
243 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
244 reply->u.sreply.SCSIStatus,
245 reply->u.sreply.SCSIState,
246 le16_to_cpu(reply->u.sreply.TaskTag),
247 le32_to_cpu(reply->u.sreply.TransferCount)));
248
249 if (reply->u.sreply.SCSIState &
250 MPI_SCSI_STATE_AUTOSENSE_VALID) {
280 sz = req->u.scsireq.SenseBufferLength; 251 sz = req->u.scsireq.SenseBufferLength;
281 req_index = 252 req_index =
282 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); 253 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
283 sense_data = 254 sense_data = ((u8 *)ioc->sense_buf_pool +
284 ((u8 *)ioc->sense_buf_pool +
285 (req_index * MPT_SENSE_BUFFER_ALLOC)); 255 (req_index * MPT_SENSE_BUFFER_ALLOC));
286 memcpy(ioc->ioctl->sense, sense_data, sz); 256 memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
287 ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; 257 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
288 } 258 }
259 }
289 260
290 if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) 261 out:
291 mptctl_free_tm_flags(ioc); 262 /* We are done, issue wake up
292 263 */
293 /* We are done, issue wake up 264 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
294 */ 265 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
295 ioc->ioctl->wait_done = 1; 266 mpt_clear_taskmgmt_in_progress_flag(ioc);
296 wake_up (&mptctl_wait); 267 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
268 complete(&ioc->ioctl_cmds.done);
297 } 269 }
270
271 out_continuation:
272 if (reply && (reply->u.reply.MsgFlags &
273 MPI_MSGFLAGS_CONTINUATION_REPLY))
274 return 0;
298 return 1; 275 return 1;
299} 276}
300 277
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
304 * Expecting an interrupt, however timed out. 281 * Expecting an interrupt, however timed out.
305 * 282 *
306 */ 283 */
307static void mptctl_timeout_expired (MPT_IOCTL *ioctl) 284static void
285mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
308{ 286{
309 int rc = 1; 287 unsigned long flags;
310 288
311 if (ioctl == NULL) 289 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
312 return; 290 ioc->name, __func__));
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
316 291
317 ioctl->wait_done = 0; 292 if (mpt_fwfault_debug)
318 if (ioctl->reset & MPTCTL_RESET_OK) 293 mpt_halt_firmware(ioc);
319 rc = mptctl_bus_reset(ioctl);
320 294
321 if (rc) { 295 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
322 /* Issue a reset for this device. 296 if (ioc->ioc_reset_in_progress) {
323 * The IOC is not responding. 297 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
324 */ 298 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
325 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 299 mpt_free_msg_frame(ioc, mf);
326 ioctl->ioc->name)); 300 return;
327 mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
328 } 301 }
329 return; 302 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
303
330 304
305 if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
306 return;
307
308 /* Issue a reset for this device.
309 * The IOC is not responding.
310 */
311 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
312 ioc->name));
313 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
314 mpt_HardResetHandler(ioc, CAN_SLEEP);
315 mpt_free_msg_frame(ioc, mf);
316}
317
318static int
319mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
320{
321 if (!mf)
322 return 0;
323
324 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
325 "TaskMgmt completed (mf=%p, mr=%p)\n",
326 ioc->name, mf, mr));
327
328 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
329
330 if (!mr)
331 goto out;
332
333 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
334 memcpy(ioc->taskmgmt_cmds.reply, mr,
335 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
336 out:
337 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
338 mpt_clear_taskmgmt_in_progress_flag(ioc);
339 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
340 complete(&ioc->taskmgmt_cmds.done);
341 return 1;
342 }
343 return 0;
331} 344}
332 345
333/* mptctl_bus_reset 346/* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
335 * Bus reset code. 348 * Bus reset code.
336 * 349 *
337 */ 350 */
338static int mptctl_bus_reset(MPT_IOCTL *ioctl) 351static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
339{ 352{
340 MPT_FRAME_HDR *mf; 353 MPT_FRAME_HDR *mf;
341 SCSITaskMgmt_t *pScsiTm; 354 SCSITaskMgmt_t *pScsiTm;
342 MPT_SCSI_HOST *hd; 355 SCSITaskMgmtReply_t *pScsiTmReply;
343 int ii; 356 int ii;
344 int retval=0; 357 int retval;
345 358 unsigned long timeout;
346 359 unsigned long time_count;
347 ioctl->reset &= ~MPTCTL_RESET_OK; 360 u16 iocstatus;
348 361
349 if (ioctl->ioc->sh == NULL) 362 /* bus reset is only good for SCSI IO, RAID PASSTHRU */
363 if ((function != MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) &&
364 (function != MPI_FUNCTION_SCSI_IO_REQUEST)) {
365 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 "TaskMgmt, not SCSI_IO!!\n", ioc->name));
350 return -EPERM; 367 return -EPERM;
368 }
351 369
352 hd = shost_priv(ioctl->ioc->sh); 370 mutex_lock(&ioc->taskmgmt_cmds.mutex);
353 if (hd == NULL) 371 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
372 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
354 return -EPERM; 373 return -EPERM;
374 }
355 375
356 /* Single threading .... 376 retval = 0;
357 */
358 if (mptctl_set_tm_flags(hd) != 0)
359 return -EPERM;
360 377
361 /* Send request 378 /* Send request
362 */ 379 */
363 if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { 380 mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
364 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", 381 if (mf == NULL) {
365 ioctl->ioc->name)); 382 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 383 "TaskMgmt, no msg frames!!\n", ioc->name));
367 mptctl_free_tm_flags(ioctl->ioc); 384 mpt_clear_taskmgmt_in_progress_flag(ioc);
368 return -ENOMEM; 385 retval = -ENOMEM;
386 goto mptctl_bus_reset_done;
369 } 387 }
370 388
371 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 389 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
372 ioctl->ioc->name, mf)); 390 ioc->name, mf));
373 391
374 pScsiTm = (SCSITaskMgmt_t *) mf; 392 pScsiTm = (SCSITaskMgmt_t *) mf;
375 pScsiTm->TargetID = ioctl->id; 393 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
376 pScsiTm->Bus = hd->port; /* 0 */
377 pScsiTm->ChainOffset = 0;
378 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 394 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
379 pScsiTm->Reserved = 0;
380 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; 395 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
381 pScsiTm->Reserved1 = 0;
382 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; 396 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
383 397 pScsiTm->TargetID = 0;
398 pScsiTm->Bus = 0;
399 pScsiTm->ChainOffset = 0;
400 pScsiTm->Reserved = 0;
401 pScsiTm->Reserved1 = 0;
402 pScsiTm->TaskMsgContext = 0;
384 for (ii= 0; ii < 8; ii++) 403 for (ii= 0; ii < 8; ii++)
385 pScsiTm->LUN[ii] = 0; 404 pScsiTm->LUN[ii] = 0;
386
387 for (ii=0; ii < 7; ii++) 405 for (ii=0; ii < 7; ii++)
388 pScsiTm->Reserved2[ii] = 0; 406 pScsiTm->Reserved2[ii] = 0;
389 407
390 pScsiTm->TaskMsgContext = 0; 408 switch (ioc->bus_type) {
391 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT 409 case FC:
392 "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); 410 timeout = 40;
393 411 break;
394 DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); 412 case SAS:
413 timeout = 30;
414 break;
415 case SPI:
416 default:
417 timeout = 2;
418 break;
419 }
395 420
396 ioctl->wait_done=0; 421 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
422 "TaskMgmt type=%d timeout=%ld\n",
423 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
397 424
398 if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 425 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
399 (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 426 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
400 mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); 427 time_count = jiffies;
428 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
429 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
430 mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
401 else { 431 else {
402 retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, 432 retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
403 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 433 sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
404 if (retval != 0) { 434 if (retval != 0) {
405 dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" 435 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
406 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, 436 "TaskMgmt send_handshake FAILED!"
407 hd->ioc, mf)); 437 " (ioc %p, mf %p, rc=%d) \n", ioc->name,
438 ioc, mf, retval));
439 mpt_clear_taskmgmt_in_progress_flag(ioc);
408 goto mptctl_bus_reset_done; 440 goto mptctl_bus_reset_done;
409 } 441 }
410 } 442 }
411 443
412 /* Now wait for the command to complete */ 444 /* Now wait for the command to complete */
413 ii = wait_event_timeout(mptctl_wait, 445 ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
414 ioctl->wait_done == 1, 446 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
415 HZ*5 /* 5 second timeout */); 447 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
448 "TaskMgmt failed\n", ioc->name));
449 mpt_free_msg_frame(ioc, mf);
450 mpt_clear_taskmgmt_in_progress_flag(ioc);
451 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
452 retval = 0;
453 else
454 retval = -1; /* return failure */
455 goto mptctl_bus_reset_done;
456 }
416 457
417 if(ii <=0 && (ioctl->wait_done != 1 )) { 458 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
418 mpt_free_msg_frame(hd->ioc, mf); 459 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
419 ioctl->wait_done = 0; 460 "TaskMgmt failed\n", ioc->name));
461 retval = -1; /* return failure */
462 goto mptctl_bus_reset_done;
463 }
464
465 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
466 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
467 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
468 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
469 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
470 pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
471 le16_to_cpu(pScsiTmReply->IOCStatus),
472 le32_to_cpu(pScsiTmReply->IOCLogInfo),
473 pScsiTmReply->ResponseCode,
474 le32_to_cpu(pScsiTmReply->TerminationCount)));
475
476 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
477
478 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
479 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
480 iocstatus == MPI_IOCSTATUS_SUCCESS)
481 retval = 0;
482 else {
483 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
484 "TaskMgmt failed\n", ioc->name));
420 retval = -1; /* return failure */ 485 retval = -1; /* return failure */
421 } 486 }
422 487
423mptctl_bus_reset_done:
424 488
425 mptctl_free_tm_flags(ioctl->ioc); 489 mptctl_bus_reset_done:
490 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
491 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
426 return retval; 492 return retval;
427} 493}
428 494
429static int
430mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
431 unsigned long flags;
432
433 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
434
435 if (hd->tmState == TM_STATE_NONE) {
436 hd->tmState = TM_STATE_IN_PROGRESS;
437 hd->tmPending = 1;
438 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
439 } else {
440 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
441 return -EBUSY;
442 }
443
444 return 0;
445}
446
447static void
448mptctl_free_tm_flags(MPT_ADAPTER *ioc)
449{
450 MPT_SCSI_HOST * hd;
451 unsigned long flags;
452
453 hd = shost_priv(ioc->sh);
454 if (hd == NULL)
455 return;
456
457 spin_lock_irqsave(&ioc->FreeQlock, flags);
458
459 hd->tmState = TM_STATE_NONE;
460 hd->tmPending = 0;
461 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
462
463 return;
464}
465 495
466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 496/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
467/* mptctl_ioc_reset 497/* mptctl_ioc_reset
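
The tmPending/tmState bookkeeping deleted above is superseded by an adapter-wide gate taken through mpt_set_taskmgmt_in_progress_flag()/mpt_clear_taskmgmt_in_progress_flag(), the externs added in the mptbase.h hunk earlier. Their implementation lives in mptbase.c and is not part of this file; the sketch below is only a plausible reading of the contract, inferred from how mptctl_bus_reset() uses it:

    static int set_taskmgmt_in_progress_sketch(MPT_ADAPTER *ioc)
    {
            unsigned long flags;
            int rc = 0;

            spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
            if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress) {
                    rc = -1;        /* caller backs off (-EPERM above) */
            } else {
                    ioc->taskmgmt_in_progress = 1;
                    ioc->taskmgmt_quiesce_io = 1;   /* stall new SCSI I/O */
            }
            spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
            return rc;
    }

    static void clear_taskmgmt_in_progress_sketch(MPT_ADAPTER *ioc)
    {
            unsigned long flags;

            spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
            ioc->taskmgmt_in_progress = 0;
            ioc->taskmgmt_quiesce_io = 0;
            spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
    }
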
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
473static int 503static int
474mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 504mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
475{ 505{
476 MPT_IOCTL *ioctl = ioc->ioctl;
477 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
478 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
479 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
480
481 if(ioctl == NULL)
482 return 1;
483
484 switch(reset_phase) { 506 switch(reset_phase) {
485 case MPT_IOC_SETUP_RESET: 507 case MPT_IOC_SETUP_RESET:
486 ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; 508 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
509 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
510 break;
511 case MPT_IOC_PRE_RESET:
512 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
513 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
487 break; 514 break;
488 case MPT_IOC_POST_RESET: 515 case MPT_IOC_POST_RESET:
489 ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; 516 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
517 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
518 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
519 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
520 complete(&ioc->ioctl_cmds.done);
521 }
490 break; 522 break;
491 case MPT_IOC_PRE_RESET:
492 default: 523 default:
493 break; 524 break;
494 } 525 }
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 else 673 else
643 ret = -EINVAL; 674 ret = -EINVAL;
644 675
645 mutex_unlock(&iocp->ioctl->ioctl_mutex); 676 mutex_unlock(&iocp->ioctl_cmds.mutex);
646 677
647 return ret; 678 return ret;
648} 679}
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
758 int sge_offset = 0; 789 int sge_offset = 0;
759 u16 iocstat; 790 u16 iocstat;
760 pFWDownloadReply_t ReplyMsg = NULL; 791 pFWDownloadReply_t ReplyMsg = NULL;
792 unsigned long timeleft;
761 793
762 if (mpt_verify_adapter(ioc, &iocp) < 0) { 794 if (mpt_verify_adapter(ioc, &iocp) < 0) {
763 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", 795 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
841 * 96 8 873 * 96 8
842 * 64 4 874 * 64 4
843 */ 875 */
844 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) 876 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
845 / (sizeof(dma_addr_t) + sizeof(u32)); 877 sizeof(FWDownloadTCSGE_t))
878 / iocp->SGE_size;
846 if (numfrags > maxfrags) { 879 if (numfrags > maxfrags) {
847 ret = -EMLINK; 880 ret = -EMLINK;
848 goto fwdl_out; 881 goto fwdl_out;
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
870 if (nib == 0 || nib == 3) { 903 if (nib == 0 || nib == 3) {
871 ; 904 ;
872 } else if (sgIn->Address) { 905 } else if (sgIn->Address) {
873 mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); 906 iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
874 n++; 907 n++;
875 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { 908 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
876 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " 909 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
882 } 915 }
883 sgIn++; 916 sgIn++;
884 bl++; 917 bl++;
885 sgOut += (sizeof(dma_addr_t) + sizeof(u32)); 918 sgOut += iocp->SGE_size;
886 } 919 }
887 920
888 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); 921 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
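
The iocp->add_sge() calls and the sgOut += iocp->SGE_size stride above replace the old global mpt_add_sge() and the open-coded sizeof(dma_addr_t) + sizeof(u32) arithmetic: each adapter now carries an SGE writer matched to its addressing mode. A sketch of the two writers and of how an adapter might be switched between them at init time follows; the selection helper is an illustrative assumption, while the SGESimple32_t/SGESimple64_t layouts come from the MPI headers:

    static void add_sge_32bit_sketch(void *pAddr, u32 flagslength,
                                     dma_addr_t dma_addr)
    {
            SGESimple32_t *pSge = (SGESimple32_t *) pAddr;

            pSge->FlagsLength = cpu_to_le32(flagslength);
            pSge->Address = cpu_to_le32(dma_addr);
    }

    static void add_sge_64bit_sketch(void *pAddr, u32 flagslength,
                                     dma_addr_t dma_addr)
    {
            SGESimple64_t *pSge = (SGESimple64_t *) pAddr;

            pSge->Address.Low = cpu_to_le32(lower_32_bits(dma_addr));
            pSge->Address.High = cpu_to_le32(upper_32_bits(dma_addr));
            pSge->FlagsLength = cpu_to_le32(flagslength |
                                    MPT_SGE_FLAGS_64_BIT_ADDRESSING);
    }

    /* chosen once per IOC, e.g. while attaching the adapter */
    static void pick_sge_ops_sketch(MPT_ADAPTER *ioc, int use_64bit_sge)
    {
            if (use_64bit_sge) {
                    ioc->add_sge = add_sge_64bit_sketch;
                    ioc->sg_addr_size = sizeof(u64);
            } else {
                    ioc->add_sge = add_sge_32bit_sketch;
                    ioc->sg_addr_size = sizeof(u32);
            }
            /* one flags/length word plus the address */
            ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
    }
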
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
891 * Finally, perform firmware download. 924 * Finally, perform firmware download.
892 */ 925 */
893 ReplyMsg = NULL; 926 ReplyMsg = NULL;
927 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
928 INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
894 mpt_put_msg_frame(mptctl_id, iocp, mf); 929 mpt_put_msg_frame(mptctl_id, iocp, mf);
895 930
896 /* Now wait for the command to complete */ 931 /* Now wait for the command to complete */
897 ret = wait_event_timeout(mptctl_wait, 932retry_wait:
898 iocp->ioctl->wait_done == 1, 933 timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
899 HZ*60); 934 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
935 ret = -ETIME;
936 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
937 if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
938 mpt_free_msg_frame(iocp, mf);
939 goto fwdl_out;
940 }
941 if (!timeleft)
942 mptctl_timeout_expired(iocp, mf);
943 else
944 goto retry_wait;
945 goto fwdl_out;
946 }
900 947
901 if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { 948 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
902 /* Now we need to reset the board */ 949 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
903 mptctl_timeout_expired(iocp->ioctl); 950 mpt_free_msg_frame(iocp, mf);
904 ret = -ENODATA; 951 ret = -ENODATA;
905 goto fwdl_out; 952 goto fwdl_out;
906 } 953 }
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
908 if (sgl) 955 if (sgl)
909 kfree_sgl(sgl, sgl_dma, buflist, iocp); 956 kfree_sgl(sgl, sgl_dma, buflist, iocp);
910 957
911 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; 958 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
912 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; 959 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
913 if (iocstat == MPI_IOCSTATUS_SUCCESS) { 960 if (iocstat == MPI_IOCSTATUS_SUCCESS) {
914 printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); 961
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
932 return 0; 979 return 0;
933 980
934fwdl_out: 981fwdl_out:
982
983 CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
984 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
935 kfree_sgl(sgl, sgl_dma, buflist, iocp); 985 kfree_sgl(sgl, sgl_dma, buflist, iocp);
936 return ret; 986 return ret;
937} 987}
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1003 * 1053 *
1004 */ 1054 */
1005 sgl = sglbuf; 1055 sgl = sglbuf;
1006 sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; 1056 sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
1007 while (bytes_allocd < bytes) { 1057 while (bytes_allocd < bytes) {
1008 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1058 this_alloc = min(alloc_sz, bytes-bytes_allocd);
1009 buflist[buflist_ent].len = this_alloc; 1059 buflist[buflist_ent].len = this_alloc;
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1024 dma_addr_t dma_addr; 1074 dma_addr_t dma_addr;
1025 1075
1026 bytes_allocd += this_alloc; 1076 bytes_allocd += this_alloc;
1027 sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); 1077 sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
1028 dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); 1078 dma_addr = pci_map_single(ioc->pcidev,
1079 buflist[buflist_ent].kptr, this_alloc, dir);
1029 sgl->Address = dma_addr; 1080 sgl->Address = dma_addr;
1030 1081
1031 fragcnt++; 1082 fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1771 int msgContext; 1822 int msgContext;
1772 u16 req_idx; 1823 u16 req_idx;
1773 ulong timeout; 1824 ulong timeout;
1825 unsigned long timeleft;
1774 struct scsi_device *sdev; 1826 struct scsi_device *sdev;
1827 unsigned long flags;
1828 u8 function;
1775 1829
1776 /* bufIn and bufOut are used for user to kernel space transfers 1830 /* bufIn and bufOut are used for user to kernel space transfers
1777 */ 1831 */
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1784 __FILE__, __LINE__, iocnum); 1838 __FILE__, __LINE__, iocnum);
1785 return -ENODEV; 1839 return -ENODEV;
1786 } 1840 }
1787 if (!ioc->ioctl) { 1841
1788 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1842 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1789 "No memory available during driver init.\n", 1843 if (ioc->ioc_reset_in_progress) {
1790 __FILE__, __LINE__); 1844 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1791 return -ENOMEM;
1792 } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
1793 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1845 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
1794 "Busy with IOC Reset \n", __FILE__, __LINE__); 1846 "Busy with diagnostic reset\n", __FILE__, __LINE__);
1795 return -EBUSY; 1847 return -EBUSY;
1796 } 1848 }
1849 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1797 1850
1798 /* Verify that the final request frame will not be too large. 1851 /* Verify that the final request frame will not be too large.
1799 */ 1852 */
1800 sz = karg.dataSgeOffset * 4; 1853 sz = karg.dataSgeOffset * 4;
1801 if (karg.dataInSize > 0) 1854 if (karg.dataInSize > 0)
1802 sz += sizeof(dma_addr_t) + sizeof(u32); 1855 sz += ioc->SGE_size;
1803 if (karg.dataOutSize > 0) 1856 if (karg.dataOutSize > 0)
1804 sz += sizeof(dma_addr_t) + sizeof(u32); 1857 sz += ioc->SGE_size;
1805 1858
1806 if (sz > ioc->req_sz) { 1859 if (sz > ioc->req_sz) {
1807 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1860 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1827 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1880 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
1828 "Unable to read MF from mpt_ioctl_command struct @ %p\n", 1881 "Unable to read MF from mpt_ioctl_command struct @ %p\n",
1829 ioc->name, __FILE__, __LINE__, mfPtr); 1882 ioc->name, __FILE__, __LINE__, mfPtr);
1883 function = -1;
1830 rc = -EFAULT; 1884 rc = -EFAULT;
1831 goto done_free_mem; 1885 goto done_free_mem;
1832 } 1886 }
1833 hdr->MsgContext = cpu_to_le32(msgContext); 1887 hdr->MsgContext = cpu_to_le32(msgContext);
1888 function = hdr->Function;
1834 1889
1835 1890
1836 /* Verify that this request is allowed. 1891 /* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1838 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", 1893 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
1839 ioc->name, hdr->Function, mf)); 1894 ioc->name, hdr->Function, mf));
1840 1895
1841 switch (hdr->Function) { 1896 switch (function) {
1842 case MPI_FUNCTION_IOC_FACTS: 1897 case MPI_FUNCTION_IOC_FACTS:
1843 case MPI_FUNCTION_PORT_FACTS: 1898 case MPI_FUNCTION_PORT_FACTS:
1844 karg.dataOutSize = karg.dataInSize = 0; 1899 karg.dataOutSize = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1893 } 1948 }
1894 1949
1895 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1950 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1896 pScsiReq->MsgFlags |= mpt_msg_flags(); 1951 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1897 1952
1898 1953
1899 /* verify that app has not requested 1954 /* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1935 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 1990 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1936 pScsiReq->DataLength = cpu_to_le32(dataSize); 1991 pScsiReq->DataLength = cpu_to_le32(dataSize);
1937 1992
1938 ioc->ioctl->reset = MPTCTL_RESET_OK;
1939 ioc->ioctl->id = pScsiReq->TargetID;
1940 1993
1941 } else { 1994 } else {
1942 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1995 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1979 int dataSize; 2032 int dataSize;
1980 2033
1981 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 2034 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1982 pScsiReq->MsgFlags |= mpt_msg_flags(); 2035 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1983 2036
1984 2037
1985 /* verify that app has not requested 2038 /* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2014 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 2067 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
2015 pScsiReq->DataLength = cpu_to_le32(dataSize); 2068 pScsiReq->DataLength = cpu_to_le32(dataSize);
2016 2069
2017 ioc->ioctl->reset = MPTCTL_RESET_OK;
2018 ioc->ioctl->id = pScsiReq->TargetID;
2019 } else { 2070 } else {
2020 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2071 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2021 "SCSI driver is not loaded. \n", 2072 "SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2026 break; 2077 break;
2027 2078
2028 case MPI_FUNCTION_SCSI_TASK_MGMT: 2079 case MPI_FUNCTION_SCSI_TASK_MGMT:
2029 { 2080 {
2030 MPT_SCSI_HOST *hd = NULL; 2081 SCSITaskMgmt_t *pScsiTm;
2031 if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { 2082 pScsiTm = (SCSITaskMgmt_t *)mf;
2032 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2083 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2033 "SCSI driver not loaded or SCSI host not found. \n", 2084 "\tTaskType=0x%x MsgFlags=0x%x "
2034 ioc->name, __FILE__, __LINE__); 2085 "TaskMsgContext=0x%x id=%d channel=%d\n",
2035 rc = -EFAULT; 2086 ioc->name, pScsiTm->TaskType, le32_to_cpu
2036 goto done_free_mem; 2087 (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
2037 } else if (mptctl_set_tm_flags(hd) != 0) { 2088 pScsiTm->TargetID, pScsiTm->Bus));
2038 rc = -EPERM;
2039 goto done_free_mem;
2040 }
2041 }
2042 break; 2089 break;
2090 }
2043 2091
2044 case MPI_FUNCTION_IOC_INIT: 2092 case MPI_FUNCTION_IOC_INIT:
2045 { 2093 {
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2123 if (karg.dataInSize > 0) { 2171 if (karg.dataInSize > 0) {
2124 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2172 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2125 MPI_SGE_FLAGS_END_OF_BUFFER | 2173 MPI_SGE_FLAGS_END_OF_BUFFER |
2126 MPI_SGE_FLAGS_DIRECTION | 2174 MPI_SGE_FLAGS_DIRECTION)
2127 mpt_addr_size() )
2128 << MPI_SGE_FLAGS_SHIFT; 2175 << MPI_SGE_FLAGS_SHIFT;
2129 } else { 2176 } else {
2130 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; 2177 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2141 /* Set up this SGE. 2188 /* Set up this SGE.
2142 * Copy to MF and to sglbuf 2189 * Copy to MF and to sglbuf
2143 */ 2190 */
2144 mpt_add_sge(psge, flagsLength, dma_addr_out); 2191 ioc->add_sge(psge, flagsLength, dma_addr_out);
2145 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2192 psge += ioc->SGE_size;
2146 2193
2147 /* Copy user data to kernel space. 2194 /* Copy user data to kernel space.
2148 */ 2195 */
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2175 /* Set up this SGE 2222 /* Set up this SGE
2176 * Copy to MF and to sglbuf 2223 * Copy to MF and to sglbuf
2177 */ 2224 */
2178 mpt_add_sge(psge, flagsLength, dma_addr_in); 2225 ioc->add_sge(psge, flagsLength, dma_addr_in);
2179 } 2226 }
2180 } 2227 }
2181 } else { 2228 } else {
2182 /* Add a NULL SGE 2229 /* Add a NULL SGE
2183 */ 2230 */
2184 mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); 2231 ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
2185 } 2232 }
2186 2233
2187 ioc->ioctl->wait_done = 0; 2234 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
2235 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2188 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { 2236 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
2189 2237
2238 mutex_lock(&ioc->taskmgmt_cmds.mutex);
2239 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
2240 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2241 goto done_free_mem;
2242 }
2243
2190 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 2244 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
2191 2245
2192 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 2246 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2197 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); 2251 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
2198 if (rc != 0) { 2252 if (rc != 0) {
2199 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2253 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2200 "_send_handshake FAILED! (ioc %p, mf %p)\n", 2254 "send_handshake FAILED! (ioc %p, mf %p)\n",
2201 ioc->name, ioc, mf)); 2255 ioc->name, ioc, mf));
2202 mptctl_free_tm_flags(ioc); 2256 mpt_clear_taskmgmt_in_progress_flag(ioc);
2203 rc = -ENODATA; 2257 rc = -ENODATA;
2258 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2204 goto done_free_mem; 2259 goto done_free_mem;
2205 } 2260 }
2206 } 2261 }
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2210 2265
2211 /* Now wait for the command to complete */ 2266 /* Now wait for the command to complete */
2212 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2267 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2213 timeout = wait_event_timeout(mptctl_wait, 2268retry_wait:
2214 ioc->ioctl->wait_done == 1, 2269 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2215 HZ*timeout); 2270 HZ*timeout);
2216 2271 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2217 if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { 2272 rc = -ETIME;
2218 /* Now we need to reset the board */ 2273 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
2219 2274 ioc->name, __func__));
2220 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) 2275 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2221 mptctl_free_tm_flags(ioc); 2276 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2222 2277 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2223 mptctl_timeout_expired(ioc->ioctl); 2278 goto done_free_mem;
2224 rc = -ENODATA; 2279 }
2280 if (!timeleft) {
2281 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2282 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2283 mptctl_timeout_expired(ioc, mf);
2284 mf = NULL;
2285 } else
2286 goto retry_wait;
2225 goto done_free_mem; 2287 goto done_free_mem;
2226 } 2288 }
2227 2289
2290 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2291 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2292
2293
2228 mf = NULL; 2294 mf = NULL;
2229 2295
2230 /* If a valid reply frame, copy to the user. 2296 /* If a valid reply frame, copy to the user.
2231 * Offset 2: reply length in U32's 2297 * Offset 2: reply length in U32's
2232 */ 2298 */
2233 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { 2299 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
2234 if (karg.maxReplyBytes < ioc->reply_sz) { 2300 if (karg.maxReplyBytes < ioc->reply_sz) {
2235 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); 2301 sz = min(karg.maxReplyBytes,
2302 4*ioc->ioctl_cmds.reply[2]);
2236 } else { 2303 } else {
2237 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); 2304 sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
2238 } 2305 }
2239
2240 if (sz > 0) { 2306 if (sz > 0) {
2241 if (copy_to_user(karg.replyFrameBufPtr, 2307 if (copy_to_user(karg.replyFrameBufPtr,
2242 &ioc->ioctl->ReplyFrame, sz)){ 2308 ioc->ioctl_cmds.reply, sz)){
2243 printk(MYIOC_s_ERR_FMT 2309 printk(MYIOC_s_ERR_FMT
2244 "%s@%d::mptctl_do_mpt_command - " 2310 "%s@%d::mptctl_do_mpt_command - "
2245 "Unable to write out reply frame %p\n", 2311 "Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2252 2318
2253 /* If valid sense data, copy to user. 2319 /* If valid sense data, copy to user.
2254 */ 2320 */
2255 if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { 2321 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
2256 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); 2322 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
2257 if (sz > 0) { 2323 if (sz > 0) {
2258 if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { 2324 if (copy_to_user(karg.senseDataPtr,
2325 ioc->ioctl_cmds.sense, sz)) {
2259 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2326 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2260 "Unable to write sense data to user %p\n", 2327 "Unable to write sense data to user %p\n",
2261 ioc->name, __FILE__, __LINE__, 2328 ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2269 /* If the overall status is _GOOD and data in, copy data 2336 /* If the overall status is _GOOD and data in, copy data
2270 * to user. 2337 * to user.
2271 */ 2338 */
2272 if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && 2339 if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
2273 (karg.dataInSize > 0) && (bufIn.kptr)) { 2340 (karg.dataInSize > 0) && (bufIn.kptr)) {
2274 2341
2275 if (copy_to_user(karg.dataInBufPtr, 2342 if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2284 2351
2285done_free_mem: 2352done_free_mem:
2286 2353
2287 ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | 2354 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2288 MPT_IOCTL_STATUS_SENSE_VALID | 2355 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2289 MPT_IOCTL_STATUS_RF_VALID );
2290 2356
2291 /* Free the allocated memory. 2357 /* Free the allocated memory.
2292 */ 2358 */
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2336 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2402 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2337 MPT_FRAME_HDR *mf = NULL; 2403 MPT_FRAME_HDR *mf = NULL;
2338 MPIHeader_t *mpi_hdr; 2404 MPIHeader_t *mpi_hdr;
2405 unsigned long timeleft;
2406 int retval;
2339 2407
2340 /* Reset long to int. Should affect IA64 and SPARC only 2408 /* Reset long to int. Should affect IA64 and SPARC only
2341 */ 2409 */
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2466 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 2534 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
2467 2535
2468 if (hd && (cim_rev == 1)) { 2536 if (hd && (cim_rev == 1)) {
2469 karg.hard_resets = hd->hard_resets; 2537 karg.hard_resets = ioc->hard_resets;
2470 karg.soft_resets = hd->soft_resets; 2538 karg.soft_resets = ioc->soft_resets;
2471 karg.timeouts = hd->timeouts; 2539 karg.timeouts = ioc->timeouts;
2472 } 2540 }
2473 } 2541 }
2474 2542
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2476 * Gather ISTWI(Industry Standard Two Wire Interface) Data 2544 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2477 */ 2545 */
2478 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2546 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2479 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2547 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
2480 ioc->name,__func__)); 2548 "%s, no msg frames!!\n", ioc->name, __func__));
2481 goto out; 2549 goto out;
2482 } 2550 }
2483 2551
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2498 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2566 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2499 if (!pbuf) 2567 if (!pbuf)
2500 goto out; 2568 goto out;
2501 mpt_add_sge((char *)&IstwiRWRequest->SGL, 2569 ioc->add_sge((char *)&IstwiRWRequest->SGL,
2502 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2570 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2503 2571
2504 ioc->ioctl->wait_done = 0; 2572 retval = 0;
2573 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
2574 IstwiRWRequest->MsgContext);
2575 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2505 mpt_put_msg_frame(mptctl_id, ioc, mf); 2576 mpt_put_msg_frame(mptctl_id, ioc, mf);
2506 2577
2507 rc = wait_event_timeout(mptctl_wait, 2578retry_wait:
2508 ioc->ioctl->wait_done == 1, 2579 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2509 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2580 HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
2510 2581 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2511 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2582 retval = -ETIME;
2512 /* 2583 printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
2513 * Now we need to reset the board 2584 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2514 */ 2585 mpt_free_msg_frame(ioc, mf);
2515 mpt_free_msg_frame(ioc, mf); 2586 goto out;
2516 mptctl_timeout_expired(ioc->ioctl); 2587 }
2588 if (!timeleft)
2589 mptctl_timeout_expired(ioc, mf);
2590 else
2591 goto retry_wait;
2517 goto out; 2592 goto out;
2518 } 2593 }
2519 2594
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2526 * bays have drives in them 2601 * bays have drives in them
2527 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) 2602 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2528 */ 2603 */
2529 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2604 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
2530 karg.rsvd = *(u32 *)pbuf; 2605 karg.rsvd = *(u32 *)pbuf;
2531 2606
2532 out: 2607 out:
2608 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2609 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2610
2533 if (pbuf) 2611 if (pbuf)
2534 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2612 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2535 2613
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
2753 2831
2754 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); 2832 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
2755 2833
2756 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2834 mutex_unlock(&iocp->ioctl_cmds.mutex);
2757 2835
2758 return ret; 2836 return ret;
2759} 2837}
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
2807 */ 2885 */
2808 ret = mptctl_do_mpt_command (karg, &uarg->MF); 2886 ret = mptctl_do_mpt_command (karg, &uarg->MF);
2809 2887
2810 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2888 mutex_unlock(&iocp->ioctl_cmds.mutex);
2811 2889
2812 return ret; 2890 return ret;
2813} 2891}
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
2859static int 2937static int
2860mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2938mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2861{ 2939{
2862 MPT_IOCTL *mem;
2863 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2940 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2864 2941
2865 /* 2942 mutex_init(&ioc->ioctl_cmds.mutex);
2866 * Allocate and inite a MPT_IOCTL structure 2943 init_completion(&ioc->ioctl_cmds.done);
2867 */
2868 mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
2869 if (!mem) {
2870 mptctl_remove(pdev);
2871 return -ENOMEM;
2872 }
2873
2874 ioc->ioctl = mem;
2875 ioc->ioctl->ioc = ioc;
2876 mutex_init(&ioc->ioctl->ioctl_mutex);
2877 return 0; 2944 return 0;
2878} 2945}
2879 2946
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2887static void 2954static void
2888mptctl_remove(struct pci_dev *pdev) 2955mptctl_remove(struct pci_dev *pdev)
2889{ 2956{
2890 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2891
2892 kfree ( ioc->ioctl );
2893} 2957}
2894 2958
2895static struct mpt_pci_driver mptctl_driver = { 2959static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
2929 goto out_fail; 2993 goto out_fail;
2930 } 2994 }
2931 2995
2996 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
2932 mpt_reset_register(mptctl_id, mptctl_ioc_reset); 2997 mpt_reset_register(mptctl_id, mptctl_ioc_reset);
2933 mpt_event_register(mptctl_id, mptctl_event_process); 2998 mpt_event_register(mptctl_id, mptctl_event_process);
2934 2999
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
2953 3018
2954 /* De-register callback handler from base module */ 3019 /* De-register callback handler from base module */
2955 mpt_deregister(mptctl_id); 3020 mpt_deregister(mptctl_id);
3021 mpt_reset_deregister(mptctl_taskmgmt_id);
2956 3022
2957 mpt_device_driver_deregister(MPTCTL_DRIVER); 3023 mpt_device_driver_deregister(MPTCTL_DRIVER);
2958 3024
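
The mptctl.c hunks above replace the driver-private MPT_IOCTL wait-queue handshake with the shared ioctl_cmds management-status machinery used elsewhere in the fusion stack. Pieced together into one place, the new wait path looks roughly like the sketch below; the local declarations are illustrative, everything else follows the mptctl_hp_hostinfo() hunk.

	unsigned long timeleft;
	int retval = 0;

	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
	    IstwiRWRequest->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	mpt_put_msg_frame(mptctl_id, ioc, mf);

 retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
	    HZ * MPT_IOCTL_DEFAULT_TIMEOUT);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		retval = -ETIME;
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			mpt_free_msg_frame(ioc, mf);	/* an IOC reset aborted the request */
			goto out;
		}
		if (!timeleft)
			mptctl_timeout_expired(ioc, mf);	/* genuine timeout */
		else
			goto retry_wait;	/* woken without COMMAND_GOOD: wait again */
		goto out;
	}
	/* ... reply handling ... */
 out:
	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
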
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 510b9f492093..28e478879284 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -58,6 +58,7 @@
58#define MPT_DEBUG_FC 0x00080000 58#define MPT_DEBUG_FC 0x00080000
59#define MPT_DEBUG_SAS 0x00100000 59#define MPT_DEBUG_SAS 0x00100000
60#define MPT_DEBUG_SAS_WIDE 0x00200000 60#define MPT_DEBUG_SAS_WIDE 0x00200000
61#define MPT_DEBUG_36GB_MEM 0x00400000
61 62
62/* 63/*
63 * CONFIG_FUSION_LOGGING - enabled in Kconfig 64 * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
135#define dsaswideprintk(IOC, CMD) \ 136#define dsaswideprintk(IOC, CMD) \
136 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) 137 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
137 138
139#define d36memprintk(IOC, CMD) \
140 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
138 141
139 142
140/* 143/*
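
The new MPT_DEBUG_36GB_MEM bit and d36memprintk() follow the existing convention in this header: each dXXXprintk() wrapper expands through MPT_CHECK_LOGGING(), which only executes the printk when the matching bit is set in the adapter's debug level. A minimal usage sketch follows; the MPT_CHECK_LOGGING() body shown is an approximation of the definition earlier in mptdebug.h, and the message text is hypothetical.

/* approximate shape of the gate macro defined earlier in this header */
#define MPT_CHECK_LOGGING(IOC, CMD, BITS)		\
{							\
	if (IOC->debug_level & BITS)			\
		CMD;					\
}

/* typical call site for the new bit (message text is made up) */
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
    "using 36GB memory addressing workaround\n", ioc->name));
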
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c3c24fdf9fb6..e61df133a59e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1251 * A slightly different algorithm is required for 1251 * A slightly different algorithm is required for
1252 * 64bit SGEs. 1252 * 64bit SGEs.
1253 */ 1253 */
1254 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1254 scale = ioc->req_sz/ioc->SGE_size;
1255 if (sizeof(dma_addr_t) == sizeof(u64)) { 1255 if (ioc->sg_addr_size == sizeof(u64)) {
1256 numSGE = (scale - 1) * 1256 numSGE = (scale - 1) *
1257 (ioc->facts.MaxChainDepth-1) + scale + 1257 (ioc->facts.MaxChainDepth-1) + scale +
1258 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1258 (ioc->req_sz - 60) / ioc->SGE_size;
1259 sizeof(u32));
1260 } else { 1259 } else {
1261 numSGE = 1 + (scale - 1) * 1260 numSGE = 1 + (scale - 1) *
1262 (ioc->facts.MaxChainDepth-1) + scale + 1261 (ioc->facts.MaxChainDepth-1) + scale +
1263 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1262 (ioc->req_sz - 64) / ioc->SGE_size;
1264 sizeof(u32));
1265 } 1263 }
1266 1264
1267 if (numSGE < sh->sg_tablesize) { 1265 if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1292 1290
1293 /* Clear the TM flags 1291 /* Clear the TM flags
1294 */ 1292 */
1295 hd->tmPending = 0;
1296 hd->tmState = TM_STATE_NONE;
1297 hd->resetPending = 0;
1298 hd->abortSCpnt = NULL; 1293 hd->abortSCpnt = NULL;
1299 1294
1300 /* Clear the pointer used to store 1295 /* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1312 hd->timer.data = (unsigned long) hd; 1307 hd->timer.data = (unsigned long) hd;
1313 hd->timer.function = mptscsih_timer_expired; 1308 hd->timer.function = mptscsih_timer_expired;
1314 1309
1315 init_waitqueue_head(&hd->scandv_waitq);
1316 hd->scandv_wait_done = 0;
1317 hd->last_queue_full = 0; 1310 hd->last_queue_full = 0;
1318 1311
1319 sh->transportt = mptfc_transport_template; 1312 sh->transportt = mptfc_transport_template;
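
The sg_tablesize computation above now derives everything from the per-adapter ioc->SGE_size and ioc->sg_addr_size fields rather than compile-time sizeof() arithmetic, so one driver binary sizes its scatter-gather tables correctly for both 32-bit and 64-bit SGE formats. Below is a stand-alone worked example of the 64-bit branch; the request frame size, SGE size and chain depth are illustrative values, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* illustrative values: 128-byte request frame, 12-byte 64-bit SGE,
	 * firmware-reported MaxChainDepth of 8 */
	int req_sz = 128, sge_size = 12, max_chain_depth = 8;

	int scale  = req_sz / sge_size;				/* 10 */
	int numSGE = (scale - 1) * (max_chain_depth - 1)	/* 9 * 7 = 63 */
		   + scale					/* + 10 */
		   + (req_sz - 60) / sge_size;			/* + 5  => 78 */

	printf("scale=%d numSGE=%d\n", scale, numSGE);
	return 0;
}
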
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b97..20e0b447e8e8 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -93,8 +93,37 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; 93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ 94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; 95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
96 96static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
97static void mptsas_hotplug_work(struct work_struct *work); 97
98static void mptsas_firmware_event_work(struct work_struct *work);
99static void mptsas_send_sas_event(struct fw_event_work *fw_event);
100static void mptsas_send_raid_event(struct fw_event_work *fw_event);
101static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
102static void mptsas_parse_device_info(struct sas_identify *identify,
103 struct mptsas_devinfo *device_info);
104static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
105 struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
106static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
107 (MPT_ADAPTER *ioc, u64 sas_address);
108static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
109 struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
110static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
111 struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
112static int mptsas_add_end_device(MPT_ADAPTER *ioc,
113 struct mptsas_phyinfo *phy_info);
114static void mptsas_del_end_device(MPT_ADAPTER *ioc,
115 struct mptsas_phyinfo *phy_info);
116static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
117static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
118 (MPT_ADAPTER *ioc, u64 sas_address);
119static void mptsas_expander_delete(MPT_ADAPTER *ioc,
120 struct mptsas_portinfo *port_info, u8 force);
121static void mptsas_send_expander_event(struct fw_event_work *fw_event);
122static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
123static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
124static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
125static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
126static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
98 127
99static void mptsas_print_phy_data(MPT_ADAPTER *ioc, 128static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
100 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 129 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
218 le16_to_cpu(pg1->AttachedDevHandle))); 247 le16_to_cpu(pg1->AttachedDevHandle)));
219} 248}
220 249
221static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) 250/* inhibit sas firmware event handling */
251static void
252mptsas_fw_event_off(MPT_ADAPTER *ioc)
222{ 253{
223 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 254 unsigned long flags;
224 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 255
256 spin_lock_irqsave(&ioc->fw_event_lock, flags);
257 ioc->fw_events_off = 1;
258 ioc->sas_discovery_quiesce_io = 0;
259 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
260
225} 261}
226 262
227static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) 263/* enable sas firmware event handling */
264static void
265mptsas_fw_event_on(MPT_ADAPTER *ioc)
228{ 266{
229 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); 267 unsigned long flags;
230 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 268
269 spin_lock_irqsave(&ioc->fw_event_lock, flags);
270 ioc->fw_events_off = 0;
271 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
231} 272}
232 273
233static struct mptsas_portinfo * 274/* queue a sas firmware event */
234mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) 275static void
276mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
277 unsigned long delay)
235{ 278{
236 struct list_head *head = &ioc->sas_topology; 279 unsigned long flags;
237 struct mptsas_portinfo *pi = NULL; 280
281 spin_lock_irqsave(&ioc->fw_event_lock, flags);
282 list_add_tail(&fw_event->list, &ioc->fw_event_list);
283 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
284 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
285 ioc->name, __func__, fw_event));
286 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
287 delay);
288 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
289}
290
291/* requeue a sas firmware event */
292static void
293mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
294 unsigned long delay)
295{
296 unsigned long flags;
297 spin_lock_irqsave(&ioc->fw_event_lock, flags);
298 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
299 "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
300 fw_event->retries++;
301 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
302 msecs_to_jiffies(delay));
303 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
304}
305
 306/* free memory associated to a sas firmware event */
307static void
308mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
309{
310 unsigned long flags;
311
312 spin_lock_irqsave(&ioc->fw_event_lock, flags);
313 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
314 ioc->name, __func__, fw_event));
315 list_del(&fw_event->list);
316 kfree(fw_event);
317 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
318}
319
320/* walk the firmware event queue, and either stop or wait for
321 * outstanding events to complete */
322static void
323mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
324{
325 struct fw_event_work *fw_event, *next;
326 struct mptsas_target_reset_event *target_reset_list, *n;
327 u8 flush_q;
328 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
329
330 /* flush the target_reset_list */
331 if (!list_empty(&hd->target_reset_list)) {
332 list_for_each_entry_safe(target_reset_list, n,
333 &hd->target_reset_list, list) {
334 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
335 "%s: removing target reset for id=%d\n",
336 ioc->name, __func__,
337 target_reset_list->sas_event_data.TargetID));
338 list_del(&target_reset_list->list);
339 kfree(target_reset_list);
340 }
341 }
342
343 if (list_empty(&ioc->fw_event_list) ||
344 !ioc->fw_event_q || in_interrupt())
345 return;
238 346
239 /* always the first entry on sas_topology list */ 347 flush_q = 0;
348 list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
349 if (cancel_delayed_work(&fw_event->work))
350 mptsas_free_fw_event(ioc, fw_event);
351 else
352 flush_q = 1;
353 }
354 if (flush_q)
355 flush_workqueue(ioc->fw_event_q);
356}
240 357
241 if (!list_empty(head))
242 pi = list_entry(head->next, struct mptsas_portinfo, list);
243 358
244 return pi; 359static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
360{
361 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
362 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
363}
364
365static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
366{
367 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
368 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
245} 369}
246 370
247/* 371/*
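
All of the helpers added above hang their bookkeeping off struct fw_event_work, whose definition this patch places in mptsas.h and which is not reproduced in this hunk. The sketch below is the layout implied by the call sites (list handling, delayed work, retries counter, trailing event payload), treated as an assumption rather than the authoritative definition, together with the allocation idiom the queueing paths use later in this patch.

/* layout inferred from the call sites in this patch; the real definition
 * lives in drivers/message/fusion/mptsas.h and may differ in detail */
struct fw_event_work {
	struct list_head	list;		/* chained on ioc->fw_event_list */
	struct delayed_work	work;		/* queued on ioc->fw_event_q */
	MPT_ADAPTER		*ioc;
	u32			event;		/* MPI_EVENT_*, or -1 for a rescan */
	u8			retries;
	u8			event_data[1];	/* variable-length event payload */
};

/* allocation idiom used by the queueing helpers: size the buffer as
 * header plus payload so event_data can hold the copied MPI event data */
sz = offsetof(struct fw_event_work, event_data) +
	sizeof(MpiEventDataSasDeviceStatusChange_t);
fw_event = kzalloc(sz, GFP_ATOMIC);
memcpy(fw_event->event_data, sas_event_data,
	sizeof(MpiEventDataSasDeviceStatusChange_t));
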
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
265 return rc; 389 return rc;
266} 390}
267 391
392/**
393 * mptsas_find_portinfo_by_sas_address -
394 * @ioc: Pointer to MPT_ADAPTER structure
 395 * @sas_address:
396 *
397 * This function should be called with the sas_topology_mutex already held
398 *
399 **/
400static struct mptsas_portinfo *
401mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
402{
403 struct mptsas_portinfo *port_info, *rc = NULL;
404 int i;
405
406 if (sas_address >= ioc->hba_port_sas_addr &&
407 sas_address < (ioc->hba_port_sas_addr +
408 ioc->hba_port_num_phy))
409 return ioc->hba_port_info;
410
411 mutex_lock(&ioc->sas_topology_mutex);
412 list_for_each_entry(port_info, &ioc->sas_topology, list)
413 for (i = 0; i < port_info->num_phys; i++)
414 if (port_info->phy_info[i].identify.sas_address ==
415 sas_address) {
416 rc = port_info;
417 goto out;
418 }
419 out:
420 mutex_unlock(&ioc->sas_topology_mutex);
421 return rc;
422}
423
268/* 424/*
269 * Returns true if there is a scsi end device 425 * Returns true if there is a scsi end device
270 */ 426 */
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
308 if(phy_info->port_details != port_details) 464 if(phy_info->port_details != port_details)
309 continue; 465 continue;
310 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 466 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
467 mptsas_set_rphy(ioc, phy_info, NULL);
311 phy_info->port_details = NULL; 468 phy_info->port_details = NULL;
312 } 469 }
313 kfree(port_details); 470 kfree(port_details);
@@ -379,6 +536,285 @@ starget)
379 phy_info->port_details->starget = starget; 536 phy_info->port_details->starget = starget;
380} 537}
381 538
539/**
540 * mptsas_add_device_component -
541 * @ioc: Pointer to MPT_ADAPTER structure
542 * @channel: fw mapped id's
543 * @id:
544 * @sas_address:
545 * @device_info:
546 *
547 **/
548static void
549mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
550 u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
551{
552 struct mptsas_device_info *sas_info, *next;
553 struct scsi_device *sdev;
554 struct scsi_target *starget;
555 struct sas_rphy *rphy;
556
557 /*
558 * Delete all matching devices out of the list
559 */
560 mutex_lock(&ioc->sas_device_info_mutex);
561 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
562 list) {
563 if (!sas_info->is_logical_volume &&
564 (sas_info->sas_address == sas_address ||
565 (sas_info->fw.channel == channel &&
566 sas_info->fw.id == id))) {
567 list_del(&sas_info->list);
568 kfree(sas_info);
569 }
570 }
571
572 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
573 if (!sas_info)
574 goto out;
575
576 /*
577 * Set Firmware mapping
578 */
579 sas_info->fw.id = id;
580 sas_info->fw.channel = channel;
581
582 sas_info->sas_address = sas_address;
583 sas_info->device_info = device_info;
584 sas_info->slot = slot;
585 sas_info->enclosure_logical_id = enclosure_logical_id;
586 INIT_LIST_HEAD(&sas_info->list);
587 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
588
589 /*
590 * Set OS mapping
591 */
592 shost_for_each_device(sdev, ioc->sh) {
593 starget = scsi_target(sdev);
594 rphy = dev_to_rphy(starget->dev.parent);
595 if (rphy->identify.sas_address == sas_address) {
596 sas_info->os.id = starget->id;
597 sas_info->os.channel = starget->channel;
598 }
599 }
600
601 out:
602 mutex_unlock(&ioc->sas_device_info_mutex);
603 return;
604}
605
606/**
607 * mptsas_add_device_component_by_fw -
608 * @ioc: Pointer to MPT_ADAPTER structure
609 * @channel: fw mapped id's
610 * @id:
611 *
612 **/
613static void
614mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
615{
616 struct mptsas_devinfo sas_device;
617 struct mptsas_enclosure enclosure_info;
618 int rc;
619
620 rc = mptsas_sas_device_pg0(ioc, &sas_device,
621 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
622 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
623 (channel << 8) + id);
624 if (rc)
625 return;
626
627 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
628 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
629 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
630 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
631 sas_device.handle_enclosure);
632
633 mptsas_add_device_component(ioc, sas_device.channel,
634 sas_device.id, sas_device.sas_address, sas_device.device_info,
635 sas_device.slot, enclosure_info.enclosure_logical_id);
636}
637
638/**
639 * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
640 * @ioc: Pointer to MPT_ADAPTER structure
641 * @channel: fw mapped id's
642 * @id:
643 *
644 **/
645static void
646mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
647 struct scsi_target *starget)
648{
649 CONFIGPARMS cfg;
650 ConfigPageHeader_t hdr;
651 dma_addr_t dma_handle;
652 pRaidVolumePage0_t buffer = NULL;
653 int i;
654 RaidPhysDiskPage0_t phys_disk;
655 struct mptsas_device_info *sas_info, *next;
656
657 memset(&cfg, 0 , sizeof(CONFIGPARMS));
658 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
659 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
660 /* assumption that all volumes on channel = 0 */
661 cfg.pageAddr = starget->id;
662 cfg.cfghdr.hdr = &hdr;
663 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
664 cfg.timeout = 10;
665
666 if (mpt_config(ioc, &cfg) != 0)
667 goto out;
668
669 if (!hdr.PageLength)
670 goto out;
671
672 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
673 &dma_handle);
674
675 if (!buffer)
676 goto out;
677
678 cfg.physAddr = dma_handle;
679 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
680
681 if (mpt_config(ioc, &cfg) != 0)
682 goto out;
683
684 if (!buffer->NumPhysDisks)
685 goto out;
686
687 /*
688 * Adding entry for hidden components
689 */
690 for (i = 0; i < buffer->NumPhysDisks; i++) {
691
692 if (mpt_raid_phys_disk_pg0(ioc,
693 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
694 continue;
695
696 mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
697 phys_disk.PhysDiskID);
698
699 mutex_lock(&ioc->sas_device_info_mutex);
700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
701 list) {
702 if (!sas_info->is_logical_volume &&
703 (sas_info->fw.channel == phys_disk.PhysDiskBus &&
704 sas_info->fw.id == phys_disk.PhysDiskID)) {
705 sas_info->is_hidden_raid_component = 1;
706 sas_info->volume_id = starget->id;
707 }
708 }
709 mutex_unlock(&ioc->sas_device_info_mutex);
710
711 }
712
713 /*
714 * Delete all matching devices out of the list
715 */
716 mutex_lock(&ioc->sas_device_info_mutex);
717 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
718 list) {
719 if (sas_info->is_logical_volume && sas_info->fw.id ==
720 starget->id) {
721 list_del(&sas_info->list);
722 kfree(sas_info);
723 }
724 }
725
726 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
727 if (sas_info) {
728 sas_info->fw.id = starget->id;
729 sas_info->os.id = starget->id;
730 sas_info->os.channel = starget->channel;
731 sas_info->is_logical_volume = 1;
732 INIT_LIST_HEAD(&sas_info->list);
733 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
734 }
735 mutex_unlock(&ioc->sas_device_info_mutex);
736
737 out:
738 if (buffer)
739 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
740 dma_handle);
741}
742
743/**
744 * mptsas_add_device_component_starget -
745 * @ioc: Pointer to MPT_ADAPTER structure
746 * @starget:
747 *
748 **/
749static void
750mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
751 struct scsi_target *starget)
752{
753 VirtTarget *vtarget;
754 struct sas_rphy *rphy;
755 struct mptsas_phyinfo *phy_info = NULL;
756 struct mptsas_enclosure enclosure_info;
757
758 rphy = dev_to_rphy(starget->dev.parent);
759 vtarget = starget->hostdata;
760 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
761 rphy->identify.sas_address);
762 if (!phy_info)
763 return;
764
765 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
766 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
767 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
768 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
769 phy_info->attached.handle_enclosure);
770
771 mptsas_add_device_component(ioc, phy_info->attached.channel,
772 phy_info->attached.id, phy_info->attached.sas_address,
773 phy_info->attached.device_info,
774 phy_info->attached.slot, enclosure_info.enclosure_logical_id);
775}
776
777/**
778 * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
779 * @ioc: Pointer to MPT_ADAPTER structure
780 * @channel: os mapped id's
781 * @id:
782 *
783 **/
784static void
785mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
786{
787 struct mptsas_device_info *sas_info, *next;
788
789 /*
790 * Set is_cached flag
791 */
792 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
793 list) {
794 if (sas_info->os.channel == channel && sas_info->os.id == id)
795 sas_info->is_cached = 1;
796 }
797}
798
799/**
800 * mptsas_del_device_components - Cleaning the list
801 * @ioc: Pointer to MPT_ADAPTER structure
802 *
803 **/
804static void
805mptsas_del_device_components(MPT_ADAPTER *ioc)
806{
807 struct mptsas_device_info *sas_info, *next;
808
809 mutex_lock(&ioc->sas_device_info_mutex);
810 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
811 list) {
812 list_del(&sas_info->list);
813 kfree(sas_info);
814 }
815 mutex_unlock(&ioc->sas_device_info_mutex);
816}
817
382 818
383/* 819/*
384 * mptsas_setup_wide_ports 820 * mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
434 * Forming a port 870 * Forming a port
435 */ 871 */
436 if (!port_details) { 872 if (!port_details) {
437 port_details = kzalloc(sizeof(*port_details), 873 port_details = kzalloc(sizeof(struct
438 GFP_KERNEL); 874 mptsas_portinfo_details), GFP_KERNEL);
439 if (!port_details) 875 if (!port_details)
440 goto out; 876 goto out;
441 port_details->num_phys = 1; 877 port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
523 VirtTarget *vtarget = NULL; 959 VirtTarget *vtarget = NULL;
524 960
525 shost_for_each_device(sdev, ioc->sh) { 961 shost_for_each_device(sdev, ioc->sh) {
526 if ((vdevice = sdev->hostdata) == NULL) 962 vdevice = sdev->hostdata;
963 if ((vdevice == NULL) ||
964 (vdevice->vtarget == NULL))
965 continue;
966 if ((vdevice->vtarget->tflags &
967 MPT_TARGET_FLAGS_RAID_COMPONENT ||
968 vdevice->vtarget->raidVolume))
527 continue; 969 continue;
528 if (vdevice->vtarget->id == id && 970 if (vdevice->vtarget->id == id &&
529 vdevice->vtarget->channel == channel) 971 vdevice->vtarget->channel == channel)
530 vtarget = vdevice->vtarget; 972 vtarget = vdevice->vtarget;
531 } 973 }
532 return vtarget; 974 return vtarget;
533} 975}
534 976
977static void
978mptsas_queue_device_delete(MPT_ADAPTER *ioc,
979 MpiEventDataSasDeviceStatusChange_t *sas_event_data)
980{
981 struct fw_event_work *fw_event;
982 int sz;
983
984 sz = offsetof(struct fw_event_work, event_data) +
985 sizeof(MpiEventDataSasDeviceStatusChange_t);
986 fw_event = kzalloc(sz, GFP_ATOMIC);
987 if (!fw_event) {
988 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
989 ioc->name, __func__, __LINE__);
990 return;
991 }
992 memcpy(fw_event->event_data, sas_event_data,
993 sizeof(MpiEventDataSasDeviceStatusChange_t));
994 fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
995 fw_event->ioc = ioc;
996 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
997}
998
999static void
1000mptsas_queue_rescan(MPT_ADAPTER *ioc)
1001{
1002 struct fw_event_work *fw_event;
1003 int sz;
1004
1005 sz = offsetof(struct fw_event_work, event_data);
1006 fw_event = kzalloc(sz, GFP_ATOMIC);
1007 if (!fw_event) {
1008 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
1009 ioc->name, __func__, __LINE__);
1010 return;
1011 }
1012 fw_event->event = -1;
1013 fw_event->ioc = ioc;
1014 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
1015}
1016
1017
535/** 1018/**
536 * mptsas_target_reset 1019 * mptsas_target_reset
537 * 1020 *
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
550{ 1033{
551 MPT_FRAME_HDR *mf; 1034 MPT_FRAME_HDR *mf;
552 SCSITaskMgmt_t *pScsiTm; 1035 SCSITaskMgmt_t *pScsiTm;
553 1036 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
556 ioc->name,__func__, __LINE__));
557 return 0; 1037 return 0;
1038
1039
1040 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
1041 if (mf == NULL) {
1042 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
1043 "%s, no msg frames @%d!!\n", ioc->name,
1044 __func__, __LINE__));
1045 goto out_fail;
558 } 1046 }
559 1047
1048 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1049 ioc->name, mf));
1050
560 /* Format the Request 1051 /* Format the Request
561 */ 1052 */
562 pScsiTm = (SCSITaskMgmt_t *) mf; 1053 pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
569 1060
570 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 1061 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
571 1062
572 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1063 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1064 "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
1065 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
1066
1067 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
573 1068
574 return 1; 1069 return 1;
1070
1071 out_fail:
1072
1073 mpt_clear_taskmgmt_in_progress_flag(ioc);
1074 return 0;
575} 1075}
576 1076
577/** 1077/**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
602 1102
603 vtarget->deleted = 1; /* block IO */ 1103 vtarget->deleted = 1; /* block IO */
604 1104
605 target_reset_list = kzalloc(sizeof(*target_reset_list), 1105 target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
606 GFP_ATOMIC); 1106 GFP_ATOMIC);
607 if (!target_reset_list) { 1107 if (!target_reset_list) {
608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 1108 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
609 ioc->name,__func__, __LINE__)); 1109 "%s, failed to allocate mem @%d..!!\n",
1110 ioc->name, __func__, __LINE__));
610 return; 1111 return;
611 } 1112 }
612 1113
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
614 sizeof(*sas_event_data)); 1115 sizeof(*sas_event_data));
615 list_add_tail(&target_reset_list->list, &hd->target_reset_list); 1116 list_add_tail(&target_reset_list->list, &hd->target_reset_list);
616 1117
617 if (hd->resetPending) 1118 target_reset_list->time_count = jiffies;
618 return;
619 1119
620 if (mptsas_target_reset(ioc, channel, id)) { 1120 if (mptsas_target_reset(ioc, channel, id)) {
621 target_reset_list->target_reset_issued = 1; 1121 target_reset_list->target_reset_issued = 1;
622 hd->resetPending = 1;
623 } 1122 }
624} 1123}
625 1124
626/** 1125/**
627 * mptsas_dev_reset_complete 1126 * mptsas_taskmgmt_complete - complete SAS task management function
628 * 1127 * @ioc: Pointer to MPT_ADAPTER structure
629 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
630 * enable work queue to finish off removing device from upper layers.
631 * then send next TARGET_RESET in the queue.
632 *
633 * @ioc
634 * 1128 *
1129 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
1130 * queue to finish off removing device from upper layers. then send next
1131 * TARGET_RESET in the queue.
635 **/ 1132 **/
636static void 1133static int
637mptsas_dev_reset_complete(MPT_ADAPTER *ioc) 1134mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
638{ 1135{
639 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 1136 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
640 struct list_head *head = &hd->target_reset_list; 1137 struct list_head *head = &hd->target_reset_list;
641 struct mptsas_target_reset_event *target_reset_list;
642 struct mptsas_hotplug_event *ev;
643 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
644 u8 id, channel; 1138 u8 id, channel;
645 __le64 sas_address; 1139 struct mptsas_target_reset_event *target_reset_list;
1140 SCSITaskMgmtReply_t *pScsiTmReply;
1141
1142 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
1143 "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
1144
1145 pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
1146 if (pScsiTmReply) {
1147 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1148 "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
1149 "\ttask_type = 0x%02X, iocstatus = 0x%04X "
1150 "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
1151 "term_cmnds = %d\n", ioc->name,
1152 pScsiTmReply->Bus, pScsiTmReply->TargetID,
1153 pScsiTmReply->TaskType,
1154 le16_to_cpu(pScsiTmReply->IOCStatus),
1155 le32_to_cpu(pScsiTmReply->IOCLogInfo),
1156 pScsiTmReply->ResponseCode,
1157 le32_to_cpu(pScsiTmReply->TerminationCount)));
1158
1159 if (pScsiTmReply->ResponseCode)
1160 mptscsih_taskmgmt_response_code(ioc,
1161 pScsiTmReply->ResponseCode);
1162 }
1163
1164 if (pScsiTmReply && (pScsiTmReply->TaskType ==
1165 MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
1166 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
1167 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1168 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
1169 memcpy(ioc->taskmgmt_cmds.reply, mr,
1170 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
1171 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
1172 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
1173 complete(&ioc->taskmgmt_cmds.done);
1174 return 1;
1175 }
1176 return 0;
1177 }
1178
1179 mpt_clear_taskmgmt_in_progress_flag(ioc);
646 1180
647 if (list_empty(head)) 1181 if (list_empty(head))
648 return; 1182 return 1;
649 1183
650 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); 1184 target_reset_list = list_entry(head->next,
1185 struct mptsas_target_reset_event, list);
651 1186
652 sas_event_data = &target_reset_list->sas_event_data; 1187 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
653 id = sas_event_data->TargetID; 1188 "TaskMgmt: completed (%d seconds)\n",
654 channel = sas_event_data->Bus; 1189 ioc->name, jiffies_to_msecs(jiffies -
655 hd->resetPending = 0; 1190 target_reset_list->time_count)/1000));
1191
1192 id = pScsiTmReply->TargetID;
1193 channel = pScsiTmReply->Bus;
1194 target_reset_list->time_count = jiffies;
656 1195
657 /* 1196 /*
658 * retry target reset 1197 * retry target reset
659 */ 1198 */
660 if (!target_reset_list->target_reset_issued) { 1199 if (!target_reset_list->target_reset_issued) {
661 if (mptsas_target_reset(ioc, channel, id)) { 1200 if (mptsas_target_reset(ioc, channel, id))
662 target_reset_list->target_reset_issued = 1; 1201 target_reset_list->target_reset_issued = 1;
663 hd->resetPending = 1; 1202 return 1;
664 }
665 return;
666 } 1203 }
667 1204
668 /* 1205 /*
669 * enable work queue to remove device from upper layers 1206 * enable work queue to remove device from upper layers
670 */ 1207 */
671 list_del(&target_reset_list->list); 1208 list_del(&target_reset_list->list);
1209 if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
1210 mptsas_queue_device_delete(ioc,
1211 &target_reset_list->sas_event_data);
672 1212
673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
674 if (!ev) {
675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
676 ioc->name,__func__, __LINE__));
677 return;
678 }
679
680 INIT_WORK(&ev->work, mptsas_hotplug_work);
681 ev->ioc = ioc;
682 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
683 ev->parent_handle =
684 le16_to_cpu(sas_event_data->ParentDevHandle);
685 ev->channel = channel;
686 ev->id =id;
687 ev->phy_id = sas_event_data->PhyNum;
688 memcpy(&sas_address, &sas_event_data->SASAddress,
689 sizeof(__le64));
690 ev->sas_address = le64_to_cpu(sas_address);
691 ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
692 ev->event_type = MPTSAS_DEL_DEVICE;
693 schedule_work(&ev->work);
694 kfree(target_reset_list);
695 1213
696 /* 1214 /*
697 * issue target reset to next device in the queue 1215 * issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
699 1217
700 head = &hd->target_reset_list; 1218 head = &hd->target_reset_list;
701 if (list_empty(head)) 1219 if (list_empty(head))
702 return; 1220 return 1;
703 1221
704 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, 1222 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
705 list); 1223 list);
706 1224
707 sas_event_data = &target_reset_list->sas_event_data; 1225 id = target_reset_list->sas_event_data.TargetID;
708 id = sas_event_data->TargetID; 1226 channel = target_reset_list->sas_event_data.Bus;
709 channel = sas_event_data->Bus; 1227 target_reset_list->time_count = jiffies;
710 1228
711 if (mptsas_target_reset(ioc, channel, id)) { 1229 if (mptsas_target_reset(ioc, channel, id))
712 target_reset_list->target_reset_issued = 1; 1230 target_reset_list->target_reset_issued = 1;
713 hd->resetPending = 1;
714 }
715}
716 1231
717/** 1232 return 1;
718 * mptsas_taskmgmt_complete
719 *
720 * @ioc
721 * @mf
722 * @mr
723 *
724 **/
725static int
726mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
727{
728 mptsas_dev_reset_complete(ioc);
729 return mptscsih_taskmgmt_complete(ioc, mf, mr);
730} 1233}
731 1234
732/** 1235/**
@@ -740,37 +1243,59 @@ static int
740mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 1243mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
741{ 1244{
742 MPT_SCSI_HOST *hd; 1245 MPT_SCSI_HOST *hd;
743 struct mptsas_target_reset_event *target_reset_list, *n;
744 int rc; 1246 int rc;
745 1247
746 rc = mptscsih_ioc_reset(ioc, reset_phase); 1248 rc = mptscsih_ioc_reset(ioc, reset_phase);
1249 if ((ioc->bus_type != SAS) || (!rc))
1250 return rc;
747 1251
748 if (ioc->bus_type != SAS)
749 goto out;
750
751 if (reset_phase != MPT_IOC_POST_RESET)
752 goto out;
753
754 if (!ioc->sh || !ioc->sh->hostdata)
755 goto out;
756 hd = shost_priv(ioc->sh); 1252 hd = shost_priv(ioc->sh);
757 if (!hd->ioc) 1253 if (!hd->ioc)
758 goto out; 1254 goto out;
759 1255
760 if (list_empty(&hd->target_reset_list)) 1256 switch (reset_phase) {
761 goto out; 1257 case MPT_IOC_SETUP_RESET:
762 1258 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
763 /* flush the target_reset_list */ 1259 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
764 list_for_each_entry_safe(target_reset_list, n, 1260 mptsas_fw_event_off(ioc);
765 &hd->target_reset_list, list) { 1261 break;
766 list_del(&target_reset_list->list); 1262 case MPT_IOC_PRE_RESET:
767 kfree(target_reset_list); 1263 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1264 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
1265 break;
1266 case MPT_IOC_POST_RESET:
1267 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1268 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
1269 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1270 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
1271 complete(&ioc->sas_mgmt.done);
1272 }
1273 mptsas_cleanup_fw_event_q(ioc);
1274 mptsas_queue_rescan(ioc);
1275 mptsas_fw_event_on(ioc);
1276 break;
1277 default:
1278 break;
768 } 1279 }
769 1280
770 out: 1281 out:
771 return rc; 1282 return rc;
772} 1283}
773 1284
1285
1286/**
1287 * enum device_state -
1288 * @DEVICE_RETRY: need to retry the TUR
1289 * @DEVICE_ERROR: TUR return error, don't add device
1290 * @DEVICE_READY: device can be added
1291 *
1292 */
1293enum device_state{
1294 DEVICE_RETRY,
1295 DEVICE_ERROR,
1296 DEVICE_READY,
1297};
1298
774static int 1299static int
775mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 1300mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
776 u32 form, u32 form_specific) 1301 u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
836 return error; 1361 return error;
837} 1362}
838 1363
1364/**
1365 * mptsas_add_end_device - report a new end device to sas transport layer
1366 * @ioc: Pointer to MPT_ADAPTER structure
 1367 * @phy_info: describes attached device
1368 *
1369 * return (0) success (1) failure
1370 *
1371 **/
1372static int
1373mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1374{
1375 struct sas_rphy *rphy;
1376 struct sas_port *port;
1377 struct sas_identify identify;
1378 char *ds = NULL;
1379 u8 fw_id;
1380
1381 if (!phy_info) {
1382 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1383 "%s: exit at line=%d\n", ioc->name,
1384 __func__, __LINE__));
1385 return 1;
1386 }
1387
1388 fw_id = phy_info->attached.id;
1389
1390 if (mptsas_get_rphy(phy_info)) {
1391 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1392 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1393 __func__, fw_id, __LINE__));
1394 return 2;
1395 }
1396
1397 port = mptsas_get_port(phy_info);
1398 if (!port) {
1399 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1400 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1401 __func__, fw_id, __LINE__));
1402 return 3;
1403 }
1404
1405 if (phy_info->attached.device_info &
1406 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1407 ds = "ssp";
1408 if (phy_info->attached.device_info &
1409 MPI_SAS_DEVICE_INFO_STP_TARGET)
1410 ds = "stp";
1411 if (phy_info->attached.device_info &
1412 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1413 ds = "sata";
1414
1415 printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
1416 " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
1417 phy_info->attached.channel, phy_info->attached.id,
1418 phy_info->attached.phy_id, (unsigned long long)
1419 phy_info->attached.sas_address);
1420
1421 mptsas_parse_device_info(&identify, &phy_info->attached);
1422 rphy = sas_end_device_alloc(port);
1423 if (!rphy) {
1424 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1425 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1426 __func__, fw_id, __LINE__));
1427 return 5; /* non-fatal: an rphy can be added later */
1428 }
1429
1430 rphy->identify = identify;
1431 if (sas_rphy_add(rphy)) {
1432 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1433 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1434 __func__, fw_id, __LINE__));
1435 sas_rphy_free(rphy);
1436 return 6;
1437 }
1438 mptsas_set_rphy(ioc, phy_info, rphy);
1439 return 0;
1440}
1441
1442/**
1443 * mptsas_del_end_device - report a deleted end device to sas transport layer
1444 * @ioc: Pointer to MPT_ADAPTER structure
 1445 * @phy_info: describes attached device
1446 *
1447 **/
1448static void
1449mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1450{
1451 struct sas_rphy *rphy;
1452 struct sas_port *port;
1453 struct mptsas_portinfo *port_info;
1454 struct mptsas_phyinfo *phy_info_parent;
1455 int i;
1456 char *ds = NULL;
1457 u8 fw_id;
1458 u64 sas_address;
1459
1460 if (!phy_info)
1461 return;
1462
1463 fw_id = phy_info->attached.id;
1464 sas_address = phy_info->attached.sas_address;
1465
1466 if (!phy_info->port_details) {
1467 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1468 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1469 __func__, fw_id, __LINE__));
1470 return;
1471 }
1472 rphy = mptsas_get_rphy(phy_info);
1473 if (!rphy) {
1474 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1475 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1476 __func__, fw_id, __LINE__));
1477 return;
1478 }
1479
1480 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
1481 || phy_info->attached.device_info
1482 & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
1483 || phy_info->attached.device_info
1484 & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
1485 ds = "initiator";
1486 if (phy_info->attached.device_info &
1487 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1488 ds = "ssp";
1489 if (phy_info->attached.device_info &
1490 MPI_SAS_DEVICE_INFO_STP_TARGET)
1491 ds = "stp";
1492 if (phy_info->attached.device_info &
1493 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1494 ds = "sata";
1495
1496 dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
1497 "removing %s device: fw_channel %d, fw_id %d, phy %d,"
1498 "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
1499 phy_info->attached.id, phy_info->attached.phy_id,
1500 (unsigned long long) sas_address);
1501
1502 port = mptsas_get_port(phy_info);
1503 if (!port) {
1504 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1505 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1506 __func__, fw_id, __LINE__));
1507 return;
1508 }
1509 port_info = phy_info->portinfo;
1510 phy_info_parent = port_info->phy_info;
1511 for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
1512 if (!phy_info_parent->phy)
1513 continue;
1514 if (phy_info_parent->attached.sas_address !=
1515 sas_address)
1516 continue;
1517 dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
1518 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
1519 ioc->name, phy_info_parent->phy_id,
1520 phy_info_parent->phy);
1521 sas_port_delete_phy(port, phy_info_parent->phy);
1522 }
1523
1524 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
1525 "delete port %d, sas_addr (0x%llx)\n", ioc->name,
1526 port->port_identifier, (unsigned long long)sas_address);
1527 sas_port_delete(port);
1528 mptsas_set_port(ioc, phy_info, NULL);
1529 mptsas_port_delete(ioc, phy_info->port_details);
1530}
1531
1532struct mptsas_phyinfo *
1533mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
1534 struct mptsas_devinfo *sas_device)
1535{
1536 struct mptsas_phyinfo *phy_info;
1537 struct mptsas_portinfo *port_info;
1538 int i;
1539
1540 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
1541 sas_device->sas_address);
1542 if (!phy_info)
1543 goto out;
1544 port_info = phy_info->portinfo;
1545 if (!port_info)
1546 goto out;
1547 mutex_lock(&ioc->sas_topology_mutex);
1548 for (i = 0; i < port_info->num_phys; i++) {
1549 if (port_info->phy_info[i].attached.sas_address !=
1550 sas_device->sas_address)
1551 continue;
1552 port_info->phy_info[i].attached.channel = sas_device->channel;
1553 port_info->phy_info[i].attached.id = sas_device->id;
1554 port_info->phy_info[i].attached.sas_address =
1555 sas_device->sas_address;
1556 port_info->phy_info[i].attached.handle = sas_device->handle;
1557 port_info->phy_info[i].attached.handle_parent =
1558 sas_device->handle_parent;
1559 port_info->phy_info[i].attached.handle_enclosure =
1560 sas_device->handle_enclosure;
1561 }
1562 mutex_unlock(&ioc->sas_topology_mutex);
1563 out:
1564 return phy_info;
1565}
1566
1567/**
1568 * mptsas_firmware_event_work - work thread for processing fw events
1569 * @work: work queue payload containing info describing the event
1570 * Context: user
1571 *
1572 */
1573static void
1574mptsas_firmware_event_work(struct work_struct *work)
1575{
1576 struct fw_event_work *fw_event =
1577 container_of(work, struct fw_event_work, work.work);
1578 MPT_ADAPTER *ioc = fw_event->ioc;
1579
1580 /* special rescan topology handling */
1581 if (fw_event->event == -1) {
1582 if (ioc->in_rescan) {
1583 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1584 "%s: rescan ignored as it is in progress\n",
1585 ioc->name, __func__));
1586 return;
1587 }
1588 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
1589 "reset\n", ioc->name, __func__));
1590 ioc->in_rescan = 1;
1591 mptsas_not_responding_devices(ioc);
1592 mptsas_scan_sas_topology(ioc);
1593 ioc->in_rescan = 0;
1594 mptsas_free_fw_event(ioc, fw_event);
1595 return;
1596 }
1597
1598 /* events handling turned off during host reset */
1599 if (ioc->fw_events_off) {
1600 mptsas_free_fw_event(ioc, fw_event);
1601 return;
1602 }
1603
1604 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
1605 "event = (0x%02x)\n", ioc->name, __func__, fw_event,
1606 (fw_event->event & 0xFF)));
1607
1608 switch (fw_event->event) {
1609 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1610 mptsas_send_sas_event(fw_event);
1611 break;
1612 case MPI_EVENT_INTEGRATED_RAID:
1613 mptsas_send_raid_event(fw_event);
1614 break;
1615 case MPI_EVENT_IR2:
1616 mptsas_send_ir2_event(fw_event);
1617 break;
1618 case MPI_EVENT_PERSISTENT_TABLE_FULL:
1619 mptbase_sas_persist_operation(ioc,
1620 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1621 mptsas_free_fw_event(ioc, fw_event);
1622 break;
1623 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
1624 mptsas_broadcast_primative_work(fw_event);
1625 break;
1626 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
1627 mptsas_send_expander_event(fw_event);
1628 break;
1629 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1630 mptsas_send_link_status_event(fw_event);
1631 break;
1632 case MPI_EVENT_QUEUE_FULL:
1633 mptsas_handle_queue_full_event(fw_event);
1634 break;
1635 }
1636}
1637
1638
1639
839static int 1640static int
840mptsas_slave_configure(struct scsi_device *sdev) 1641mptsas_slave_configure(struct scsi_device *sdev)
841{ 1642{
1643 struct Scsi_Host *host = sdev->host;
1644 MPT_SCSI_HOST *hd = shost_priv(host);
1645 MPT_ADAPTER *ioc = hd->ioc;
1646 VirtDevice *vdevice = sdev->hostdata;
842 1647
843 if (sdev->channel == MPTSAS_RAID_CHANNEL) 1648 if (vdevice->vtarget->deleted) {
1649 sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
1650 vdevice->vtarget->deleted = 0;
1651 }
1652
1653 /*
1654 * RAID volumes placed beyond the last expected port.
1655 * Ignore sending sas mode pages in that case..
1656 */
1657 if (sdev->channel == MPTSAS_RAID_CHANNEL) {
1658 mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
844 goto out; 1659 goto out;
1660 }
845 1661
846 sas_read_port_mode_page(sdev); 1662 sas_read_port_mode_page(sdev);
847 1663
1664 mptsas_add_device_component_starget(ioc, scsi_target(sdev));
1665
848 out: 1666 out:
849 return mptscsih_slave_configure(sdev); 1667 return mptscsih_slave_configure(sdev);
850} 1668}
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
875 * RAID volumes placed beyond the last expected port. 1693 * RAID volumes placed beyond the last expected port.
876 */ 1694 */
877 if (starget->channel == MPTSAS_RAID_CHANNEL) { 1695 if (starget->channel == MPTSAS_RAID_CHANNEL) {
878 for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 1696 if (!ioc->raid_data.pIocPg2) {
879 if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 1697 kfree(vtarget);
880 channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 1698 return -ENXIO;
1699 }
1700 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1701 if (id == ioc->raid_data.pIocPg2->
1702 RaidVolume[i].VolumeID) {
1703 channel = ioc->raid_data.pIocPg2->
1704 RaidVolume[i].VolumeBus;
1705 }
1706 }
1707 vtarget->raidVolume = 1;
881 goto out; 1708 goto out;
882 } 1709 }
883 1710
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
926 struct sas_rphy *rphy; 1753 struct sas_rphy *rphy;
927 struct mptsas_portinfo *p; 1754 struct mptsas_portinfo *p;
928 int i; 1755 int i;
929 MPT_ADAPTER *ioc = hd->ioc; 1756 MPT_ADAPTER *ioc = hd->ioc;
1757 VirtTarget *vtarget;
930 1758
931 if (!starget->hostdata) 1759 if (!starget->hostdata)
932 return; 1760 return;
933 1761
1762 vtarget = starget->hostdata;
1763
1764 mptsas_del_device_component_by_os(ioc, starget->channel,
1765 starget->id);
1766
1767
934 if (starget->channel == MPTSAS_RAID_CHANNEL) 1768 if (starget->channel == MPTSAS_RAID_CHANNEL)
935 goto out; 1769 goto out;
936 1770
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
940 if (p->phy_info[i].attached.sas_address != 1774 if (p->phy_info[i].attached.sas_address !=
941 rphy->identify.sas_address) 1775 rphy->identify.sas_address)
942 continue; 1776 continue;
1777
1778 starget_printk(KERN_INFO, starget, MYIOC_s_FMT
1779 "delete device: fw_channel %d, fw_id %d, phy %d, "
1780 "sas_addr 0x%llx\n", ioc->name,
1781 p->phy_info[i].attached.channel,
1782 p->phy_info[i].attached.id,
1783 p->phy_info[i].attached.phy_id, (unsigned long long)
1784 p->phy_info[i].attached.sas_address);
1785
943 mptsas_set_starget(&p->phy_info[i], NULL); 1786 mptsas_set_starget(&p->phy_info[i], NULL);
944 goto out;
945 } 1787 }
946 } 1788 }
947 1789
948 out: 1790 out:
1791 vtarget->starget = NULL;
949 kfree(starget->hostdata); 1792 kfree(starget->hostdata);
950 starget->hostdata = NULL; 1793 starget->hostdata = NULL;
951} 1794}
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
1008static int 1851static int
1009mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1852mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1010{ 1853{
1854 MPT_SCSI_HOST *hd;
1855 MPT_ADAPTER *ioc;
1011 VirtDevice *vdevice = SCpnt->device->hostdata; 1856 VirtDevice *vdevice = SCpnt->device->hostdata;
1012 1857
1013 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { 1858 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1016 return 0; 1861 return 0;
1017 } 1862 }
1018 1863
1864 hd = shost_priv(SCpnt->device->host);
1865 ioc = hd->ioc;
1866
1867 if (ioc->sas_discovery_quiesce_io)
1868 return SCSI_MLQUEUE_HOST_BUSY;
1869
1019// scsi_print_command(SCpnt); 1870// scsi_print_command(SCpnt);
1020 1871
1021 return mptscsih_qcmd(SCpnt,done); 1872 return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
1114static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, 1965static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
1115 MPT_FRAME_HDR *reply) 1966 MPT_FRAME_HDR *reply)
1116{ 1967{
1117 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; 1968 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1118 if (reply != NULL) { 1969 if (reply != NULL) {
1119 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; 1970 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
1120 memcpy(ioc->sas_mgmt.reply, reply, 1971 memcpy(ioc->sas_mgmt.reply, reply,
1121 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); 1972 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
1122 } 1973 }
1123 complete(&ioc->sas_mgmt.done); 1974
1124 return 1; 1975 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1976 ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
1977 complete(&ioc->sas_mgmt.done);
1978 return 1;
1979 }
1980 return 0;
1125} 1981}
1126 1982
1127static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) 1983static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1160 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; 2016 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
1161 req->PhyNum = phy->identify.phy_identifier; 2017 req->PhyNum = phy->identify.phy_identifier;
1162 2018
2019 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1163 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2020 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1164 2021
1165 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 2022 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1174 2031
1175 /* a reply frame is expected */ 2032 /* a reply frame is expected */
1176 if ((ioc->sas_mgmt.status & 2033 if ((ioc->sas_mgmt.status &
1177 MPT_IOCTL_STATUS_RF_VALID) == 0) { 2034 MPT_MGMT_STATUS_RF_VALID) == 0) {
1178 error = -ENXIO; 2035 error = -ENXIO;
1179 goto out_unlock; 2036 goto out_unlock;
1180 } 2037 }
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1191 error = 0; 2048 error = 0;
1192 2049
1193 out_unlock: 2050 out_unlock:
2051 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1194 mutex_unlock(&ioc->sas_mgmt.mutex); 2052 mutex_unlock(&ioc->sas_mgmt.mutex);
1195 out: 2053 out:
1196 return error; 2054 return error;
@@ -1277,8 +2135,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1277 /* do we need to support multiple segments? */ 2135 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 2136 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 2137 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 2138 ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1281 rsp->bio->bi_vcnt, rsp->data_len); 2139 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1282 return -EINVAL; 2140 return -EINVAL;
1283 } 2141 }
1284 2142
@@ -1295,7 +2153,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1295 smpreq = (SmpPassthroughRequest_t *)mf; 2153 smpreq = (SmpPassthroughRequest_t *)mf;
1296 memset(smpreq, 0, sizeof(*smpreq)); 2154 memset(smpreq, 0, sizeof(*smpreq));
1297 2155
1298 smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 2156 smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 2157 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
1300 2158
1301 if (rphy) 2159 if (rphy)
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1304 struct mptsas_portinfo *port_info; 2162 struct mptsas_portinfo *port_info;
1305 2163
1306 mutex_lock(&ioc->sas_topology_mutex); 2164 mutex_lock(&ioc->sas_topology_mutex);
1307 port_info = mptsas_get_hba_portinfo(ioc); 2165 port_info = ioc->hba_port_info;
1308 if (port_info && port_info->phy_info) 2166 if (port_info && port_info->phy_info)
1309 sas_address = 2167 sas_address =
1310 port_info->phy_info[0].phy->identify.sas_address; 2168 port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1319 /* request */ 2177 /* request */
1320 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2178 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 2179 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 2180 MPI_SGE_FLAGS_DIRECTION)
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 2181 << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (req->data_len - 4); 2182 flagsLength |= (blk_rq_bytes(req) - 4);
1325 2183
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 2184 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 req->data_len, PCI_DMA_BIDIRECTIONAL); 2185 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 2186 if (!dma_addr_out)
1329 goto put_mf; 2187 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 2188 ioc->add_sge(psge, flagsLength, dma_addr_out);
1331 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2189 psge += ioc->SGE_size;
1332 2190
1333 /* response */ 2191 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 2192 flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1335 flagsLength |= rsp->data_len + 4; 2193 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
2194 MPI_SGE_FLAGS_IOC_TO_HOST |
2195 MPI_SGE_FLAGS_END_OF_BUFFER;
2196
2197 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
2198 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 2199 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 2200 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 2201 if (!dma_addr_in)
1339 goto unmap; 2202 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 2203 ioc->add_sge(psge, flagsLength, dma_addr_in);
1341 2204
2205 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1342 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2206 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1343 2207
1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); 2208 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
@@ -1351,30 +2215,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1351 } 2215 }
1352 mf = NULL; 2216 mf = NULL;
1353 2217
1354 if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { 2218 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
1355 SmpPassthroughReply_t *smprep; 2219 SmpPassthroughReply_t *smprep;
1356 2220
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 2221 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
1358 memcpy(req->sense, smprep, sizeof(*smprep)); 2222 memcpy(req->sense, smprep, sizeof(*smprep));
1359 req->sense_len = sizeof(*smprep); 2223 req->sense_len = sizeof(*smprep);
1360 req->data_len = 0; 2224 req->resid_len = 0;
1361 rsp->data_len -= smprep->ResponseDataLength; 2225 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 2226 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 2227 printk(MYIOC_s_ERR_FMT
2228 "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 2229 ioc->name, __func__);
1365 ret = -ENXIO; 2230 ret = -ENXIO;
1366 } 2231 }
1367unmap: 2232unmap:
1368 if (dma_addr_out) 2233 if (dma_addr_out)
1369 pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 2234 pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
1370 PCI_DMA_BIDIRECTIONAL); 2235 PCI_DMA_BIDIRECTIONAL);
1371 if (dma_addr_in) 2236 if (dma_addr_in)
1372 pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 2237 pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
1373 PCI_DMA_BIDIRECTIONAL); 2238 PCI_DMA_BIDIRECTIONAL);
1374put_mf: 2239put_mf:
1375 if (mf) 2240 if (mf)
1376 mpt_free_msg_frame(ioc, mf); 2241 mpt_free_msg_frame(ioc, mf);
1377out_unlock: 2242out_unlock:
2243 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1378 mutex_unlock(&ioc->sas_mgmt.mutex); 2244 mutex_unlock(&ioc->sas_mgmt.mutex);
1379out: 2245out:
1380 return ret; 2246 return ret;
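
The reworked SMP passthrough path above builds each scatter-gather element's flagsLength word by OR-ing the MPI_SGE_FLAGS_* bits together, shifting them by MPI_SGE_FLAGS_SHIFT, and OR-ing in the byte count taken from blk_rq_bytes(). Below is a minimal user-space sketch of that flags-above-length packing, assuming the flags occupy the byte above a 24-bit length as MPI simple SGEs do; the constant values are illustrative, not the driver's headers.

/* Minimal sketch of MPI-style simple-SGE flagsLength packing: flags live
 * above bit 24, the byte count in the low 24 bits.  All constants here
 * are illustrative placeholders. */
#include <stdint.h>
#include <stdio.h>

#define SGE_FLAGS_SHIFT        24
#define SGE_FLAGS_SIMPLE       0x10u
#define SGE_FLAGS_END_OF_BUF   0x40u
#define SGE_FLAGS_HOST_TO_IOC  0x04u   /* direction bit, hypothetical value */
#define SGE_LENGTH_MASK        0x00FFFFFFu

static uint32_t build_flags_length(uint32_t flags, uint32_t byte_count)
{
	return (flags << SGE_FLAGS_SHIFT) | (byte_count & SGE_LENGTH_MASK);
}

int main(void)
{
	uint32_t req_bytes = 1028;	/* e.g. what blk_rq_bytes(req) returned */
	uint32_t fl = build_flags_length(SGE_FLAGS_SIMPLE |
					 SGE_FLAGS_END_OF_BUF |
					 SGE_FLAGS_HOST_TO_IOC,
					 req_bytes - 4);   /* SMP CRC excluded */

	printf("flagsLength = 0x%08x (flags 0x%02x, length %u)\n",
	       fl, fl >> SGE_FLAGS_SHIFT, fl & SGE_LENGTH_MASK);
	return 0;
}
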
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
1438 2304
1439 port_info->num_phys = buffer->NumPhys; 2305 port_info->num_phys = buffer->NumPhys;
1440 port_info->phy_info = kcalloc(port_info->num_phys, 2306 port_info->phy_info = kcalloc(port_info->num_phys,
1441 sizeof(*port_info->phy_info),GFP_KERNEL); 2307 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1442 if (!port_info->phy_info) { 2308 if (!port_info->phy_info) {
1443 error = -ENOMEM; 2309 error = -ENOMEM;
1444 goto out_free_consistent; 2310 goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1600 __le64 sas_address; 2466 __le64 sas_address;
1601 int error=0; 2467 int error=0;
1602 2468
1603 if (ioc->sas_discovery_runtime &&
1604 mptsas_is_end_device(device_info))
1605 goto out;
1606
1607 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 2469 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
1608 hdr.ExtPageLength = 0; 2470 hdr.ExtPageLength = 0;
1609 hdr.PageNumber = 0; 2471 hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1644 2506
1645 mptsas_print_device_pg0(ioc, buffer); 2507 mptsas_print_device_pg0(ioc, buffer);
1646 2508
2509 memset(device_info, 0, sizeof(struct mptsas_devinfo));
1647 device_info->handle = le16_to_cpu(buffer->DevHandle); 2510 device_info->handle = le16_to_cpu(buffer->DevHandle);
1648 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); 2511 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
1649 device_info->handle_enclosure = 2512 device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1675 SasExpanderPage0_t *buffer; 2538 SasExpanderPage0_t *buffer;
1676 dma_addr_t dma_handle; 2539 dma_addr_t dma_handle;
1677 int i, error; 2540 int i, error;
2541 __le64 sas_address;
1678 2542
2543 memset(port_info, 0, sizeof(struct mptsas_portinfo));
1679 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 2544 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1680 hdr.ExtPageLength = 0; 2545 hdr.ExtPageLength = 0;
1681 hdr.PageNumber = 0; 2546 hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1721 } 2586 }
1722 2587
1723 /* save config data */ 2588 /* save config data */
1724 port_info->num_phys = buffer->NumPhys; 2589 port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
1725 port_info->phy_info = kcalloc(port_info->num_phys, 2590 port_info->phy_info = kcalloc(port_info->num_phys,
1726 sizeof(*port_info->phy_info),GFP_KERNEL); 2591 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1727 if (!port_info->phy_info) { 2592 if (!port_info->phy_info) {
1728 error = -ENOMEM; 2593 error = -ENOMEM;
1729 goto out_free_consistent; 2594 goto out_free_consistent;
1730 } 2595 }
1731 2596
2597 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
1732 for (i = 0; i < port_info->num_phys; i++) { 2598 for (i = 0; i < port_info->num_phys; i++) {
1733 port_info->phy_info[i].portinfo = port_info; 2599 port_info->phy_info[i].portinfo = port_info;
1734 port_info->phy_info[i].handle = 2600 port_info->phy_info[i].handle =
1735 le16_to_cpu(buffer->DevHandle); 2601 le16_to_cpu(buffer->DevHandle);
2602 port_info->phy_info[i].identify.sas_address =
2603 le64_to_cpu(sas_address);
2604 port_info->phy_info[i].identify.handle_parent =
2605 le16_to_cpu(buffer->ParentDevHandle);
1736 } 2606 }
1737 2607
1738 out_free_consistent: 2608 out_free_consistent:
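
The expander page-0 handling above copies the firmware's 64-bit SASAddress field into a local __le64 with memcpy() before converting it with le64_to_cpu(), which avoids an unaligned load from the packed reply buffer. A small user-space sketch of that pattern follows; the frame layout and byte values are made up for illustration.

/* Copy a possibly unaligned little-endian 64-bit field out of a
 * firmware-style packed buffer with memcpy(), then convert to host order. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t le64_to_host(uint64_t le)
{
	const uint8_t *b = (const uint8_t *)&le;
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];	/* assemble from LE byte order */
	return v;
}

int main(void)
{
	/* pretend this is a reply frame with the SAS address at offset 3 */
	uint8_t frame[16] = { 0, 0, 0,
			      0xef, 0xbe, 0xad, 0xde, 0x44, 0x33, 0x22, 0x11 };
	uint64_t raw;

	memcpy(&raw, &frame[3], sizeof(raw));	/* no unaligned dereference */
	printf("sas_address = 0x%016llx\n",
	       (unsigned long long)le64_to_host(raw));
	return 0;
}
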
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1752 dma_addr_t dma_handle; 2622 dma_addr_t dma_handle;
1753 int error=0; 2623 int error=0;
1754 2624
1755 if (ioc->sas_discovery_runtime && 2625 hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
1756 mptsas_is_end_device(&phy_info->attached))
1757 goto out;
1758
1759 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1760 hdr.ExtPageLength = 0; 2626 hdr.ExtPageLength = 0;
1761 hdr.PageNumber = 1; 2627 hdr.PageNumber = 1;
1762 hdr.Reserved1 = 0; 2628 hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1791 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2657 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
1792 2658
1793 error = mpt_config(ioc, &cfg); 2659 error = mpt_config(ioc, &cfg);
2660
2661 if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
2662 error = -ENODEV;
2663 goto out;
2664 }
2665
1794 if (error) 2666 if (error)
1795 goto out_free_consistent; 2667 goto out_free_consistent;
1796 2668
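
The new error handling above turns the firmware's CONFIG_INVALID_PAGE IOCStatus into -ENODEV so callers can tell a missing expander page apart from a real config failure. A hedged sketch of that status-to-errno mapping; the numeric status values below are illustrative placeholders, not the MPI definitions.

/* Map a firmware IOCStatus code onto a normal errno value.  Status
 * constants here are invented for the example. */
#include <errno.h>
#include <stdio.h>

#define IOCSTATUS_SUCCESS		0x0000
#define IOCSTATUS_BUSY			0x0002
#define IOCSTATUS_CONFIG_INVALID_PAGE	0x0022

static int iocstatus_to_errno(int ioc_status)
{
	switch (ioc_status) {
	case IOCSTATUS_SUCCESS:			return 0;
	case IOCSTATUS_CONFIG_INVALID_PAGE:	return -ENODEV;	/* page gone */
	case IOCSTATUS_BUSY:			return -EBUSY;	/* retryable */
	default:				return -EIO;
	}
}

int main(void)
{
	printf("invalid page -> %d\n",
	       iocstatus_to_errno(IOCSTATUS_CONFIG_INVALID_PAGE));
	return 0;
}
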
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
2010 goto out; 2882 goto out;
2011 } 2883 }
2012 mptsas_set_port(ioc, phy_info, port); 2884 mptsas_set_port(ioc, phy_info, port);
2013 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2885 devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
2014 "sas_port_alloc: port=%p dev=%p port_id=%d\n", 2886 MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
2015 ioc->name, port, dev, port->port_identifier)); 2887 ioc->name, port->port_identifier,
2888 (unsigned long long)phy_info->
2889 attached.sas_address));
2016 } 2890 }
2017 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", 2891 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2018 ioc->name, phy_info->phy_id)); 2892 "sas_port_add_phy: phy_id=%d\n",
2893 ioc->name, phy_info->phy_id));
2019 sas_port_add_phy(port, phy_info->phy); 2894 sas_port_add_phy(port, phy_info->phy);
2020 phy_info->sas_port_add_phy = 0; 2895 phy_info->sas_port_add_phy = 0;
2896 devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
2897 MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
2898 phy_info->phy_id, phy_info->phy));
2021 } 2899 }
2022
2023 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { 2900 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
2024 2901
2025 struct sas_rphy *rphy; 2902 struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
2032 * the adding/removing of devices that occur 2909 * the adding/removing of devices that occur
2033 * after start of day. 2910 * after start of day.
2034 */ 2911 */
2035 if (ioc->sas_discovery_runtime && 2912 if (mptsas_is_end_device(&phy_info->attached) &&
2036 mptsas_is_end_device(&phy_info->attached)) 2913 phy_info->attached.handle_parent) {
2037 goto out; 2914 goto out;
2915 }
2038 2916
2039 mptsas_parse_device_info(&identify, &phy_info->attached); 2917 mptsas_parse_device_info(&identify, &phy_info->attached);
2040 if (scsi_is_host_device(parent)) { 2918 if (scsi_is_host_device(parent)) {
2041 struct mptsas_portinfo *port_info; 2919 struct mptsas_portinfo *port_info;
2042 int i; 2920 int i;
2043 2921
2044 mutex_lock(&ioc->sas_topology_mutex); 2922 port_info = ioc->hba_port_info;
2045 port_info = mptsas_get_hba_portinfo(ioc);
2046 mutex_unlock(&ioc->sas_topology_mutex);
2047 2923
2048 for (i = 0; i < port_info->num_phys; i++) 2924 for (i = 0; i < port_info->num_phys; i++)
2049 if (port_info->phy_info[i].identify.sas_address == 2925 if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2102 struct mptsas_portinfo *port_info, *hba; 2978 struct mptsas_portinfo *port_info, *hba;
2103 int error = -ENOMEM, i; 2979 int error = -ENOMEM, i;
2104 2980
2105 hba = kzalloc(sizeof(*port_info), GFP_KERNEL); 2981 hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2106 if (! hba) 2982 if (! hba)
2107 goto out; 2983 goto out;
2108 2984
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2112 2988
2113 mptsas_sas_io_unit_pg1(ioc); 2989 mptsas_sas_io_unit_pg1(ioc);
2114 mutex_lock(&ioc->sas_topology_mutex); 2990 mutex_lock(&ioc->sas_topology_mutex);
2115 port_info = mptsas_get_hba_portinfo(ioc); 2991 port_info = ioc->hba_port_info;
2116 if (!port_info) { 2992 if (!port_info) {
2117 port_info = hba; 2993 ioc->hba_port_info = port_info = hba;
2994 ioc->hba_port_num_phy = port_info->num_phys;
2118 list_add_tail(&port_info->list, &ioc->sas_topology); 2995 list_add_tail(&port_info->list, &ioc->sas_topology);
2119 } else { 2996 } else {
2120 for (i = 0; i < hba->num_phys; i++) { 2997 for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2130 hba = NULL; 3007 hba = NULL;
2131 } 3008 }
2132 mutex_unlock(&ioc->sas_topology_mutex); 3009 mutex_unlock(&ioc->sas_topology_mutex);
3010#if defined(CPQ_CIM)
3011 ioc->num_ports = port_info->num_phys;
3012#endif
2133 for (i = 0; i < port_info->num_phys; i++) { 3013 for (i = 0; i < port_info->num_phys; i++) {
2134 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 3014 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
2135 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 3015 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
2136 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 3016 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
2137 3017 port_info->phy_info[i].identify.handle =
3018 port_info->phy_info[i].handle;
2138 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, 3019 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
2139 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3020 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2140 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3021 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2141 port_info->phy_info[i].handle); 3022 port_info->phy_info[i].identify.handle);
3023 if (!ioc->hba_port_sas_addr)
3024 ioc->hba_port_sas_addr =
3025 port_info->phy_info[i].identify.sas_address;
2142 port_info->phy_info[i].identify.phy_id = 3026 port_info->phy_info[i].identify.phy_id =
2143 port_info->phy_info[i].phy_id = i; 3027 port_info->phy_info[i].phy_id = i;
2144 if (port_info->phy_info[i].attached.handle) 3028 if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2163 return error; 3047 return error;
2164} 3048}
2165 3049
2166static int 3050static void
2167mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) 3051mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
2168{ 3052{
2169 struct mptsas_portinfo *port_info, *p, *ex; 3053 struct mptsas_portinfo *parent;
2170 struct device *parent; 3054 struct device *parent_dev;
2171 struct sas_rphy *rphy; 3055 struct sas_rphy *rphy;
2172 int error = -ENOMEM, i, j; 3056 int i;
2173 3057 u64 sas_address; /* expander sas address */
2174 ex = kzalloc(sizeof(*port_info), GFP_KERNEL); 3058 u32 handle;
2175 if (!ex) 3059
2176 goto out; 3060 handle = port_info->phy_info[0].handle;
2177 3061 sas_address = port_info->phy_info[0].identify.sas_address;
2178 error = mptsas_sas_expander_pg0(ioc, ex,
2179 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
2180 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
2181 if (error)
2182 goto out_free_port_info;
2183
2184 *handle = ex->phy_info[0].handle;
2185
2186 mutex_lock(&ioc->sas_topology_mutex);
2187 port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
2188 if (!port_info) {
2189 port_info = ex;
2190 list_add_tail(&port_info->list, &ioc->sas_topology);
2191 } else {
2192 for (i = 0; i < ex->num_phys; i++) {
2193 port_info->phy_info[i].handle =
2194 ex->phy_info[i].handle;
2195 port_info->phy_info[i].port_id =
2196 ex->phy_info[i].port_id;
2197 }
2198 kfree(ex->phy_info);
2199 kfree(ex);
2200 ex = NULL;
2201 }
2202 mutex_unlock(&ioc->sas_topology_mutex);
2203
2204 for (i = 0; i < port_info->num_phys; i++) { 3062 for (i = 0; i < port_info->num_phys; i++) {
2205 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], 3063 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
2206 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << 3064 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
2207 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); 3065 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
2208 3066
2209 if (port_info->phy_info[i].identify.handle) { 3067 mptsas_sas_device_pg0(ioc,
2210 mptsas_sas_device_pg0(ioc, 3068 &port_info->phy_info[i].identify,
2211 &port_info->phy_info[i].identify, 3069 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2212 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3070 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2213 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3071 port_info->phy_info[i].identify.handle);
2214 port_info->phy_info[i].identify.handle); 3072 port_info->phy_info[i].identify.phy_id =
2215 port_info->phy_info[i].identify.phy_id = 3073 port_info->phy_info[i].phy_id;
2216 port_info->phy_info[i].phy_id;
2217 }
2218 3074
2219 if (port_info->phy_info[i].attached.handle) { 3075 if (port_info->phy_info[i].attached.handle) {
2220 mptsas_sas_device_pg0(ioc, 3076 mptsas_sas_device_pg0(ioc,
2221 &port_info->phy_info[i].attached, 3077 &port_info->phy_info[i].attached,
2222 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3078 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2223 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3079 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2224 port_info->phy_info[i].attached.handle); 3080 port_info->phy_info[i].attached.handle);
2225 port_info->phy_info[i].attached.phy_id = 3081 port_info->phy_info[i].attached.phy_id =
2226 port_info->phy_info[i].phy_id; 3082 port_info->phy_info[i].phy_id;
2227 } 3083 }
2228 } 3084 }
2229 3085
2230 parent = &ioc->sh->shost_gendev; 3086 mutex_lock(&ioc->sas_topology_mutex);
2231 for (i = 0; i < port_info->num_phys; i++) { 3087 parent = mptsas_find_portinfo_by_handle(ioc,
2232 mutex_lock(&ioc->sas_topology_mutex); 3088 port_info->phy_info[0].identify.handle_parent);
2233 list_for_each_entry(p, &ioc->sas_topology, list) { 3089 if (!parent) {
2234 for (j = 0; j < p->num_phys; j++) {
2235 if (port_info->phy_info[i].identify.handle !=
2236 p->phy_info[j].attached.handle)
2237 continue;
2238 rphy = mptsas_get_rphy(&p->phy_info[j]);
2239 parent = &rphy->dev;
2240 }
2241 }
2242 mutex_unlock(&ioc->sas_topology_mutex); 3090 mutex_unlock(&ioc->sas_topology_mutex);
3091 return;
2243 } 3092 }
3093 for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
3094 i++) {
3095 if (parent->phy_info[i].attached.sas_address == sas_address) {
3096 rphy = mptsas_get_rphy(&parent->phy_info[i]);
3097 parent_dev = &rphy->dev;
3098 }
3099 }
3100 mutex_unlock(&ioc->sas_topology_mutex);
2244 3101
2245 mptsas_setup_wide_ports(ioc, port_info); 3102 mptsas_setup_wide_ports(ioc, port_info);
2246
2247 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++) 3103 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
2248 mptsas_probe_one_phy(parent, &port_info->phy_info[i], 3104 mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
2249 ioc->sas_index, 0); 3105 ioc->sas_index, 0);
3106}
2250 3107
2251 return 0; 3108static void
3109mptsas_expander_event_add(MPT_ADAPTER *ioc,
3110 MpiEventDataSasExpanderStatusChange_t *expander_data)
3111{
3112 struct mptsas_portinfo *port_info;
3113 int i;
3114 __le64 sas_address;
2252 3115
2253 out_free_port_info: 3116 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2254 if (ex) { 3117 if (!port_info)
2255 kfree(ex->phy_info); 3118 BUG();
2256 kfree(ex); 3119 port_info->num_phys = (expander_data->NumPhys) ?
3120 expander_data->NumPhys : 1;
3121 port_info->phy_info = kcalloc(port_info->num_phys,
3122 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
3123 if (!port_info->phy_info)
3124 BUG();
3125 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3126 for (i = 0; i < port_info->num_phys; i++) {
3127 port_info->phy_info[i].portinfo = port_info;
3128 port_info->phy_info[i].handle =
3129 le16_to_cpu(expander_data->DevHandle);
3130 port_info->phy_info[i].identify.sas_address =
3131 le64_to_cpu(sas_address);
3132 port_info->phy_info[i].identify.handle_parent =
3133 le16_to_cpu(expander_data->ParentDevHandle);
3134 }
3135
3136 mutex_lock(&ioc->sas_topology_mutex);
3137 list_add_tail(&port_info->list, &ioc->sas_topology);
3138 mutex_unlock(&ioc->sas_topology_mutex);
3139
3140 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3141 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3142 (unsigned long long)sas_address);
3143
3144 mptsas_expander_refresh(ioc, port_info);
3145}
3146
3147/**
3148 * mptsas_delete_expander_siblings - remove siblings attached to expander
3149 * @ioc: Pointer to MPT_ADAPTER structure
3150 * @parent: the parent port_info object
3151 * @expander: the expander port_info object
3152 **/
3153static void
3154mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
3155 *parent, struct mptsas_portinfo *expander)
3156{
3157 struct mptsas_phyinfo *phy_info;
3158 struct mptsas_portinfo *port_info;
3159 struct sas_rphy *rphy;
3160 int i;
3161
3162 phy_info = expander->phy_info;
3163 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3164 rphy = mptsas_get_rphy(phy_info);
3165 if (!rphy)
3166 continue;
3167 if (rphy->identify.device_type == SAS_END_DEVICE)
3168 mptsas_del_end_device(ioc, phy_info);
3169 }
3170
3171 phy_info = expander->phy_info;
3172 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3173 rphy = mptsas_get_rphy(phy_info);
3174 if (!rphy)
3175 continue;
3176 if (rphy->identify.device_type ==
3177 MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
3178 rphy->identify.device_type ==
3179 MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
3180 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3181 rphy->identify.sas_address);
3182 if (!port_info)
3183 continue;
3184 if (port_info == parent) /* backlink rphy */
3185 continue;
3186 /*
 3187			   Delete this expander even if its expander page still exists,
 3188			   because the parent expander has already been deleted
3189 */
3190 mptsas_expander_delete(ioc, port_info, 1);
3191 }
3192 }
3193}
3194
3195
3196/**
3197 * mptsas_expander_delete - remove this expander
3198 * @ioc: Pointer to MPT_ADAPTER structure
3199 * @port_info: expander port_info struct
3200 * @force: Flag to forcefully delete the expander
3201 *
3202 **/
3203
3204static void mptsas_expander_delete(MPT_ADAPTER *ioc,
3205 struct mptsas_portinfo *port_info, u8 force)
3206{
3207
3208 struct mptsas_portinfo *parent;
3209 int i;
3210 u64 expander_sas_address;
3211 struct mptsas_phyinfo *phy_info;
3212 struct mptsas_portinfo buffer;
3213 struct mptsas_portinfo_details *port_details;
3214 struct sas_port *port;
3215
3216 if (!port_info)
3217 return;
3218
3219 /* see if expander is still there before deleting */
3220 mptsas_sas_expander_pg0(ioc, &buffer,
3221 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3222 MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
3223 port_info->phy_info[0].identify.handle);
3224
3225 if (buffer.num_phys) {
3226 kfree(buffer.phy_info);
3227 if (!force)
3228 return;
3229 }
3230
3231
3232 /*
3233 * Obtain the port_info instance to the parent port
3234 */
3235 port_details = NULL;
3236 expander_sas_address =
3237 port_info->phy_info[0].identify.sas_address;
3238 parent = mptsas_find_portinfo_by_handle(ioc,
3239 port_info->phy_info[0].identify.handle_parent);
3240 mptsas_delete_expander_siblings(ioc, parent, port_info);
3241 if (!parent)
3242 goto out;
3243
3244 /*
3245 * Delete rphys in the parent that point
3246 * to this expander.
3247 */
3248 phy_info = parent->phy_info;
3249 port = NULL;
3250 for (i = 0; i < parent->num_phys; i++, phy_info++) {
3251 if (!phy_info->phy)
3252 continue;
3253 if (phy_info->attached.sas_address !=
3254 expander_sas_address)
3255 continue;
3256 if (!port) {
3257 port = mptsas_get_port(phy_info);
3258 port_details = phy_info->port_details;
3259 }
3260 dev_printk(KERN_DEBUG, &phy_info->phy->dev,
3261 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
3262 phy_info->phy_id, phy_info->phy);
3263 sas_port_delete_phy(port, phy_info->phy);
3264 }
3265 if (port) {
3266 dev_printk(KERN_DEBUG, &port->dev,
3267 MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
3268 ioc->name, port->port_identifier,
3269 (unsigned long long)expander_sas_address);
3270 sas_port_delete(port);
3271 mptsas_port_delete(ioc, port_details);
2257 } 3272 }
2258 out: 3273 out:
2259 return error; 3274
3275 printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
3276 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3277 (unsigned long long)expander_sas_address);
3278
3279 /*
3280 * free link
3281 */
3282 list_del(&port_info->list);
3283 kfree(port_info->phy_info);
3284 kfree(port_info);
2260} 3285}
2261 3286
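
mptsas_expander_delete() above finishes by unlinking the port_info from ioc->sas_topology with list_del() and freeing the per-phy array. Here is a self-contained user-space analogue of that unlink-and-free step, with a hand-rolled stand-in for the kernel's struct list_head.

#include <stdlib.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

struct portinfo {
	struct list_node list;
	int num_phys;
	int *phy_info;			/* stands in for the phy_info array */
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct list_node topology;
	struct portinfo *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	list_init(&topology);
	p->num_phys = 2;
	p->phy_info = calloc(p->num_phys, sizeof(int));
	list_add_tail(&p->list, &topology);

	/* ... expander found to be gone ... */
	list_del(&p->list);		/* drop it from the topology list */
	free(p->phy_info);
	free(p);
	printf("topology empty: %d\n", topology.next == &topology);
	return 0;
}
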
2262/* 3287
2263 * mptsas_delete_expander_phys 3288/**
3289 * mptsas_send_expander_event - expanders events
3290 * @ioc: Pointer to MPT_ADAPTER structure
3291 * @expander_data: event data
2264 * 3292 *
2265 * 3293 *
2266 * This will traverse topology, and remove expanders 3294 * This function handles adding, removing, and refreshing
2267 * that are no longer present 3295 * device handles within the expander objects.
2268 */ 3296 */
2269static void 3297static void
2270mptsas_delete_expander_phys(MPT_ADAPTER *ioc) 3298mptsas_send_expander_event(struct fw_event_work *fw_event)
2271{ 3299{
2272 struct mptsas_portinfo buffer; 3300 MPT_ADAPTER *ioc;
2273 struct mptsas_portinfo *port_info, *n, *parent; 3301 MpiEventDataSasExpanderStatusChange_t *expander_data;
2274 struct mptsas_phyinfo *phy_info; 3302 struct mptsas_portinfo *port_info;
2275 struct sas_port * port; 3303 __le64 sas_address;
2276 int i; 3304 int i;
2277 u64 expander_sas_address;
2278 3305
3306 ioc = fw_event->ioc;
3307 expander_data = (MpiEventDataSasExpanderStatusChange_t *)
3308 fw_event->event_data;
3309 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3310 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3311
3312 if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
3313 if (port_info) {
3314 for (i = 0; i < port_info->num_phys; i++) {
3315 port_info->phy_info[i].portinfo = port_info;
3316 port_info->phy_info[i].handle =
3317 le16_to_cpu(expander_data->DevHandle);
3318 port_info->phy_info[i].identify.sas_address =
3319 le64_to_cpu(sas_address);
3320 port_info->phy_info[i].identify.handle_parent =
3321 le16_to_cpu(expander_data->ParentDevHandle);
3322 }
3323 mptsas_expander_refresh(ioc, port_info);
3324 } else if (!port_info && expander_data->NumPhys)
3325 mptsas_expander_event_add(ioc, expander_data);
3326 } else if (expander_data->ReasonCode ==
3327 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
3328 mptsas_expander_delete(ioc, port_info, 0);
3329
3330 mptsas_free_fw_event(ioc, fw_event);
3331}
3332
3333
3334/**
 3335 * mptsas_expander_add - discover an expander and add it to the sas topology
 3336 * @ioc: Pointer to MPT_ADAPTER structure
 3337 * @handle: firmware device handle of the expander
3338 *
3339 */
3340struct mptsas_portinfo *
3341mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
3342{
3343 struct mptsas_portinfo buffer, *port_info;
3344 int i;
3345
3346 if ((mptsas_sas_expander_pg0(ioc, &buffer,
3347 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3348 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
3349 return NULL;
3350
3351 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
3352 if (!port_info) {
3353 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3354 "%s: exit at line=%d\n", ioc->name,
3355 __func__, __LINE__));
3356 return NULL;
3357 }
3358 port_info->num_phys = buffer.num_phys;
3359 port_info->phy_info = buffer.phy_info;
3360 for (i = 0; i < port_info->num_phys; i++)
3361 port_info->phy_info[i].portinfo = port_info;
2279 mutex_lock(&ioc->sas_topology_mutex); 3362 mutex_lock(&ioc->sas_topology_mutex);
2280 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { 3363 list_add_tail(&port_info->list, &ioc->sas_topology);
3364 mutex_unlock(&ioc->sas_topology_mutex);
3365 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3366 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3367 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3368 mptsas_expander_refresh(ioc, port_info);
3369 return port_info;
3370}
2281 3371
2282 if (!(port_info->phy_info[0].identify.device_info & 3372static void
2283 MPI_SAS_DEVICE_INFO_SMP_TARGET)) 3373mptsas_send_link_status_event(struct fw_event_work *fw_event)
3374{
3375 MPT_ADAPTER *ioc;
3376 MpiEventDataSasPhyLinkStatus_t *link_data;
3377 struct mptsas_portinfo *port_info;
3378 struct mptsas_phyinfo *phy_info = NULL;
3379 __le64 sas_address;
3380 u8 phy_num;
3381 u8 link_rate;
3382
3383 ioc = fw_event->ioc;
3384 link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
3385
3386 memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
3387 sas_address = le64_to_cpu(sas_address);
3388 link_rate = link_data->LinkRates >> 4;
3389 phy_num = link_data->PhyNum;
3390
3391 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3392 if (port_info) {
3393 phy_info = &port_info->phy_info[phy_num];
3394 if (phy_info)
3395 phy_info->negotiated_link_rate = link_rate;
3396 }
3397
3398 if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
3399 link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
3400
3401 if (!port_info) {
3402 if (ioc->old_sas_discovery_protocal) {
3403 port_info = mptsas_expander_add(ioc,
3404 le16_to_cpu(link_data->DevHandle));
3405 if (port_info)
3406 goto out;
3407 }
3408 goto out;
3409 }
3410
3411 if (port_info == ioc->hba_port_info)
3412 mptsas_probe_hba_phys(ioc);
3413 else
3414 mptsas_expander_refresh(ioc, port_info);
3415 } else if (phy_info && phy_info->phy) {
3416 if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
3417 phy_info->phy->negotiated_linkrate =
3418 SAS_PHY_DISABLED;
3419 else if (link_rate ==
3420 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
3421 phy_info->phy->negotiated_linkrate =
3422 SAS_LINK_RATE_FAILED;
3423 else
3424 phy_info->phy->negotiated_linkrate =
3425 SAS_LINK_RATE_UNKNOWN;
3426 }
3427 out:
3428 mptsas_free_fw_event(ioc, fw_event);
3429}
3430
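
mptsas_send_link_status_event() above extracts the current negotiated rate from the upper nibble of the firmware's LinkRates byte (link_data->LinkRates >> 4). A small sketch of that nibble decode; the rate codes are listed only for illustration.

#include <stdint.h>
#include <stdio.h>

enum {
	RATE_PHY_DISABLED	= 0x1,
	RATE_FAILED_NEGOTIATION	= 0x2,
	RATE_1_5_GBPS		= 0x8,
	RATE_3_0_GBPS		= 0x9,
};

static const char *rate_name(uint8_t code)
{
	switch (code) {
	case RATE_PHY_DISABLED:		return "phy disabled";
	case RATE_FAILED_NEGOTIATION:	return "negotiation failed";
	case RATE_1_5_GBPS:		return "1.5 Gbps";
	case RATE_3_0_GBPS:		return "3.0 Gbps";
	default:			return "unknown";
	}
}

int main(void)
{
	uint8_t link_rates = 0x89;		/* sample firmware byte */

	printf("current: %s, hw max: %s\n",
	       rate_name(link_rates >> 4),	/* upper nibble: current rate */
	       rate_name(link_rates & 0x0f));	/* lower nibble: hw maximum  */
	return 0;
}
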
3431static void
3432mptsas_not_responding_devices(MPT_ADAPTER *ioc)
3433{
3434 struct mptsas_portinfo buffer, *port_info;
3435 struct mptsas_device_info *sas_info;
3436 struct mptsas_devinfo sas_device;
3437 u32 handle;
3438 VirtTarget *vtarget = NULL;
3439 struct mptsas_phyinfo *phy_info;
3440 u8 found_expander;
3441 int retval, retry_count;
3442 unsigned long flags;
3443
3444 mpt_findImVolumes(ioc);
3445
3446 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3447 if (ioc->ioc_reset_in_progress) {
3448 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3449 "%s: exiting due to a parallel reset \n", ioc->name,
3450 __func__));
3451 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3452 return;
3453 }
3454 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3455
3456 /* devices, logical volumes */
3457 mutex_lock(&ioc->sas_device_info_mutex);
3458 redo_device_scan:
3459 list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
3460 if (sas_info->is_cached)
2284 continue; 3461 continue;
3462 if (!sas_info->is_logical_volume) {
3463 sas_device.handle = 0;
3464 retry_count = 0;
3465retry_page:
3466 retval = mptsas_sas_device_pg0(ioc, &sas_device,
3467 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
3468 << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3469 (sas_info->fw.channel << 8) +
3470 sas_info->fw.id);
3471
3472 if (sas_device.handle)
3473 continue;
3474 if (retval == -EBUSY) {
3475 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3476 if (ioc->ioc_reset_in_progress) {
3477 dfailprintk(ioc,
3478 printk(MYIOC_s_DEBUG_FMT
3479 "%s: exiting due to reset\n",
3480 ioc->name, __func__));
3481 spin_unlock_irqrestore
3482 (&ioc->taskmgmt_lock, flags);
3483 mutex_unlock(&ioc->
3484 sas_device_info_mutex);
3485 return;
3486 }
3487 spin_unlock_irqrestore(&ioc->taskmgmt_lock,
3488 flags);
3489 }
2285 3490
2286 if (mptsas_sas_expander_pg0(ioc, &buffer, 3491 if (retval && (retval != -ENODEV)) {
2287 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << 3492 if (retry_count < 10) {
2288 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), 3493 retry_count++;
2289 port_info->phy_info[0].handle)) { 3494 goto retry_page;
3495 } else {
3496 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3497 "%s: Config page retry exceeded retry "
3498 "count deleting device 0x%llx\n",
3499 ioc->name, __func__,
3500 sas_info->sas_address));
3501 }
3502 }
2290 3503
2291 /* 3504 /* delete device */
2292 * Obtain the port_info instance to the parent port 3505 vtarget = mptsas_find_vtarget(ioc,
2293 */ 3506 sas_info->fw.channel, sas_info->fw.id);
2294 parent = mptsas_find_portinfo_by_handle(ioc,
2295 port_info->phy_info[0].identify.handle_parent);
2296 3507
2297 if (!parent) 3508 if (vtarget)
2298 goto next_port; 3509 vtarget->deleted = 1;
2299 3510
2300 expander_sas_address = 3511 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2301 port_info->phy_info[0].identify.sas_address; 3512 sas_info->sas_address);
2302 3513
2303 /* 3514 if (phy_info) {
2304 * Delete rphys in the parent that point 3515 mptsas_del_end_device(ioc, phy_info);
2305 * to this expander. The transport layer will 3516 goto redo_device_scan;
2306 * cleanup all the children.
2307 */
2308 phy_info = parent->phy_info;
2309 for (i = 0; i < parent->num_phys; i++, phy_info++) {
2310 port = mptsas_get_port(phy_info);
2311 if (!port)
2312 continue;
2313 if (phy_info->attached.sas_address !=
2314 expander_sas_address)
2315 continue;
2316 dsaswideprintk(ioc,
2317 dev_printk(KERN_DEBUG, &port->dev,
2318 MYIOC_s_FMT "delete port (%d)\n", ioc->name,
2319 port->port_identifier));
2320 sas_port_delete(port);
2321 mptsas_port_delete(ioc, phy_info->port_details);
2322 } 3517 }
2323 next_port: 3518 } else
3519 mptsas_volume_delete(ioc, sas_info->fw.id);
3520 }
3521 mutex_lock(&ioc->sas_device_info_mutex);
2324 3522
2325 phy_info = port_info->phy_info; 3523 /* expanders */
2326 for (i = 0; i < port_info->num_phys; i++, phy_info++) 3524 mutex_lock(&ioc->sas_topology_mutex);
2327 mptsas_port_delete(ioc, phy_info->port_details); 3525 redo_expander_scan:
3526 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2328 3527
2329 list_del(&port_info->list); 3528 if (port_info->phy_info &&
2330 kfree(port_info->phy_info); 3529 (!(port_info->phy_info[0].identify.device_info &
2331 kfree(port_info); 3530 MPI_SAS_DEVICE_INFO_SMP_TARGET)))
3531 continue;
3532 found_expander = 0;
3533 handle = 0xFFFF;
3534 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3535 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3536 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
3537 !found_expander) {
3538
3539 handle = buffer.phy_info[0].handle;
3540 if (buffer.phy_info[0].identify.sas_address ==
3541 port_info->phy_info[0].identify.sas_address) {
3542 found_expander = 1;
3543 }
3544 kfree(buffer.phy_info);
3545 }
3546
3547 if (!found_expander) {
3548 mptsas_expander_delete(ioc, port_info, 0);
3549 goto redo_expander_scan;
2332 } 3550 }
2333 /*
2334 * Free this memory allocated from inside
2335 * mptsas_sas_expander_pg0
2336 */
2337 kfree(buffer.phy_info);
2338 } 3551 }
2339 mutex_unlock(&ioc->sas_topology_mutex); 3552 mutex_lock(&ioc->sas_topology_mutex);
3553}
3554
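
mptsas_not_responding_devices() above retries a transient config-page failure up to ten times via the retry_page label before treating the device as gone. A user-space sketch of that bounded goto-retry loop, with read_page() standing in for mptsas_sas_device_pg0().

#include <stdio.h>

static int attempts;

static int read_page(void)
{
	/* pretend the first three reads fail with a transient error */
	return (++attempts <= 3) ? -1 : 0;
}

int main(void)
{
	int retry_count = 0;
	int retval;

retry_page:
	retval = read_page();
	if (retval && retry_count < 10) {
		retry_count++;
		goto retry_page;	/* bounded retry, mirrors the driver */
	}

	printf("%s after %d attempt(s)\n",
	       retval ? "giving up" : "succeeded", attempts);
	return 0;
}
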
3555/**
3556 * mptsas_probe_expanders - adding expanders
3557 * @ioc: Pointer to MPT_ADAPTER structure
3558 *
3559 **/
3560static void
3561mptsas_probe_expanders(MPT_ADAPTER *ioc)
3562{
3563 struct mptsas_portinfo buffer, *port_info;
3564 u32 handle;
3565 int i;
3566
3567 handle = 0xFFFF;
3568 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3569 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3570 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
3571
3572 handle = buffer.phy_info[0].handle;
3573 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3574 buffer.phy_info[0].identify.sas_address);
3575
3576 if (port_info) {
3577 /* refreshing handles */
3578 for (i = 0; i < buffer.num_phys; i++) {
3579 port_info->phy_info[i].handle = handle;
3580 port_info->phy_info[i].identify.handle_parent =
3581 buffer.phy_info[0].identify.handle_parent;
3582 }
3583 mptsas_expander_refresh(ioc, port_info);
3584 kfree(buffer.phy_info);
3585 continue;
3586 }
3587
3588 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
3589 if (!port_info) {
3590 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3591 "%s: exit at line=%d\n", ioc->name,
3592 __func__, __LINE__));
3593 return;
3594 }
3595 port_info->num_phys = buffer.num_phys;
3596 port_info->phy_info = buffer.phy_info;
3597 for (i = 0; i < port_info->num_phys; i++)
3598 port_info->phy_info[i].portinfo = port_info;
3599 mutex_lock(&ioc->sas_topology_mutex);
3600 list_add_tail(&port_info->list, &ioc->sas_topology);
3601 mutex_unlock(&ioc->sas_topology_mutex);
3602 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3603 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3604 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3605 mptsas_expander_refresh(ioc, port_info);
3606 }
2340} 3607}
2341 3608
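
Both mptsas_probe_expanders() and mptsas_probe_devices() walk the firmware's device tables with the GET_NEXT_HANDLE form: start from the wildcard handle 0xFFFF and keep asking for the next device until the firmware reports no more. A user-space sketch of that iteration, with the firmware faked by a static handle table.

#include <stdint.h>
#include <stdio.h>

static const uint16_t fw_handles[] = { 0x0009, 0x000a, 0x000c };

static int get_next_handle(uint16_t after, uint16_t *out)
{
	size_t i;

	for (i = 0; i < sizeof(fw_handles) / sizeof(fw_handles[0]); i++)
		if (after == 0xFFFF || fw_handles[i] > after) {
			*out = fw_handles[i];
			return 0;
		}
	return -1;			/* no more devices */
}

int main(void)
{
	uint16_t handle = 0xFFFF, next;	/* wildcard starting handle */

	while (!get_next_handle(handle, &next)) {
		printf("probed handle 0x%04x\n", (unsigned)next);
		handle = next;		/* continue from the last one found */
	}
	return 0;
}
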
2342/* 3609static void
2343 * Start of day discovery 3610mptsas_probe_devices(MPT_ADAPTER *ioc)
2344 */ 3611{
3612 u16 handle;
3613 struct mptsas_devinfo sas_device;
3614 struct mptsas_phyinfo *phy_info;
3615
3616 handle = 0xFFFF;
3617 while (!(mptsas_sas_device_pg0(ioc, &sas_device,
3618 MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
3619
3620 handle = sas_device.handle;
3621
3622 if ((sas_device.device_info &
3623 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
3624 MPI_SAS_DEVICE_INFO_STP_TARGET |
3625 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
3626 continue;
3627
3628 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
3629 if (!phy_info)
3630 continue;
3631
3632 if (mptsas_get_rphy(phy_info))
3633 continue;
3634
3635 mptsas_add_end_device(ioc, phy_info);
3636 }
3637}
3638
3639/**
 3640 * mptsas_scan_sas_topology - scan HBA phys, expanders, end devices and RAID volumes
 3641 * @ioc: Pointer to MPT_ADAPTER structure
 3642 *
3643 *
3644 **/
2345static void 3645static void
2346mptsas_scan_sas_topology(MPT_ADAPTER *ioc) 3646mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
2347{ 3647{
2348 u32 handle = 0xFFFF; 3648 struct scsi_device *sdev;
2349 int i; 3649 int i;
2350 3650
2351 mutex_lock(&ioc->sas_discovery_mutex);
2352 mptsas_probe_hba_phys(ioc); 3651 mptsas_probe_hba_phys(ioc);
2353 while (!mptsas_probe_expander_phys(ioc, &handle)) 3652 mptsas_probe_expanders(ioc);
2354 ; 3653 mptsas_probe_devices(ioc);
3654
2355 /* 3655 /*
2356 Reporting RAID volumes. 3656 Reporting RAID volumes.
2357 */ 3657 */
2358 if (!ioc->ir_firmware) 3658 if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
2359 goto out; 3659 !ioc->raid_data.pIocPg2->NumActiveVolumes)
2360 if (!ioc->raid_data.pIocPg2) 3660 return;
2361 goto out;
2362 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
2363 goto out;
2364 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 3661 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
3662 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
3663 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
3664 if (sdev) {
3665 scsi_device_put(sdev);
3666 continue;
3667 }
3668 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
3669 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
3670 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
2365 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, 3671 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
2366 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 3672 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
2367 } 3673 }
2368 out:
2369 mutex_unlock(&ioc->sas_discovery_mutex);
2370} 3674}
2371 3675
2372/* 3676
2373 * Work queue thread to handle Runtime discovery
2374 * Mere purpose is the hot add/delete of expanders
2375 *(Mutex UNLOCKED)
2376 */
2377static void 3677static void
2378__mptsas_discovery_work(MPT_ADAPTER *ioc) 3678mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
2379{ 3679{
2380 u32 handle = 0xFFFF; 3680 MPT_ADAPTER *ioc;
3681 EventDataQueueFull_t *qfull_data;
3682 struct mptsas_device_info *sas_info;
3683 struct scsi_device *sdev;
3684 int depth;
3685 int id = -1;
3686 int channel = -1;
3687 int fw_id, fw_channel;
3688 u16 current_depth;
3689
3690
3691 ioc = fw_event->ioc;
3692 qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
3693 fw_id = qfull_data->TargetID;
3694 fw_channel = qfull_data->Bus;
3695 current_depth = le16_to_cpu(qfull_data->CurrentDepth);
3696
3697 /* if hidden raid component, look for the volume id */
3698 mutex_lock(&ioc->sas_device_info_mutex);
3699 if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
3700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3701 list) {
3702 if (sas_info->is_cached ||
3703 sas_info->is_logical_volume)
3704 continue;
3705 if (sas_info->is_hidden_raid_component &&
3706 (sas_info->fw.channel == fw_channel &&
3707 sas_info->fw.id == fw_id)) {
3708 id = sas_info->volume_id;
3709 channel = MPTSAS_RAID_CHANNEL;
3710 goto out;
3711 }
3712 }
3713 } else {
3714 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3715 list) {
3716 if (sas_info->is_cached ||
3717 sas_info->is_hidden_raid_component ||
3718 sas_info->is_logical_volume)
3719 continue;
3720 if (sas_info->fw.channel == fw_channel &&
3721 sas_info->fw.id == fw_id) {
3722 id = sas_info->os.id;
3723 channel = sas_info->os.channel;
3724 goto out;
3725 }
3726 }
2381 3727
2382 ioc->sas_discovery_runtime=1; 3728 }
2383 mptsas_delete_expander_phys(ioc);
2384 mptsas_probe_hba_phys(ioc);
2385 while (!mptsas_probe_expander_phys(ioc, &handle))
2386 ;
2387 ioc->sas_discovery_runtime=0;
2388}
2389 3729
2390/* 3730 out:
2391 * Work queue thread to handle Runtime discovery 3731 mutex_unlock(&ioc->sas_device_info_mutex);
2392 * Mere purpose is the hot add/delete of expanders 3732
2393 *(Mutex LOCKED) 3733 if (id != -1) {
2394 */ 3734 shost_for_each_device(sdev, ioc->sh) {
2395static void 3735 if (sdev->id == id && sdev->channel == channel) {
2396mptsas_discovery_work(struct work_struct *work) 3736 if (current_depth > sdev->queue_depth) {
2397{ 3737 sdev_printk(KERN_INFO, sdev,
2398 struct mptsas_discovery_event *ev = 3738 "strange observation, the queue "
2399 container_of(work, struct mptsas_discovery_event, work); 3739 "depth is (%d) meanwhile fw queue "
2400 MPT_ADAPTER *ioc = ev->ioc; 3740 "depth (%d)\n", sdev->queue_depth,
3741 current_depth);
3742 continue;
3743 }
3744 depth = scsi_track_queue_full(sdev,
3745 current_depth - 1);
3746 if (depth > 0)
3747 sdev_printk(KERN_INFO, sdev,
3748 "Queue depth reduced to (%d)\n",
3749 depth);
3750 else if (depth < 0)
3751 sdev_printk(KERN_INFO, sdev,
3752 "Tagged Command Queueing is being "
3753 "disabled\n");
3754 else if (depth == 0)
3755 sdev_printk(KERN_INFO, sdev,
3756 "Queue depth not changed yet\n");
3757 }
3758 }
3759 }
2401 3760
2402 mutex_lock(&ioc->sas_discovery_mutex); 3761 mptsas_free_fw_event(ioc, fw_event);
2403 __mptsas_discovery_work(ioc);
2404 mutex_unlock(&ioc->sas_discovery_mutex);
2405 kfree(ev);
2406} 3762}
2407 3763
3764
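
The queue-full handler above only throttles when the firmware-reported CurrentDepth is at or below the OS queue depth, and then lets scsi_track_queue_full() pick the new depth. A condensed user-space sketch of that decision; track_queue_full() is a stand-in, not the SCSI midlayer API.

#include <stdio.h>

static int track_queue_full(int *queue_depth, int new_depth)
{
	if (new_depth <= 0)
		return -1;		/* would disable tagged queueing */
	*queue_depth = new_depth;
	return new_depth;
}

int main(void)
{
	int queue_depth = 64;		/* what the OS is using (sdev->queue_depth) */
	int current_depth = 32;		/* firmware-reported CurrentDepth */
	int depth;

	if (current_depth > queue_depth) {
		printf("fw depth %d above OS depth %d, nothing to do\n",
		       current_depth, queue_depth);
		return 0;
	}

	depth = track_queue_full(&queue_depth, current_depth - 1);
	if (depth > 0)
		printf("queue depth reduced to %d\n", depth);
	else
		printf("tagged command queueing disabled\n");
	return 0;
}
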
2408static struct mptsas_phyinfo * 3765static struct mptsas_phyinfo *
2409mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) 3766mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2410{ 3767{
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2429 return phy_info; 3786 return phy_info;
2430} 3787}
2431 3788
3789/**
 3790 * mptsas_find_phyinfo_by_phys_disk_num - look up the phy_info for a RAID member disk
 3791 * @ioc: Pointer to MPT_ADAPTER structure
 3792 * @phys_disk_num: firmware physical disk number
 3793 * @channel: firmware channel of the physical disk
 3794 * @id: firmware target id of the physical disk
3795 *
3796 **/
2432static struct mptsas_phyinfo * 3797static struct mptsas_phyinfo *
2433mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id) 3798mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
3799 u8 channel, u8 id)
2434{ 3800{
2435 struct mptsas_portinfo *port_info;
2436 struct mptsas_phyinfo *phy_info = NULL; 3801 struct mptsas_phyinfo *phy_info = NULL;
3802 struct mptsas_portinfo *port_info;
3803 RaidPhysDiskPage1_t *phys_disk = NULL;
3804 int num_paths;
3805 u64 sas_address = 0;
2437 int i; 3806 int i;
2438 3807
2439 mutex_lock(&ioc->sas_topology_mutex); 3808 phy_info = NULL;
2440 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3809 if (!ioc->raid_data.pIocPg3)
2441 for (i = 0; i < port_info->num_phys; i++) { 3810 return NULL;
2442 if (!mptsas_is_end_device( 3811 /* dual port support */
2443 &port_info->phy_info[i].attached)) 3812 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
2444 continue; 3813 if (!num_paths)
2445 if (port_info->phy_info[i].attached.id != id) 3814 goto out;
2446 continue; 3815 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2447 if (port_info->phy_info[i].attached.channel != channel) 3816 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2448 continue; 3817 if (!phys_disk)
2449 phy_info = &port_info->phy_info[i]; 3818 goto out;
2450 break; 3819 mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
3820 for (i = 0; i < num_paths; i++) {
3821 if ((phys_disk->Path[i].Flags & 1) != 0)
3822 /* entry no longer valid */
3823 continue;
3824 if ((id == phys_disk->Path[i].PhysDiskID) &&
3825 (channel == phys_disk->Path[i].PhysDiskBus)) {
3826 memcpy(&sas_address, &phys_disk->Path[i].WWID,
3827 sizeof(u64));
3828 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
3829 sas_address);
3830 goto out;
2451 } 3831 }
2452 } 3832 }
2453 mutex_unlock(&ioc->sas_topology_mutex);
2454 return phy_info;
2455}
2456 3833
2457static struct mptsas_phyinfo * 3834 out:
2458mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 3835 kfree(phys_disk);
2459{ 3836 if (phy_info)
2460 struct mptsas_portinfo *port_info; 3837 return phy_info;
2461 struct mptsas_phyinfo *phy_info = NULL;
2462 int i;
2463 3838
3839 /*
3840 * Extra code to handle RAID0 case, where the sas_address is not updated
3841 * in phys_disk_page_1 when hotswapped
3842 */
2464 mutex_lock(&ioc->sas_topology_mutex); 3843 mutex_lock(&ioc->sas_topology_mutex);
2465 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3844 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2466 for (i = 0; i < port_info->num_phys; i++) { 3845 for (i = 0; i < port_info->num_phys && !phy_info; i++) {
2467 if (!mptsas_is_end_device( 3846 if (!mptsas_is_end_device(
2468 &port_info->phy_info[i].attached)) 3847 &port_info->phy_info[i].attached))
2469 continue; 3848 continue;
2470 if (port_info->phy_info[i].attached.phys_disk_num == ~0) 3849 if (port_info->phy_info[i].attached.phys_disk_num == ~0)
2471 continue; 3850 continue;
2472 if (port_info->phy_info[i].attached.phys_disk_num != id) 3851 if ((port_info->phy_info[i].attached.phys_disk_num ==
2473 continue; 3852 phys_disk_num) &&
2474 if (port_info->phy_info[i].attached.channel != channel) 3853 (port_info->phy_info[i].attached.id == id) &&
2475 continue; 3854 (port_info->phy_info[i].attached.channel ==
2476 phy_info = &port_info->phy_info[i]; 3855 channel))
2477 break; 3856 phy_info = &port_info->phy_info[i];
2478 } 3857 }
2479 } 3858 }
2480 mutex_unlock(&ioc->sas_topology_mutex); 3859 mutex_unlock(&ioc->sas_topology_mutex);
2481 return phy_info; 3860 return phy_info;
2482} 3861}
2483 3862
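
mptsas_find_phyinfo_by_phys_disk_num() above sizes the RAID physical-disk page-1 buffer with offsetof() plus num_paths path entries, the usual pattern for a structure that ends in a run-time-sized array. A user-space sketch of that allocation; the structure and its fields are illustrative stand-ins for the MPI types.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct disk_path {
	uint64_t wwid;
	uint8_t  phys_disk_id;
	uint8_t  phys_disk_bus;
	uint16_t flags;
};

struct phys_disk_page1 {
	uint8_t num_paths;
	uint8_t reserved[3];
	struct disk_path path[];	/* really num_paths entries */
};

int main(void)
{
	int num_paths = 2;		/* e.g. from a get_num_paths query */
	struct phys_disk_page1 *pg;

	pg = calloc(1, offsetof(struct phys_disk_page1, path) +
		       num_paths * sizeof(struct disk_path));
	if (!pg)
		return 1;

	pg->num_paths = num_paths;
	pg->path[1].wwid = 0x5000c50012345678ULL;	/* second path's WWID */
	printf("allocated %zu bytes for %d path(s)\n",
	       offsetof(struct phys_disk_page1, path) +
	       num_paths * sizeof(struct disk_path), pg->num_paths);
	free(pg);
	return 0;
}
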
2484/*
2485 * Work queue thread to clear the persitency table
2486 */
2487static void
2488mptsas_persist_clear_table(struct work_struct *work)
2489{
2490 MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
2491
2492 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2493}
2494
2495static void 3863static void
2496mptsas_reprobe_lun(struct scsi_device *sdev, void *data) 3864mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
2497{ 3865{
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2517 pRaidVolumePage0_t buffer = NULL; 3885 pRaidVolumePage0_t buffer = NULL;
2518 RaidPhysDiskPage0_t phys_disk; 3886 RaidPhysDiskPage0_t phys_disk;
2519 int i; 3887 int i;
2520 struct mptsas_hotplug_event *ev; 3888 struct mptsas_phyinfo *phy_info;
3889 struct mptsas_devinfo sas_device;
2521 3890
2522 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 3891 memset(&cfg, 0 , sizeof(CONFIGPARMS));
2523 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 3892 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2557 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) 3926 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
2558 continue; 3927 continue;
2559 3928
2560 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3929 if (mptsas_sas_device_pg0(ioc, &sas_device,
2561 if (!ev) { 3930 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2562 printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name); 3931 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2563 goto out; 3932 (phys_disk.PhysDiskBus << 8) +
2564 } 3933 phys_disk.PhysDiskID))
3934 continue;
2565 3935
2566 INIT_WORK(&ev->work, mptsas_hotplug_work); 3936 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2567 ev->ioc = ioc; 3937 sas_device.sas_address);
2568 ev->id = phys_disk.PhysDiskID; 3938 mptsas_add_end_device(ioc, phy_info);
2569 ev->channel = phys_disk.PhysDiskBus;
2570 ev->phys_disk_num_valid = 1;
2571 ev->phys_disk_num = phys_disk.PhysDiskNum;
2572 ev->event_type = MPTSAS_ADD_DEVICE;
2573 schedule_work(&ev->work);
2574 } 3939 }
2575 3940
2576 out: 3941 out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2582 * Work queue thread to handle SAS hotplug events 3947 * Work queue thread to handle SAS hotplug events
2583 */ 3948 */
2584static void 3949static void
2585mptsas_hotplug_work(struct work_struct *work) 3950mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
3951 struct mptsas_hotplug_event *hot_plug_info)
2586{ 3952{
2587 struct mptsas_hotplug_event *ev =
2588 container_of(work, struct mptsas_hotplug_event, work);
2589
2590 MPT_ADAPTER *ioc = ev->ioc;
2591 struct mptsas_phyinfo *phy_info; 3953 struct mptsas_phyinfo *phy_info;
2592 struct sas_rphy *rphy;
2593 struct sas_port *port;
2594 struct scsi_device *sdev;
2595 struct scsi_target * starget; 3954 struct scsi_target * starget;
2596 struct sas_identify identify;
2597 char *ds = NULL;
2598 struct mptsas_devinfo sas_device; 3955 struct mptsas_devinfo sas_device;
2599 VirtTarget *vtarget; 3956 VirtTarget *vtarget;
2600 VirtDevice *vdevice; 3957 int i;
2601 3958
2602 mutex_lock(&ioc->sas_discovery_mutex); 3959 switch (hot_plug_info->event_type) {
2603 switch (ev->event_type) {
2604 case MPTSAS_DEL_DEVICE:
2605 3960
2606 phy_info = NULL; 3961 case MPTSAS_ADD_PHYSDISK:
2607 if (ev->phys_disk_num_valid) { 3962
2608 if (ev->hidden_raid_component){ 3963 if (!ioc->raid_data.pIocPg2)
2609 if (mptsas_sas_device_pg0(ioc, &sas_device, 3964 break;
2610 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 3965
2611 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3966 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
2612 (ev->channel << 8) + ev->id)) { 3967 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
2613 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 3968 hot_plug_info->id) {
2614 "%s: exit at line=%d\n", ioc->name, 3969 printk(MYIOC_s_WARN_FMT "firmware bug: unable "
 2615 				__func__, __LINE__)); 3970 				"to add hidden disk - target_id matches "
2616 break; 3971 "volume_id\n", ioc->name);
2617 } 3972 mptsas_free_fw_event(ioc, fw_event);
2618 phy_info = mptsas_find_phyinfo_by_sas_address( 3973 return;
2619 ioc, sas_device.sas_address); 3974 }
2620 }else
2621 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
2622 ioc, ev->channel, ev->phys_disk_num);
2623 } 3975 }
3976 mpt_findImVolumes(ioc);
2624 3977
3978 case MPTSAS_ADD_DEVICE:
3979 memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
3980 mptsas_sas_device_pg0(ioc, &sas_device,
3981 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
3982 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3983 (hot_plug_info->channel << 8) +
3984 hot_plug_info->id);
3985
3986 if (!sas_device.handle)
3987 return;
3988
3989 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
2625 if (!phy_info) 3990 if (!phy_info)
2626 phy_info = mptsas_find_phyinfo_by_target(ioc, 3991 break;
2627 ev->channel, ev->id);
2628 3992
2629 /* 3993 if (mptsas_get_rphy(phy_info))
2630 * Sanity checks, for non-existing phys and remote rphys. 3994 break;
2631 */ 3995
2632 if (!phy_info){ 3996 mptsas_add_end_device(ioc, phy_info);
3997 break;
3998
3999 case MPTSAS_DEL_DEVICE:
4000 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
4001 hot_plug_info->sas_address);
4002 mptsas_del_end_device(ioc, phy_info);
4003 break;
4004
4005 case MPTSAS_DEL_PHYSDISK:
4006
4007 mpt_findImVolumes(ioc);
4008
4009 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
4010 ioc, hot_plug_info->phys_disk_num,
4011 hot_plug_info->channel,
4012 hot_plug_info->id);
4013 mptsas_del_end_device(ioc, phy_info);
4014 break;
4015
4016 case MPTSAS_ADD_PHYSDISK_REPROBE:
4017
4018 if (mptsas_sas_device_pg0(ioc, &sas_device,
4019 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
4020 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
4021 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2633 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4022 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2634 "%s: exit at line=%d\n", ioc->name, 4023 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2635 __func__, __LINE__)); 4024 __func__, hot_plug_info->id, __LINE__));
2636 break; 4025 break;
2637 } 4026 }
2638 if (!phy_info->port_details) { 4027
4028 phy_info = mptsas_find_phyinfo_by_sas_address(
4029 ioc, sas_device.sas_address);
4030
4031 if (!phy_info) {
2639 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4032 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2640 "%s: exit at line=%d\n", ioc->name, 4033 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2641 __func__, __LINE__)); 4034 __func__, hot_plug_info->id, __LINE__));
2642 break; 4035 break;
2643 } 4036 }
2644 rphy = mptsas_get_rphy(phy_info); 4037
2645 if (!rphy) { 4038 starget = mptsas_get_starget(phy_info);
4039 if (!starget) {
2646 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4040 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2647 "%s: exit at line=%d\n", ioc->name, 4041 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2648 __func__, __LINE__)); 4042 __func__, hot_plug_info->id, __LINE__));
2649 break; 4043 break;
2650 } 4044 }
2651 4045
2652 port = mptsas_get_port(phy_info); 4046 vtarget = starget->hostdata;
2653 if (!port) { 4047 if (!vtarget) {
2654 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4048 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2655 "%s: exit at line=%d\n", ioc->name, 4049 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2656 __func__, __LINE__)); 4050 __func__, hot_plug_info->id, __LINE__));
2657 break; 4051 break;
2658 } 4052 }
2659 4053
2660 starget = mptsas_get_starget(phy_info); 4054 mpt_findImVolumes(ioc);
2661 if (starget) {
2662 vtarget = starget->hostdata;
2663 4055
 2664 			if (!vtarget) { 4056 		starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
2665 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4057 "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2666 "%s: exit at line=%d\n", ioc->name, 4058 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2667 __func__, __LINE__)); 4059 hot_plug_info->phys_disk_num, (unsigned long long)
2668 break; 4060 sas_device.sas_address);
2669 }
2670 4061
2671 /* 4062 vtarget->id = hot_plug_info->phys_disk_num;
2672 * Handling RAID components 4063 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
2673 */ 4064 phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
2674 if (ev->phys_disk_num_valid && 4065 mptsas_reprobe_target(starget, 1);
2675 ev->hidden_raid_component) {
2676 printk(MYIOC_s_INFO_FMT
2677 "RAID Hidding: channel=%d, id=%d, "
2678 "physdsk %d \n", ioc->name, ev->channel,
2679 ev->id, ev->phys_disk_num);
2680 vtarget->id = ev->phys_disk_num;
2681 vtarget->tflags |=
2682 MPT_TARGET_FLAGS_RAID_COMPONENT;
2683 mptsas_reprobe_target(starget, 1);
2684 phy_info->attached.phys_disk_num =
2685 ev->phys_disk_num;
2686 break;
2687 }
2688 }
2689
2690 if (phy_info->attached.device_info &
2691 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2692 ds = "ssp";
2693 if (phy_info->attached.device_info &
2694 MPI_SAS_DEVICE_INFO_STP_TARGET)
2695 ds = "stp";
2696 if (phy_info->attached.device_info &
2697 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2698 ds = "sata";
2699
2700 printk(MYIOC_s_INFO_FMT
2701 "removing %s device, channel %d, id %d, phy %d\n",
2702 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2703 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
2704 "delete port (%d)\n", ioc->name, port->port_identifier);
2705 sas_port_delete(port);
2706 mptsas_port_delete(ioc, phy_info->port_details);
2707 break; 4066 break;
2708 case MPTSAS_ADD_DEVICE:
2709 4067
2710 if (ev->phys_disk_num_valid) 4068 case MPTSAS_DEL_PHYSDISK_REPROBE:
2711 mpt_findImVolumes(ioc);
2712 4069
2713 /*
2714 * Refresh sas device pg0 data
2715 */
2716 if (mptsas_sas_device_pg0(ioc, &sas_device, 4070 if (mptsas_sas_device_pg0(ioc, &sas_device,
2717 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 4071 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2718 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 4072 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2719 (ev->channel << 8) + ev->id)) { 4073 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2720 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4074 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2721 "%s: exit at line=%d\n", ioc->name, 4075 "%s: fw_id=%d exit at line=%d\n",
2722 __func__, __LINE__)); 4076 ioc->name, __func__,
4077 hot_plug_info->id, __LINE__));
2723 break; 4078 break;
2724 } 4079 }
2725 4080
2726 __mptsas_discovery_work(ioc);
2727
2728 phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 4081 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2729 sas_device.sas_address); 4082 sas_device.sas_address);
2730 4083 if (!phy_info) {
2731 if (!phy_info || !phy_info->port_details) {
2732 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4084 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2733 "%s: exit at line=%d\n", ioc->name, 4085 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2734 __func__, __LINE__)); 4086 __func__, hot_plug_info->id, __LINE__));
2735 break; 4087 break;
2736 } 4088 }
2737 4089
2738 starget = mptsas_get_starget(phy_info); 4090 starget = mptsas_get_starget(phy_info);
2739 if (starget && (!ev->hidden_raid_component)){ 4091 if (!starget) {
2740 4092 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2741 vtarget = starget->hostdata; 4093 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2742 4094 __func__, hot_plug_info->id, __LINE__));
2743 if (!vtarget) {
2744 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2745 "%s: exit at line=%d\n", ioc->name,
2746 __func__, __LINE__));
2747 break;
2748 }
2749 /*
2750 * Handling RAID components
2751 */
2752 if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2753 printk(MYIOC_s_INFO_FMT
2754 "RAID Exposing: channel=%d, id=%d, "
2755 "physdsk %d \n", ioc->name, ev->channel,
2756 ev->id, ev->phys_disk_num);
2757 vtarget->tflags &=
2758 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2759 vtarget->id = ev->id;
2760 mptsas_reprobe_target(starget, 0);
2761 phy_info->attached.phys_disk_num = ~0;
2762 }
2763 break; 4095 break;
2764 } 4096 }
2765 4097
2766 if (mptsas_get_rphy(phy_info)) { 4098 vtarget = starget->hostdata;
4099 if (!vtarget) {
2767 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4100 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2768 "%s: exit at line=%d\n", ioc->name, 4101 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2769 __func__, __LINE__)); 4102 __func__, hot_plug_info->id, __LINE__));
2770 if (ev->channel) printk("%d\n", __LINE__);
2771 break; 4103 break;
2772 } 4104 }
2773 4105
2774 port = mptsas_get_port(phy_info); 4106 if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
2775 if (!port) {
2776 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4107 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2777 "%s: exit at line=%d\n", ioc->name, 4108 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2778 __func__, __LINE__)); 4109 __func__, hot_plug_info->id, __LINE__));
2779 break; 4110 break;
2780 } 4111 }
2781 memcpy(&phy_info->attached, &sas_device,
2782 sizeof(struct mptsas_devinfo));
2783
2784 if (phy_info->attached.device_info &
2785 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2786 ds = "ssp";
2787 if (phy_info->attached.device_info &
2788 MPI_SAS_DEVICE_INFO_STP_TARGET)
2789 ds = "stp";
2790 if (phy_info->attached.device_info &
2791 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2792 ds = "sata";
2793
2794 printk(MYIOC_s_INFO_FMT
2795 "attaching %s device, channel %d, id %d, phy %d\n",
2796 ioc->name, ds, ev->channel, ev->id, ev->phy_id);
2797 4112
2798 mptsas_parse_device_info(&identify, &phy_info->attached); 4113 mpt_findImVolumes(ioc);
2799 rphy = sas_end_device_alloc(port);
2800 if (!rphy) {
2801 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2802 "%s: exit at line=%d\n", ioc->name,
2803 __func__, __LINE__));
2804 break; /* non-fatal: an rphy can be added later */
2805 }
2806 4114
2807 rphy->identify = identify; 4115 starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
2808 if (sas_rphy_add(rphy)) { 4116 " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2809 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4117 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2810 "%s: exit at line=%d\n", ioc->name, 4118 hot_plug_info->phys_disk_num, (unsigned long long)
2811 __func__, __LINE__)); 4119 sas_device.sas_address);
2812 sas_rphy_free(rphy); 4120
2813 break; 4121 vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2814 } 4122 vtarget->id = hot_plug_info->id;
2815 mptsas_set_rphy(ioc, phy_info, rphy); 4123 phy_info->attached.phys_disk_num = ~0;
4124 mptsas_reprobe_target(starget, 0);
4125 mptsas_add_device_component_by_fw(ioc,
4126 hot_plug_info->channel, hot_plug_info->id);
2816 break; 4127 break;
4128
2817 case MPTSAS_ADD_RAID: 4129 case MPTSAS_ADD_RAID:
2818 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4130
2819 ev->id, 0);
2820 if (sdev) {
2821 scsi_device_put(sdev);
2822 break;
2823 }
2824 printk(MYIOC_s_INFO_FMT
2825 "attaching raid volume, channel %d, id %d\n",
2826 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2827 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2828 mpt_findImVolumes(ioc); 4131 mpt_findImVolumes(ioc);
4132 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
4133 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4134 hot_plug_info->id);
4135 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
4136 hot_plug_info->id, 0);
2829 break; 4137 break;
4138
2830 case MPTSAS_DEL_RAID: 4139 case MPTSAS_DEL_RAID:
2831 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4140
2832 ev->id, 0);
2833 if (!sdev)
2834 break;
2835 printk(MYIOC_s_INFO_FMT
2836 "removing raid volume, channel %d, id %d\n",
2837 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2838 vdevice = sdev->hostdata;
2839 scsi_remove_device(sdev);
2840 scsi_device_put(sdev);
2841 mpt_findImVolumes(ioc); 4141 mpt_findImVolumes(ioc);
4142 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4143 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4144 hot_plug_info->id);
4145 scsi_remove_device(hot_plug_info->sdev);
4146 scsi_device_put(hot_plug_info->sdev);
2842 break; 4147 break;
4148
2843 case MPTSAS_ADD_INACTIVE_VOLUME: 4149 case MPTSAS_ADD_INACTIVE_VOLUME:
4150
4151 mpt_findImVolumes(ioc);
2844 mptsas_adding_inactive_raid_components(ioc, 4152 mptsas_adding_inactive_raid_components(ioc,
2845 ev->channel, ev->id); 4153 hot_plug_info->channel, hot_plug_info->id);
2846 break; 4154 break;
2847 case MPTSAS_IGNORE_EVENT: 4155
2848 default: 4156 default:
2849 break; 4157 break;
2850 } 4158 }
2851 4159
2852 mutex_unlock(&ioc->sas_discovery_mutex); 4160 mptsas_free_fw_event(ioc, fw_event);
2853 kfree(ev);
2854} 4161}
2855 4162
2856static void 4163static void
2857mptsas_send_sas_event(MPT_ADAPTER *ioc, 4164mptsas_send_sas_event(struct fw_event_work *fw_event)
2858 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
2859{ 4165{
2860 struct mptsas_hotplug_event *ev; 4166 MPT_ADAPTER *ioc;
2861 u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo); 4167 struct mptsas_hotplug_event hot_plug_info;
2862 __le64 sas_address; 4168 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
4169 u32 device_info;
4170 u64 sas_address;
4171
4172 ioc = fw_event->ioc;
4173 sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
4174 fw_event->event_data;
4175 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
2863 4176
2864 if ((device_info & 4177 if ((device_info &
2865 (MPI_SAS_DEVICE_INFO_SSP_TARGET | 4178 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
2866 MPI_SAS_DEVICE_INFO_STP_TARGET | 4179 MPI_SAS_DEVICE_INFO_STP_TARGET |
2867 MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0) 4180 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
4181 mptsas_free_fw_event(ioc, fw_event);
4182 return;
4183 }
4184
4185 if (sas_event_data->ReasonCode ==
4186 MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
4187 mptbase_sas_persist_operation(ioc,
4188 MPI_SAS_OP_CLEAR_NOT_PRESENT);
4189 mptsas_free_fw_event(ioc, fw_event);
2868 return; 4190 return;
4191 }
2869 4192
2870 switch (sas_event_data->ReasonCode) { 4193 switch (sas_event_data->ReasonCode) {
2871 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 4194 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2872
2873 mptsas_target_reset_queue(ioc, sas_event_data);
2874 break;
2875
2876 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 4195 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2877 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4196 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
2878 if (!ev) { 4197 hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
2879 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4198 hot_plug_info.channel = sas_event_data->Bus;
2880 break; 4199 hot_plug_info.id = sas_event_data->TargetID;
2881 } 4200 hot_plug_info.phy_id = sas_event_data->PhyNum;
2882
2883 INIT_WORK(&ev->work, mptsas_hotplug_work);
2884 ev->ioc = ioc;
2885 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
2886 ev->parent_handle =
2887 le16_to_cpu(sas_event_data->ParentDevHandle);
2888 ev->channel = sas_event_data->Bus;
2889 ev->id = sas_event_data->TargetID;
2890 ev->phy_id = sas_event_data->PhyNum;
2891 memcpy(&sas_address, &sas_event_data->SASAddress, 4201 memcpy(&sas_address, &sas_event_data->SASAddress,
2892 sizeof(__le64)); 4202 sizeof(u64));
2893 ev->sas_address = le64_to_cpu(sas_address); 4203 hot_plug_info.sas_address = le64_to_cpu(sas_address);
2894 ev->device_info = device_info; 4204 hot_plug_info.device_info = device_info;
2895
2896 if (sas_event_data->ReasonCode & 4205 if (sas_event_data->ReasonCode &
2897 MPI_EVENT_SAS_DEV_STAT_RC_ADDED) 4206 MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
2898 ev->event_type = MPTSAS_ADD_DEVICE; 4207 hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
2899 else 4208 else
2900 ev->event_type = MPTSAS_DEL_DEVICE; 4209 hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
2901 schedule_work(&ev->work); 4210 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
2902 break; 4211 break;
4212
2903 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 4213 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
2904 /* 4214 mptbase_sas_persist_operation(ioc,
2905 * Persistent table is full. 4215 MPI_SAS_OP_CLEAR_NOT_PRESENT);
2906 */ 4216 mptsas_free_fw_event(ioc, fw_event);
2907 INIT_WORK(&ioc->sas_persist_task,
2908 mptsas_persist_clear_table);
2909 schedule_work(&ioc->sas_persist_task);
2910 break; 4217 break;
2911 /* 4218
2912 * TODO, handle other events
2913 */
2914 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 4219 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
2915 case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 4220 /* TODO */
2916 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 4221 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2917 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 4222 /* TODO */
2918 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
2919 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
2920 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
2921 default: 4223 default:
4224 mptsas_free_fw_event(ioc, fw_event);
2922 break; 4225 break;
2923 } 4226 }
2924} 4227}
4228
2925static void 4229static void
2926mptsas_send_raid_event(MPT_ADAPTER *ioc, 4230mptsas_send_raid_event(struct fw_event_work *fw_event)
2927 EVENT_DATA_RAID *raid_event_data)
2928{ 4231{
2929 struct mptsas_hotplug_event *ev; 4232 MPT_ADAPTER *ioc;
2930 int status = le32_to_cpu(raid_event_data->SettingsStatus); 4233 EVENT_DATA_RAID *raid_event_data;
2931 int state = (status >> 8) & 0xff; 4234 struct mptsas_hotplug_event hot_plug_info;
2932 4235 int status;
2933 if (ioc->bus_type != SAS) 4236 int state;
2934 return; 4237 struct scsi_device *sdev = NULL;
2935 4238 VirtDevice *vdevice = NULL;
2936 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4239 RaidPhysDiskPage0_t phys_disk;
2937 if (!ev) { 4240
2938 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4241 ioc = fw_event->ioc;
2939 return; 4242 raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
4243 status = le32_to_cpu(raid_event_data->SettingsStatus);
4244 state = (status >> 8) & 0xff;
4245
4246 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4247 hot_plug_info.id = raid_event_data->VolumeID;
4248 hot_plug_info.channel = raid_event_data->VolumeBus;
4249 hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
4250
4251 if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
4252 raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
4253 raid_event_data->ReasonCode ==
4254 MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
4255 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
4256 hot_plug_info.id, 0);
4257 hot_plug_info.sdev = sdev;
4258 if (sdev)
4259 vdevice = sdev->hostdata;
2940 } 4260 }
2941 4261
2942 INIT_WORK(&ev->work, mptsas_hotplug_work); 4262 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
2943 ev->ioc = ioc; 4263 "ReasonCode=%02x\n", ioc->name, __func__,
2944 ev->id = raid_event_data->VolumeID; 4264 raid_event_data->ReasonCode));
2945 ev->channel = raid_event_data->VolumeBus;
2946 ev->event_type = MPTSAS_IGNORE_EVENT;
2947 4265
2948 switch (raid_event_data->ReasonCode) { 4266 switch (raid_event_data->ReasonCode) {
2949 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 4267 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
2950 ev->phys_disk_num_valid = 1; 4268 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
2951 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2952 ev->event_type = MPTSAS_ADD_DEVICE;
2953 break; 4269 break;
2954 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: 4270 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
2955 ev->phys_disk_num_valid = 1; 4271 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
2956 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2957 ev->hidden_raid_component = 1;
2958 ev->event_type = MPTSAS_DEL_DEVICE;
2959 break; 4272 break;
2960 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: 4273 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
2961 switch (state) { 4274 switch (state) {
2962 case MPI_PD_STATE_ONLINE: 4275 case MPI_PD_STATE_ONLINE:
2963 case MPI_PD_STATE_NOT_COMPATIBLE: 4276 case MPI_PD_STATE_NOT_COMPATIBLE:
2964 ev->phys_disk_num_valid = 1; 4277 mpt_raid_phys_disk_pg0(ioc,
2965 ev->phys_disk_num = raid_event_data->PhysDiskNum; 4278 raid_event_data->PhysDiskNum, &phys_disk);
2966 ev->hidden_raid_component = 1; 4279 hot_plug_info.id = phys_disk.PhysDiskID;
2967 ev->event_type = MPTSAS_ADD_DEVICE; 4280 hot_plug_info.channel = phys_disk.PhysDiskBus;
4281 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
2968 break; 4282 break;
4283 case MPI_PD_STATE_FAILED:
2969 case MPI_PD_STATE_MISSING: 4284 case MPI_PD_STATE_MISSING:
2970 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: 4285 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
2971 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: 4286 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
2972 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: 4287 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
2973 ev->phys_disk_num_valid = 1; 4288 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
2974 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2975 ev->event_type = MPTSAS_DEL_DEVICE;
2976 break; 4289 break;
2977 default: 4290 default:
2978 break; 4291 break;
2979 } 4292 }
2980 break; 4293 break;
2981 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 4294 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
2982 ev->event_type = MPTSAS_DEL_RAID; 4295 if (!sdev)
4296 break;
4297 vdevice->vtarget->deleted = 1; /* block IO */
4298 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2983 break; 4299 break;
2984 case MPI_EVENT_RAID_RC_VOLUME_CREATED: 4300 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
2985 ev->event_type = MPTSAS_ADD_RAID; 4301 if (sdev) {
4302 scsi_device_put(sdev);
4303 break;
4304 }
4305 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2986 break; 4306 break;
2987 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 4307 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
4308 if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
4309 if (!sdev)
4310 break;
4311 vdevice->vtarget->deleted = 1; /* block IO */
4312 hot_plug_info.event_type = MPTSAS_DEL_RAID;
4313 break;
4314 }
2988 switch (state) { 4315 switch (state) {
2989 case MPI_RAIDVOL0_STATUS_STATE_FAILED: 4316 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
2990 case MPI_RAIDVOL0_STATUS_STATE_MISSING: 4317 case MPI_RAIDVOL0_STATUS_STATE_MISSING:
2991 ev->event_type = MPTSAS_DEL_RAID; 4318 if (!sdev)
4319 break;
4320 vdevice->vtarget->deleted = 1; /* block IO */
4321 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2992 break; 4322 break;
2993 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: 4323 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
2994 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: 4324 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
2995 ev->event_type = MPTSAS_ADD_RAID; 4325 if (sdev) {
4326 scsi_device_put(sdev);
4327 break;
4328 }
4329 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2996 break; 4330 break;
2997 default: 4331 default:
2998 break; 4332 break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
3001 default: 4335 default:
3002 break; 4336 break;
3003 } 4337 }
3004 schedule_work(&ev->work); 4338
4339 if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
4340 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
4341 else
4342 mptsas_free_fw_event(ioc, fw_event);
3005} 4343}
3006 4344
3007static void 4345/**
3008mptsas_send_discovery_event(MPT_ADAPTER *ioc, 4346 * mptsas_issue_tm - send mptsas internal tm request
3009 EVENT_DATA_SAS_DISCOVERY *discovery_data) 4347 * @ioc: Pointer to MPT_ADAPTER structure
4348 * @type: Task Management type
4349 * @channel: channel number for task management
4350 * @id: Logical Target ID for reset (if appropriate)
4351 * @lun: Logical unit for reset (if appropriate)
4352 * @task_context: Context for the task to be aborted
4353 * @timeout: timeout for task management control
4354 *
 4355 * Return 0 on success and -1 on failure.
4356 *
4357 */
4358static int
4359mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
4360 int task_context, ulong timeout, u8 *issue_reset)
3010{ 4361{
3011 struct mptsas_discovery_event *ev; 4362 MPT_FRAME_HDR *mf;
4363 SCSITaskMgmt_t *pScsiTm;
4364 int retval;
4365 unsigned long timeleft;
4366
4367 *issue_reset = 0;
4368 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
4369 if (mf == NULL) {
4370 retval = -1; /* return failure */
4371 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
4372 "msg frames!!\n", ioc->name));
4373 goto out;
4374 }
3012 4375
3013 /* 4376 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
3014 * DiscoveryStatus 4377 "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
3015 * 4378 "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
3016 * This flag will be non-zero when firmware 4379 type, timeout, channel, id, (unsigned long long)lun,
3017 * kicks off discovery, and return to zero 4380 task_context));
3018 * once its completed. 4381
3019 */ 4382 pScsiTm = (SCSITaskMgmt_t *) mf;
3020 if (discovery_data->DiscoveryStatus) 4383 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
3021 return; 4384 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4385 pScsiTm->TaskType = type;
4386 pScsiTm->MsgFlags = 0;
4387 pScsiTm->TargetID = id;
4388 pScsiTm->Bus = channel;
4389 pScsiTm->ChainOffset = 0;
4390 pScsiTm->Reserved = 0;
4391 pScsiTm->Reserved1 = 0;
4392 pScsiTm->TaskMsgContext = task_context;
4393 int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
4394
4395 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4396 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
4397 retval = 0;
4398 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
4399
4400 /* Now wait for the command to complete */
4401 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
4402 timeout*HZ);
4403 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
4404 retval = -1; /* return failure */
4405 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
4406 "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
4407 mpt_free_msg_frame(ioc, mf);
4408 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
4409 goto out;
4410 *issue_reset = 1;
4411 goto out;
4412 }
3022 4413
3023 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4414 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
3024 if (!ev) 4415 retval = -1; /* return failure */
4416 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4417 "TaskMgmt request: failed with no reply\n", ioc->name));
4418 goto out;
4419 }
4420
4421 out:
4422 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4423 return retval;
4424}
4425
4426/**
4427 * mptsas_broadcast_primative_work - Handle broadcast primitives
4428 * @work: work queue payload containing info describing the event
4429 *
 4430 * This will be handled in workqueue context.
4431 */
4432static void
4433mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
4434{
4435 MPT_ADAPTER *ioc = fw_event->ioc;
4436 MPT_FRAME_HDR *mf;
4437 VirtDevice *vdevice;
4438 int ii;
4439 struct scsi_cmnd *sc;
4440 SCSITaskMgmtReply_t *pScsiTmReply;
4441 u8 issue_reset;
4442 int task_context;
4443 u8 channel, id;
4444 int lun;
4445 u32 termination_count;
4446 u32 query_count;
4447
4448 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4449 "%s - enter\n", ioc->name, __func__));
4450
4451 mutex_lock(&ioc->taskmgmt_cmds.mutex);
4452 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
4453 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4454 mptsas_requeue_fw_event(ioc, fw_event, 1000);
3025 return; 4455 return;
3026 INIT_WORK(&ev->work, mptsas_discovery_work); 4456 }
3027 ev->ioc = ioc; 4457
3028 schedule_work(&ev->work); 4458 issue_reset = 0;
3029}; 4459 termination_count = 0;
4460 query_count = 0;
4461 mpt_findImVolumes(ioc);
4462 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
4463
4464 for (ii = 0; ii < ioc->req_depth; ii++) {
4465 if (ioc->fw_events_off)
4466 goto out;
4467 sc = mptscsih_get_scsi_lookup(ioc, ii);
4468 if (!sc)
4469 continue;
4470 mf = MPT_INDEX_2_MFPTR(ioc, ii);
4471 if (!mf)
4472 continue;
4473 task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
4474 vdevice = sc->device->hostdata;
4475 if (!vdevice || !vdevice->vtarget)
4476 continue;
4477 if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
4478 continue; /* skip hidden raid components */
4479 if (vdevice->vtarget->raidVolume)
4480 continue; /* skip hidden raid components */
4481 channel = vdevice->vtarget->channel;
4482 id = vdevice->vtarget->id;
4483 lun = vdevice->lun;
4484 if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
4485 channel, id, (u64)lun, task_context, 30, &issue_reset))
4486 goto out;
4487 query_count++;
4488 termination_count +=
4489 le32_to_cpu(pScsiTmReply->TerminationCount);
4490 if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
4491 (pScsiTmReply->ResponseCode ==
4492 MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
4493 pScsiTmReply->ResponseCode ==
4494 MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
4495 continue;
4496 if (mptsas_issue_tm(ioc,
4497 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
4498 channel, id, (u64)lun, 0, 30, &issue_reset))
4499 goto out;
4500 termination_count +=
4501 le32_to_cpu(pScsiTmReply->TerminationCount);
4502 }
4503
4504 out:
4505 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4506 "%s - exit, query_count = %d termination_count = %d\n",
4507 ioc->name, __func__, query_count, termination_count));
4508
4509 ioc->broadcast_aen_busy = 0;
4510 mpt_clear_taskmgmt_in_progress_flag(ioc);
4511 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4512
4513 if (issue_reset) {
4514 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
4515 ioc->name, __func__);
4516 mpt_HardResetHandler(ioc, CAN_SLEEP);
4517 }
4518 mptsas_free_fw_event(ioc, fw_event);
4519}
3030 4520
3031/* 4521/*
3032 * mptsas_send_ir2_event - handle exposing hidden disk when 4522 * mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
3037 * 4527 *
3038 */ 4528 */
3039static void 4529static void
3040mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data) 4530mptsas_send_ir2_event(struct fw_event_work *fw_event)
3041{ 4531{
3042 struct mptsas_hotplug_event *ev; 4532 MPT_ADAPTER *ioc;
3043 4533 struct mptsas_hotplug_event hot_plug_info;
3044 if (ir2_data->ReasonCode != 4534 MPI_EVENT_DATA_IR2 *ir2_data;
3045 MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED) 4535 u8 reasonCode;
3046 return; 4536 RaidPhysDiskPage0_t phys_disk;
3047 4537
3048 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4538 ioc = fw_event->ioc;
3049 if (!ev) 4539 ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
4540 reasonCode = ir2_data->ReasonCode;
4541
4542 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
4543 "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
4544
4545 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4546 hot_plug_info.id = ir2_data->TargetID;
4547 hot_plug_info.channel = ir2_data->Bus;
4548 switch (reasonCode) {
4549 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
4550 hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
4551 break;
4552 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
4553 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4554 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
4555 break;
4556 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
4557 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4558 mpt_raid_phys_disk_pg0(ioc,
4559 ir2_data->PhysDiskNum, &phys_disk);
4560 hot_plug_info.id = phys_disk.PhysDiskID;
4561 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
4562 break;
4563 default:
4564 mptsas_free_fw_event(ioc, fw_event);
3050 return; 4565 return;
3051 4566 }
3052 INIT_WORK(&ev->work, mptsas_hotplug_work); 4567 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
3053 ev->ioc = ioc; 4568}
3054 ev->id = ir2_data->TargetID;
3055 ev->channel = ir2_data->Bus;
3056 ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
3057
3058 schedule_work(&ev->work);
3059};
3060 4569
3061static int 4570static int
3062mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) 4571mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
3063{ 4572{
3064 int rc=1; 4573 u32 event = le32_to_cpu(reply->Event);
3065 u8 event = le32_to_cpu(reply->Event) & 0xFF; 4574 int sz, event_data_sz;
4575 struct fw_event_work *fw_event;
4576 unsigned long delay;
3066 4577
3067 if (!ioc->sh) 4578 /* events turned off due to host reset or driver unloading */
3068 goto out; 4579 if (ioc->fw_events_off)
3069 4580 return 0;
3070 /*
3071 * sas_discovery_ignore_events
3072 *
3073 * This flag is to prevent anymore processing of
3074 * sas events once mptsas_remove function is called.
3075 */
3076 if (ioc->sas_discovery_ignore_events) {
3077 rc = mptscsih_event_process(ioc, reply);
3078 goto out;
3079 }
3080 4581
4582 delay = msecs_to_jiffies(1);
3081 switch (event) { 4583 switch (event) {
4584 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
4585 {
4586 EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
4587 (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
4588 if (broadcast_event_data->Primitive !=
4589 MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
4590 return 0;
4591 if (ioc->broadcast_aen_busy)
4592 return 0;
4593 ioc->broadcast_aen_busy = 1;
4594 break;
4595 }
3082 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 4596 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
3083 mptsas_send_sas_event(ioc, 4597 {
3084 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); 4598 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
4599 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
4600
4601 if (sas_event_data->ReasonCode ==
4602 MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
4603 mptsas_target_reset_queue(ioc, sas_event_data);
4604 return 0;
4605 }
3085 break; 4606 break;
3086 case MPI_EVENT_INTEGRATED_RAID: 4607 }
3087 mptsas_send_raid_event(ioc, 4608 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
3088 (EVENT_DATA_RAID *)reply->Data); 4609 {
4610 MpiEventDataSasExpanderStatusChange_t *expander_data =
4611 (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
4612
4613 if (ioc->old_sas_discovery_protocal)
4614 return 0;
4615
4616 if (expander_data->ReasonCode ==
4617 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
4618 ioc->device_missing_delay)
4619 delay = HZ * ioc->device_missing_delay;
3089 break; 4620 break;
4621 }
4622 case MPI_EVENT_SAS_DISCOVERY:
4623 {
4624 u32 discovery_status;
4625 EventDataSasDiscovery_t *discovery_data =
4626 (EventDataSasDiscovery_t *)reply->Data;
4627
4628 discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
4629 ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
4630 if (ioc->old_sas_discovery_protocal && !discovery_status)
4631 mptsas_queue_rescan(ioc);
4632 return 0;
4633 }
4634 case MPI_EVENT_INTEGRATED_RAID:
3090 case MPI_EVENT_PERSISTENT_TABLE_FULL: 4635 case MPI_EVENT_PERSISTENT_TABLE_FULL:
3091 INIT_WORK(&ioc->sas_persist_task,
3092 mptsas_persist_clear_table);
3093 schedule_work(&ioc->sas_persist_task);
3094 break;
3095 case MPI_EVENT_SAS_DISCOVERY:
3096 mptsas_send_discovery_event(ioc,
3097 (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
3098 break;
3099 case MPI_EVENT_IR2: 4636 case MPI_EVENT_IR2:
3100 mptsas_send_ir2_event(ioc, 4637 case MPI_EVENT_SAS_PHY_LINK_STATUS:
3101 (PTR_MPI_EVENT_DATA_IR2)reply->Data); 4638 case MPI_EVENT_QUEUE_FULL:
3102 break; 4639 break;
3103 default: 4640 default:
3104 rc = mptscsih_event_process(ioc, reply); 4641 return 0;
3105 break;
3106 } 4642 }
3107 out:
3108 4643
3109 return rc; 4644 event_data_sz = ((reply->MsgLength * 4) -
4645 offsetof(EventNotificationReply_t, Data));
4646 sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
4647 fw_event = kzalloc(sz, GFP_ATOMIC);
4648 if (!fw_event) {
4649 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
4650 __func__, __LINE__);
4651 return 0;
4652 }
4653 memcpy(fw_event->event_data, reply->Data, event_data_sz);
4654 fw_event->event = event;
4655 fw_event->ioc = ioc;
4656 mptsas_add_fw_event(ioc, fw_event, delay);
4657 return 0;
4658}
4659
4660/* Delete a volume when no longer listed in ioc pg2
4661 */
4662static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
4663{
4664 struct scsi_device *sdev;
4665 int i;
4666
4667 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
4668 if (!sdev)
4669 return;
4670 if (!ioc->raid_data.pIocPg2)
4671 goto out;
4672 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
4673 goto out;
4674 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
4675 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
4676 goto release_sdev;
4677 out:
4678 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4679 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
4680 scsi_remove_device(sdev);
4681 release_sdev:
4682 scsi_device_put(sdev);
3110} 4683}
3111 4684
3112static int 4685static int
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3128 return r; 4701 return r;
3129 4702
3130 ioc = pci_get_drvdata(pdev); 4703 ioc = pci_get_drvdata(pdev);
4704 mptsas_fw_event_off(ioc);
3131 ioc->DoneCtx = mptsasDoneCtx; 4705 ioc->DoneCtx = mptsasDoneCtx;
3132 ioc->TaskCtx = mptsasTaskCtx; 4706 ioc->TaskCtx = mptsasTaskCtx;
3133 ioc->InternalCtx = mptsasInternalCtx; 4707 ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3211 * A slightly different algorithm is required for 4785 * A slightly different algorithm is required for
3212 * 64bit SGEs. 4786 * 64bit SGEs.
3213 */ 4787 */
3214 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4788 scale = ioc->req_sz/ioc->SGE_size;
3215 if (sizeof(dma_addr_t) == sizeof(u64)) { 4789 if (ioc->sg_addr_size == sizeof(u64)) {
3216 numSGE = (scale - 1) * 4790 numSGE = (scale - 1) *
3217 (ioc->facts.MaxChainDepth-1) + scale + 4791 (ioc->facts.MaxChainDepth-1) + scale +
3218 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 4792 (ioc->req_sz - 60) / ioc->SGE_size;
3219 sizeof(u32));
3220 } else { 4793 } else {
3221 numSGE = 1 + (scale - 1) * 4794 numSGE = 1 + (scale - 1) *
3222 (ioc->facts.MaxChainDepth-1) + scale + 4795 (ioc->facts.MaxChainDepth-1) + scale +
3223 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 4796 (ioc->req_sz - 64) / ioc->SGE_size;
3224 sizeof(u32));
3225 } 4797 }
3226 4798
3227 if (numSGE < sh->sg_tablesize) { 4799 if (numSGE < sh->sg_tablesize) {
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3251 4823
3252 /* Clear the TM flags 4824 /* Clear the TM flags
3253 */ 4825 */
3254 hd->tmPending = 0;
3255 hd->tmState = TM_STATE_NONE;
3256 hd->resetPending = 0;
3257 hd->abortSCpnt = NULL; 4826 hd->abortSCpnt = NULL;
3258 4827
3259 /* Clear the pointer used to store 4828 /* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3273 4842
3274 ioc->sas_data.ptClear = mpt_pt_clear; 4843 ioc->sas_data.ptClear = mpt_pt_clear;
3275 4844
3276 init_waitqueue_head(&hd->scandv_waitq);
3277 hd->scandv_wait_done = 0;
3278 hd->last_queue_full = 0; 4845 hd->last_queue_full = 0;
3279 INIT_LIST_HEAD(&hd->target_reset_list); 4846 INIT_LIST_HEAD(&hd->target_reset_list);
4847 INIT_LIST_HEAD(&ioc->sas_device_info_list);
4848 mutex_init(&ioc->sas_device_info_mutex);
4849
3280 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 4850 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
3281 4851
3282 if (ioc->sas_data.ptClear==1) { 4852 if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3291 goto out_mptsas_probe; 4861 goto out_mptsas_probe;
3292 } 4862 }
3293 4863
4864 /* older firmware doesn't support expander events */
4865 if ((ioc->facts.HeaderVersion >> 8) < 0xE)
4866 ioc->old_sas_discovery_protocal = 1;
3294 mptsas_scan_sas_topology(ioc); 4867 mptsas_scan_sas_topology(ioc);
3295 4868 mptsas_fw_event_on(ioc);
3296 return 0; 4869 return 0;
3297 4870
3298 out_mptsas_probe: 4871 out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3301 return error; 4874 return error;
3302} 4875}
3303 4876
4877void
4878mptsas_shutdown(struct pci_dev *pdev)
4879{
4880 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
4881
4882 mptsas_fw_event_off(ioc);
4883 mptsas_cleanup_fw_event_q(ioc);
4884}
4885
3304static void __devexit mptsas_remove(struct pci_dev *pdev) 4886static void __devexit mptsas_remove(struct pci_dev *pdev)
3305{ 4887{
3306 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 4888 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
3307 struct mptsas_portinfo *p, *n; 4889 struct mptsas_portinfo *p, *n;
3308 int i; 4890 int i;
3309 4891
4892 mptsas_shutdown(pdev);
4893
4894 mptsas_del_device_components(ioc);
4895
3310 ioc->sas_discovery_ignore_events = 1; 4896 ioc->sas_discovery_ignore_events = 1;
3311 sas_remove_host(ioc->sh); 4897 sas_remove_host(ioc->sh);
3312 4898
@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
3315 list_del(&p->list); 4901 list_del(&p->list);
3316 for (i = 0 ; i < p->num_phys ; i++) 4902 for (i = 0 ; i < p->num_phys ; i++)
3317 mptsas_port_delete(ioc, p->phy_info[i].port_details); 4903 mptsas_port_delete(ioc, p->phy_info[i].port_details);
4904
3318 kfree(p->phy_info); 4905 kfree(p->phy_info);
3319 kfree(p); 4906 kfree(p);
3320 } 4907 }
3321 mutex_unlock(&ioc->sas_topology_mutex); 4908 mutex_unlock(&ioc->sas_topology_mutex);
3322 4909 ioc->hba_port_info = NULL;
3323 mptscsih_remove(pdev); 4910 mptscsih_remove(pdev);
3324} 4911}
3325 4912
@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
3344 .id_table = mptsas_pci_table, 4931 .id_table = mptsas_pci_table,
3345 .probe = mptsas_probe, 4932 .probe = mptsas_probe,
3346 .remove = __devexit_p(mptsas_remove), 4933 .remove = __devexit_p(mptsas_remove),
3347 .shutdown = mptscsih_shutdown, 4934 .shutdown = mptsas_shutdown,
3348#ifdef CONFIG_PM 4935#ifdef CONFIG_PM
3349 .suspend = mptscsih_suspend, 4936 .suspend = mptscsih_suspend,
3350 .resume = mptscsih_resume, 4937 .resume = mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
3364 return -ENODEV; 4951 return -ENODEV;
3365 4952
3366 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); 4953 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
3367 mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); 4954 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
3368 mptsasInternalCtx = 4955 mptsasInternalCtx =
3369 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); 4956 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
3370 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); 4957 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
4958 mptsasDeviceResetCtx =
4959 mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
3371 4960
3372 mpt_event_register(mptsasDoneCtx, mptsas_event_process); 4961 mpt_event_register(mptsasDoneCtx, mptsas_event_process);
3373 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); 4962 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
3392 mpt_deregister(mptsasInternalCtx); 4981 mpt_deregister(mptsasInternalCtx);
3393 mpt_deregister(mptsasTaskCtx); 4982 mpt_deregister(mptsasTaskCtx);
3394 mpt_deregister(mptsasDoneCtx); 4983 mpt_deregister(mptsasDoneCtx);
4984 mpt_deregister(mptsasDeviceResetCtx);
3395} 4985}
3396 4986
3397module_init(mptsas_init); 4987module_init(mptsas_init);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 2b544e0877e6..953c2bfcf6aa 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
53 struct list_head list; 53 struct list_head list;
54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; 54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
55 u8 target_reset_issued; 55 u8 target_reset_issued;
56 unsigned long time_count;
56}; 57};
57 58
58enum mptsas_hotplug_action { 59enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
60 MPTSAS_DEL_DEVICE, 61 MPTSAS_DEL_DEVICE,
61 MPTSAS_ADD_RAID, 62 MPTSAS_ADD_RAID,
62 MPTSAS_DEL_RAID, 63 MPTSAS_DEL_RAID,
64 MPTSAS_ADD_PHYSDISK,
65 MPTSAS_ADD_PHYSDISK_REPROBE,
66 MPTSAS_DEL_PHYSDISK,
67 MPTSAS_DEL_PHYSDISK_REPROBE,
63 MPTSAS_ADD_INACTIVE_VOLUME, 68 MPTSAS_ADD_INACTIVE_VOLUME,
64 MPTSAS_IGNORE_EVENT, 69 MPTSAS_IGNORE_EVENT,
65}; 70};
66 71
72struct mptsas_mapping{
73 u8 id;
74 u8 channel;
75};
76
77struct mptsas_device_info {
78 struct list_head list;
79 struct mptsas_mapping os; /* operating system mapping*/
80 struct mptsas_mapping fw; /* firmware mapping */
81 u64 sas_address;
82 u32 device_info; /* specific bits for devices */
83 u16 slot; /* enclosure slot id */
84 u64 enclosure_logical_id; /*enclosure address */
85 u8 is_logical_volume; /* is this logical volume */
86 /* this belongs to volume */
87 u8 is_hidden_raid_component;
88 /* this valid when is_hidden_raid_component set */
89 u8 volume_id;
90 /* cached data for a removed device */
91 u8 is_cached;
92};
93
67struct mptsas_hotplug_event { 94struct mptsas_hotplug_event {
68 struct work_struct work;
69 MPT_ADAPTER *ioc; 95 MPT_ADAPTER *ioc;
70 enum mptsas_hotplug_action event_type; 96 enum mptsas_hotplug_action event_type;
71 u64 sas_address; 97 u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
73 u8 id; 99 u8 id;
74 u32 device_info; 100 u32 device_info;
75 u16 handle; 101 u16 handle;
76 u16 parent_handle;
77 u8 phy_id; 102 u8 phy_id;
78 u8 phys_disk_num_valid; /* hrc (hidden raid component) */
79 u8 phys_disk_num; /* hrc - unique index*/ 103 u8 phys_disk_num; /* hrc - unique index*/
80 u8 hidden_raid_component; /* hrc - don't expose*/ 104 struct scsi_device *sdev;
105};
106
107struct fw_event_work {
108 struct list_head list;
109 struct delayed_work work;
110 MPT_ADAPTER *ioc;
111 u32 event;
112 u8 retries;
113 u8 event_data[1];
81}; 114};
82 115
83struct mptsas_discovery_event { 116struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e62c6bc4ad33..8440f78f6969 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
80/* 80/*
81 * Other private/forward protos... 81 * Other private/forward protos...
82 */ 82 */
83static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); 83struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); 84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); 85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); 86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
92 SCSIIORequest_t *pReq, int req_idx); 92 SCSIIORequest_t *pReq, int req_idx);
93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); 93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
95static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
96static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
97 95
98static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); 96int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
97 int lun, int ctx2abort, ulong timeout);
99 98
100int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 99int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
101int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 100int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
102 101
102void
103mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
104static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
105 MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
103int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); 106int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
104static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); 107static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
105static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); 108static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
106 109
110static int
111mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
112 SCSITaskMgmtReply_t *pScsiTmReply);
107void mptscsih_remove(struct pci_dev *); 113void mptscsih_remove(struct pci_dev *);
108void mptscsih_shutdown(struct pci_dev *); 114void mptscsih_shutdown(struct pci_dev *);
109#ifdef CONFIG_PM 115#ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int mptscsih_resume(struct pci_dev *pdev);
113 119
114#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE 120#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
115 121
116/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
117/**
118 * mptscsih_add_sge - Place a simple SGE at address pAddr.
119 * @pAddr: virtual address for SGE
120 * @flagslength: SGE flags and data transfer length
121 * @dma_addr: Physical address
122 *
123 * This routine places a MPT request frame back on the MPT adapter's
124 * FreeQ.
125 */
126static inline void
127mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
128{
129 if (sizeof(dma_addr_t) == sizeof(u64)) {
130 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
131 u32 tmp = dma_addr & 0xFFFFFFFF;
132
133 pSge->FlagsLength = cpu_to_le32(flagslength);
134 pSge->Address.Low = cpu_to_le32(tmp);
135 tmp = (u32) ((u64)dma_addr >> 32);
136 pSge->Address.High = cpu_to_le32(tmp);
137
138 } else {
139 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
140 pSge->FlagsLength = cpu_to_le32(flagslength);
141 pSge->Address = cpu_to_le32(dma_addr);
142 }
143} /* mptscsih_add_sge() */
144
145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
146/**
147 * mptscsih_add_chain - Place a chain SGE at address pAddr.
148 * @pAddr: virtual address for SGE
149 * @next: nextChainOffset value (u32's)
150 * @length: length of next SGL segment
151 * @dma_addr: Physical address
152 *
153 * This routine places a MPT request frame back on the MPT adapter's
154 * FreeQ.
155 */
156static inline void
157mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
158{
159 if (sizeof(dma_addr_t) == sizeof(u64)) {
160 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
161 u32 tmp = dma_addr & 0xFFFFFFFF;
162
163 pChain->Length = cpu_to_le16(length);
164 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
165
166 pChain->NextChainOffset = next;
167
168 pChain->Address.Low = cpu_to_le32(tmp);
169 tmp = (u32) ((u64)dma_addr >> 32);
170 pChain->Address.High = cpu_to_le32(tmp);
171 } else {
172 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
173 pChain->Length = cpu_to_le16(length);
174 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
175 pChain->NextChainOffset = next;
176 pChain->Address = cpu_to_le32(dma_addr);
177 }
178} /* mptscsih_add_chain() */
179 122
180/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 123/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
181/* 124/*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
281 */ 224 */
282 225
283nextSGEset: 226nextSGEset:
284 numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) ); 227 numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
285 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; 228 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
286 229
287 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; 230 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
288 231
289 /* Get first (num - 1) SG elements 232 /* Get first (num - 1) SG elements
290 * Skip any SG entries with a length of 0 233 * Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
293 for (ii=0; ii < (numSgeThisFrame-1); ii++) { 236 for (ii=0; ii < (numSgeThisFrame-1); ii++) {
294 thisxfer = sg_dma_len(sg); 237 thisxfer = sg_dma_len(sg);
295 if (thisxfer == 0) { 238 if (thisxfer == 0) {
296 sg = sg_next(sg); /* Get next SG element from the OS */ 239 /* Get next SG element from the OS */
240 sg = sg_next(sg);
297 sg_done++; 241 sg_done++;
298 continue; 242 continue;
299 } 243 }
300 244
301 v2 = sg_dma_address(sg); 245 v2 = sg_dma_address(sg);
302 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 246 ioc->add_sge(psge, sgflags | thisxfer, v2);
303 247
304 sg = sg_next(sg); /* Get next SG element from the OS */ 248 /* Get next SG element from the OS */
305 psge += (sizeof(u32) + sizeof(dma_addr_t)); 249 sg = sg_next(sg);
306 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 250 psge += ioc->SGE_size;
251 sgeOffset += ioc->SGE_size;
307 sg_done++; 252 sg_done++;
308 } 253 }
309 254
@@ -320,12 +265,8 @@ nextSGEset:
320 thisxfer = sg_dma_len(sg); 265 thisxfer = sg_dma_len(sg);
321 266
322 v2 = sg_dma_address(sg); 267 v2 = sg_dma_address(sg);
323 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 268 ioc->add_sge(psge, sgflags | thisxfer, v2);
324 /* 269 sgeOffset += ioc->SGE_size;
325 sg = sg_next(sg);
326 psge += (sizeof(u32) + sizeof(dma_addr_t));
327 */
328 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
329 sg_done++; 270 sg_done++;
330 271
331 if (chainSge) { 272 if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
334 * Update the chain element 275 * Update the chain element
335 * Offset and Length fields. 276 * Offset and Length fields.
336 */ 277 */
337 mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 278 ioc->add_chain((char *)chainSge, 0, sgeOffset,
279 ioc->ChainBufferDMA + chain_dma_off);
338 } else { 280 } else {
339 /* The current buffer is the original MF 281 /* The current buffer is the original MF
340 * and there is no Chain buffer. 282 * and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
367 * set properly). 309 * set properly).
368 */ 310 */
369 if (sg_done) { 311 if (sg_done) {
370 u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t))); 312 u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
371 sgflags = le32_to_cpu(*ptmp); 313 sgflags = le32_to_cpu(*ptmp);
372 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; 314 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
373 *ptmp = cpu_to_le32(sgflags); 315 *ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
381 * Old chain element is now complete. 323 * Old chain element is now complete.
382 */ 324 */
383 u8 nextChain = (u8) (sgeOffset >> 2); 325 u8 nextChain = (u8) (sgeOffset >> 2);
384 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 326 sgeOffset += ioc->SGE_size;
385 mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 327 ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
328 ioc->ChainBufferDMA + chain_dma_off);
386 } else { 329 } else {
387 /* The original MF buffer requires a chain buffer - 330 /* The original MF buffer requires a chain buffer -
388 * set the offset. 331 * set the offset.
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
592 } 535 }
593 536
594 scsi_print_command(sc); 537 scsi_print_command(sc);
595 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n", 538 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
596 ioc->name, pScsiReply->Bus, pScsiReply->TargetID); 539 ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
597 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, " 540 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
598 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow, 541 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
599 scsi_get_resid(sc)); 542 scsi_get_resid(sc));
600 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, " 543 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
601 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag), 544 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
602 le32_to_cpu(pScsiReply->TransferCount), sc->result); 545 le32_to_cpu(pScsiReply->TransferCount), sc->result);
546
603 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), " 547 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
604 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n", 548 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
605 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus, 549 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
654 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); 598 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
655 req_idx_MR = (mr != NULL) ? 599 req_idx_MR = (mr != NULL) ?
656 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; 600 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
601
602 /* Special case, where already freed message frame is received from
603 * Firmware. It happens with Resetting IOC.
604 * Return immediately. Do not care
605 */
657 if ((req_idx != req_idx_MR) || 606 if ((req_idx != req_idx_MR) ||
658 (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) { 607 (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
659 printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
660 ioc->name);
661 printk (MYIOC_s_ERR_FMT
662 "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
663 ioc->name, req_idx, req_idx_MR, mf, mr,
664 mptscsih_get_scsi_lookup(ioc, req_idx_MR));
665 return 0; 608 return 0;
666 }
667 609
668 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); 610 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
669 if (sc == NULL) { 611 if (sc == NULL) {
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
810 */ 752 */
811 753
812 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ 754 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
813 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
814 /* Linux handles an unsolicited DID_RESET better 755 /* Linux handles an unsolicited DID_RESET better
815 * than an unsolicited DID_ABORT. 756 * than an unsolicited DID_ABORT.
816 */ 757 */
817 sc->result = DID_RESET << 16; 758 sc->result = DID_RESET << 16;
818 759
760 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
761 if (ioc->bus_type == FC)
762 sc->result = DID_ERROR << 16;
763 else
764 sc->result = DID_RESET << 16;
819 break; 765 break;
820 766
821 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 767 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
992 scsi_dma_unmap(sc); 938 scsi_dma_unmap(sc);
993 sc->result = DID_RESET << 16; 939 sc->result = DID_RESET << 16;
994 sc->host_scribble = NULL; 940 sc->host_scribble = NULL;
995 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT 941 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
996 "completing cmds: fw_channel %d, fw_id %d, sc=%p," 942 "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
997 " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii); 943 "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
998 sc->scsi_done(sc); 944 sc->scsi_done(sc);
999 } 945 }
1000} 946}
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
1053 scsi_dma_unmap(sc); 999 scsi_dma_unmap(sc);
1054 sc->host_scribble = NULL; 1000 sc->host_scribble = NULL;
1055 sc->result = DID_NO_CONNECT << 16; 1001 sc->result = DID_NO_CONNECT << 16;
1056 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d," 1002 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
1057 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, 1003 MYIOC_s_FMT "completing cmds: fw_channel %d, "
1058 vdevice->vtarget->id, sc, mf, ii); 1004 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
1005 vdevice->vtarget->channel, vdevice->vtarget->id,
1006 sc, mf, ii));
1059 sc->scsi_done(sc); 1007 sc->scsi_done(sc);
1060 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1008 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1061 } 1009 }
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1346 MPT_FRAME_HDR *mf; 1294 MPT_FRAME_HDR *mf;
1347 SCSIIORequest_t *pScsiReq; 1295 SCSIIORequest_t *pScsiReq;
1348 VirtDevice *vdevice = SCpnt->device->hostdata; 1296 VirtDevice *vdevice = SCpnt->device->hostdata;
1349 int lun;
1350 u32 datalen; 1297 u32 datalen;
1351 u32 scsictl; 1298 u32 scsictl;
1352 u32 scsidir; 1299 u32 scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1357 1304
1358 hd = shost_priv(SCpnt->device->host); 1305 hd = shost_priv(SCpnt->device->host);
1359 ioc = hd->ioc; 1306 ioc = hd->ioc;
1360 lun = SCpnt->device->lun;
1361 SCpnt->scsi_done = done; 1307 SCpnt->scsi_done = done;
1362 1308
1363 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", 1309 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
1364 ioc->name, SCpnt, done)); 1310 ioc->name, SCpnt, done));
1365 1311
1366 if (hd->resetPending) { 1312 if (ioc->taskmgmt_quiesce_io) {
1367 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", 1313 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
1368 ioc->name, SCpnt)); 1314 ioc->name, SCpnt));
1369 return SCSI_MLQUEUE_HOST_BUSY; 1315 return SCSI_MLQUEUE_HOST_BUSY;
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1422 pScsiReq->CDBLength = SCpnt->cmd_len; 1368 pScsiReq->CDBLength = SCpnt->cmd_len;
1423 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; 1369 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1424 pScsiReq->Reserved = 0; 1370 pScsiReq->Reserved = 0;
1425 pScsiReq->MsgFlags = mpt_msg_flags(); 1371 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
1426 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN); 1372 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
1427 pScsiReq->Control = cpu_to_le32(scsictl); 1373 pScsiReq->Control = cpu_to_le32(scsictl);
1428 1374
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1448 */ 1394 */
1449 if (datalen == 0) { 1395 if (datalen == 0) {
1450 /* Add a NULL SGE */ 1396 /* Add a NULL SGE */
1451 mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0, 1397 ioc->add_sge((char *)&pScsiReq->SGL,
1398 MPT_SGE_FLAGS_SSIMPLE_READ | 0,
1452 (dma_addr_t) -1); 1399 (dma_addr_t) -1);
1453 } else { 1400 } else {
1454 /* Add a 32 or 64 bit SGE */ 1401 /* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1528 1475
1529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1476/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1530/** 1477/**
1531 * mptscsih_TMHandler - Generic handler for SCSI Task Management. 1478 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1532 * @hd: Pointer to MPT SCSI HOST structure 1479 * @hd: Pointer to MPT_SCSI_HOST structure
1533 * @type: Task Management type 1480 * @type: Task Management type
1534 * @channel: channel number for task management 1481 * @channel: channel number for task management
1535 * @id: Logical Target ID for reset (if appropriate) 1482 * @id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1537 * @ctx2abort: Context for the task to be aborted (if appropriate) 1484 * @ctx2abort: Context for the task to be aborted (if appropriate)
1538 * @timeout: timeout for task management control 1485 * @timeout: timeout for task management control
1539 * 1486 *
1540 * Fall through to mpt_HardResetHandler if: not operational, too many 1487 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1541 * failed TM requests or handshake failure. 1488 * or a non-interrupt thread. In the former, must not call schedule().
1542 * 1489 *
1543 * Remark: Currently invoked from a non-interrupt thread (_bh). 1490 * Not all fields are meaningful for all task types.
1544 * 1491 *
1545 * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC 1492 * Returns 0 for SUCCESS, or FAILED.
1546 * will be active.
1547 * 1493 *
1548 * Returns 0 for SUCCESS, or %FAILED.
1549 **/ 1494 **/
1550int 1495int
1551mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) 1496mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1497 int ctx2abort, ulong timeout)
1552{ 1498{
1553 MPT_ADAPTER *ioc; 1499 MPT_FRAME_HDR *mf;
1554 int rc = -1; 1500 SCSITaskMgmt_t *pScsiTm;
1501 int ii;
1502 int retval;
1503 MPT_ADAPTER *ioc = hd->ioc;
1504 unsigned long timeleft;
1505 u8 issue_hard_reset;
1555 u32 ioc_raw_state; 1506 u32 ioc_raw_state;
1556 unsigned long flags; 1507 unsigned long time_count;
1557
1558 ioc = hd->ioc;
1559 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
1560
1561 // SJR - CHECKME - Can we avoid this here?
1562 // (mpt_HardResetHandler has this check...)
1563 spin_lock_irqsave(&ioc->diagLock, flags);
1564 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
1565 spin_unlock_irqrestore(&ioc->diagLock, flags);
1566 return FAILED;
1567 }
1568 spin_unlock_irqrestore(&ioc->diagLock, flags);
1569
1570 /* Wait a fixed amount of time for the TM pending flag to be cleared.
1571 * If we time out and not bus reset, then we return a FAILED status
1572 * to the caller.
1573 * The call to mptscsih_tm_pending_wait() will set the pending flag
1574 * if we are
1575 * successful. Otherwise, reload the FW.
1576 */
1577 if (mptscsih_tm_pending_wait(hd) == FAILED) {
1578 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1579 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
1580 "Timed out waiting for last TM (%d) to complete! \n",
1581 ioc->name, hd->tmPending));
1582 return FAILED;
1583 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1584 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
1585 "reset: Timed out waiting for last TM (%d) "
1586 "to complete! \n", ioc->name,
1587 hd->tmPending));
1588 return FAILED;
1589 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
1590 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
1591 "Timed out waiting for last TM (%d) to complete! \n",
1592 ioc->name, hd->tmPending));
1593 return FAILED;
1594 }
1595 } else {
1596 spin_lock_irqsave(&ioc->FreeQlock, flags);
1597 hd->tmPending |= (1 << type);
1598 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1599 }
1600 1508
1509 issue_hard_reset = 0;
1601 ioc_raw_state = mpt_GetIocState(ioc, 0); 1510 ioc_raw_state = mpt_GetIocState(ioc, 0);
1602 1511
1603 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { 1512 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
1604 printk(MYIOC_s_WARN_FMT 1513 printk(MYIOC_s_WARN_FMT
1605 "TM Handler for type=%x: IOC Not operational (0x%x)!\n", 1514 "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
1606 ioc->name, type, ioc_raw_state); 1515 ioc->name, type, ioc_raw_state);
1607 printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name); 1516 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
1517 ioc->name, __func__);
1608 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) 1518 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
1609 printk(MYIOC_s_WARN_FMT "TMHandler: HardReset " 1519 printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
1610 "FAILED!!\n", ioc->name); 1520 "FAILED!!\n", ioc->name);
1611 return FAILED; 1521 return 0;
1612 } 1522 }
1613 1523
1614 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { 1524 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
1615 printk(MYIOC_s_WARN_FMT 1525 printk(MYIOC_s_WARN_FMT
1616 "TM Handler for type=%x: ioc_state: " 1526 "TaskMgmt type=%x: ioc_state: "
1617 "DOORBELL_ACTIVE (0x%x)!\n", 1527 "DOORBELL_ACTIVE (0x%x)!\n",
1618 ioc->name, type, ioc_raw_state); 1528 ioc->name, type, ioc_raw_state);
1619 return FAILED; 1529 return FAILED;
1620 } 1530 }
1621 1531
1622 /* Issue the Task Mgmt request. 1532 mutex_lock(&ioc->taskmgmt_cmds.mutex);
1623 */ 1533 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
1624 if (hd->hard_resets < -1) 1534 mf = NULL;
1625 hd->hard_resets++; 1535 retval = FAILED;
1626 1536 goto out;
1627 rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun, 1537 }
1628 ctx2abort, timeout);
1629 if (rc)
1630 printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
1631 ioc->name);
1632 else
1633 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
1634 ioc->name));
1635
1636 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1637 "TMHandler rc = %d!\n", ioc->name, rc));
1638
1639 return rc;
1640}
1641
1642
1643/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1644/**
1645 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1646 * @hd: Pointer to MPT_SCSI_HOST structure
1647 * @type: Task Management type
1648 * @channel: channel number for task management
1649 * @id: Logical Target ID for reset (if appropriate)
1650 * @lun: Logical Unit for reset (if appropriate)
1651 * @ctx2abort: Context for the task to be aborted (if appropriate)
1652 * @timeout: timeout for task management control
1653 *
1654 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1655 * or a non-interrupt thread. In the former, must not call schedule().
1656 *
1657 * Not all fields are meaningful for all task types.
1658 *
1659 * Returns 0 for SUCCESS, or FAILED.
1660 *
1661 **/
1662static int
1663mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
1664{
1665 MPT_FRAME_HDR *mf;
1666 SCSITaskMgmt_t *pScsiTm;
1667 int ii;
1668 int retval;
1669 MPT_ADAPTER *ioc = hd->ioc;
1670 1538
1671 /* Return Fail to calling function if no message frames available. 1539 /* Return Fail to calling function if no message frames available.
1672 */ 1540 */
1673 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { 1541 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
1674 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1542 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1675 ioc->name)); 1543 "TaskMgmt no msg frames!!\n", ioc->name));
1676 return FAILED; 1544 retval = FAILED;
1545 mpt_clear_taskmgmt_in_progress_flag(ioc);
1546 goto out;
1677 } 1547 }
1678 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 1548 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1679 ioc->name, mf)); 1549 ioc->name, mf));
1680 1550
1681 /* Format the Request 1551 /* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1699 1569
1700 pScsiTm->TaskMsgContext = ctx2abort; 1570 pScsiTm->TaskMsgContext = ctx2abort;
1701 1571
1702 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " 1572 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
1703 "type=%d\n", ioc->name, ctx2abort, type)); 1573 "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
1574 type, timeout));
1704 1575
1705 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); 1576 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
1706 1577
1578 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1579 time_count = jiffies;
1707 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 1580 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
1708 (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 1581 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
1709 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1582 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1711 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, 1584 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
1712 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 1585 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
1713 if (retval) { 1586 if (retval) {
1714 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" 1587 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1715 " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, 1588 "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
1716 ioc, mf, retval)); 1589 ioc->name, mf, retval));
1717 goto fail_out; 1590 mpt_free_msg_frame(ioc, mf);
1591 mpt_clear_taskmgmt_in_progress_flag(ioc);
1592 goto out;
1718 } 1593 }
1719 } 1594 }
1720 1595
1721 if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { 1596 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
1722 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" 1597 timeout*HZ);
1723 " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, 1598 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
1724 ioc, mf)); 1599 retval = FAILED;
1725 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 1600 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
1726 ioc->name)); 1601 "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
1727 retval = mpt_HardResetHandler(ioc, CAN_SLEEP); 1602 mpt_clear_taskmgmt_in_progress_flag(ioc);
1728 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", 1603 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
1729 ioc->name, retval)); 1604 goto out;
1730 goto fail_out; 1605 issue_hard_reset = 1;
1606 goto out;
1731 } 1607 }
1732 1608
1733 /* 1609 retval = mptscsih_taskmgmt_reply(ioc, type,
1734 * Handle success case, see if there's a non-zero ioc_status. 1610 (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
1735 */
1736 if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
1737 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1738 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1739 retval = 0;
1740 else
1741 retval = FAILED;
1742 1611
1743 return retval; 1612 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1613 "TaskMgmt completed (%d seconds)\n",
1614 ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
1744 1615
1745 fail_out: 1616 out:
1746 1617
1747 /* 1618 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1748 * Free task management mf, and corresponding tm flags 1619 if (issue_hard_reset) {
1749 */ 1620 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
1750 mpt_free_msg_frame(ioc, mf); 1621 ioc->name, __func__);
1751 hd->tmPending = 0; 1622 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
1752 hd->tmState = TM_STATE_NONE; 1623 mpt_free_msg_frame(ioc, mf);
1753 return FAILED; 1624 }
1625
1626 retval = (retval == 0) ? 0 : FAILED;
1627 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
1628 return retval;
1754} 1629}
1630EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
1755 1631
1756static int 1632static int
1757mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) 1633mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
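With mptscsih_TMHandler folded away, mptscsih_IssueTaskMgmt is the single task-management entry point and is now exported. A minimal caller sketch, modeled on the target-reset path in mptscsih_dev_reset() further down in this patch; the VirtDevice lookup and the 30-second timeout are illustrative assumptions, not part of the change:

	/* Illustrative only: drive a target reset through the exported helper.
	 * Arguments are type, channel, id, lun, ctx2abort, timeout (seconds).
	 */
	static int example_target_reset(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
	{
		return mptscsih_IssueTaskMgmt(hd,
		    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
		    vdevice->vtarget->channel,
		    vdevice->vtarget->id, 0, 0, 30);
	}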
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1838 goto out; 1714 goto out;
1839 } 1715 }
1840 1716
1841 if (hd->resetPending) { 1717 if (ioc->timeouts < -1)
1842 retval = FAILED; 1718 ioc->timeouts++;
1843 goto out;
1844 }
1845
1846 if (hd->timeouts < -1)
1847 hd->timeouts++;
1848 1719
1849 if (mpt_fwfault_debug) 1720 if (mpt_fwfault_debug)
1850 mpt_halt_firmware(ioc); 1721 mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1861 1732
1862 hd->abortSCpnt = SCpnt; 1733 hd->abortSCpnt = SCpnt;
1863 1734
1864 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1735 retval = mptscsih_IssueTaskMgmt(hd,
1865 vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, 1736 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1866 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1737 vdevice->vtarget->channel,
1738 vdevice->vtarget->id, vdevice->lun,
1739 ctx2abort, mptscsih_get_tm_timeout(ioc));
1867 1740
1868 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && 1741 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
1869 SCpnt->serial_number == sn) 1742 SCpnt->serial_number == sn) {
1743 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1744 "task abort: command still in active list! (sc=%p)\n",
1745 ioc->name, SCpnt));
1870 retval = FAILED; 1746 retval = FAILED;
1747 } else {
1748 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1749 "task abort: command cleared from active list! (sc=%p)\n",
1750 ioc->name, SCpnt));
1751 retval = SUCCESS;
1752 }
1871 1753
1872 out: 1754 out:
1873 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1755 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
1874 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1756 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
1875 1757
1876 if (retval == 0) 1758 return retval;
1877 return SUCCESS;
1878 else
1879 return FAILED;
1880} 1759}
1881 1760
1882/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1761/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1909 ioc->name, SCpnt); 1788 ioc->name, SCpnt);
1910 scsi_print_command(SCpnt); 1789 scsi_print_command(SCpnt);
1911 1790
1912 if (hd->resetPending) {
1913 retval = FAILED;
1914 goto out;
1915 }
1916
1917 vdevice = SCpnt->device->hostdata; 1791 vdevice = SCpnt->device->hostdata;
1918 if (!vdevice || !vdevice->vtarget) { 1792 if (!vdevice || !vdevice->vtarget) {
1919 retval = 0; 1793 retval = SUCCESS;
1920 goto out; 1794 goto out;
1921 } 1795 }
1922 1796
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1927 goto out; 1801 goto out;
1928 } 1802 }
1929 1803
1930 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1804 retval = mptscsih_IssueTaskMgmt(hd,
1931 vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0, 1805 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1932 mptscsih_get_tm_timeout(ioc)); 1806 vdevice->vtarget->channel,
1807 vdevice->vtarget->id, 0, 0,
1808 mptscsih_get_tm_timeout(ioc));
1933 1809
1934 out: 1810 out:
1935 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n", 1811 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1972 ioc->name, SCpnt); 1848 ioc->name, SCpnt);
1973 scsi_print_command(SCpnt); 1849 scsi_print_command(SCpnt);
1974 1850
1975 if (hd->timeouts < -1) 1851 if (ioc->timeouts < -1)
1976 hd->timeouts++; 1852 ioc->timeouts++;
1977 1853
1978 vdevice = SCpnt->device->hostdata; 1854 vdevice = SCpnt->device->hostdata;
1979 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1855 if (!vdevice || !vdevice->vtarget)
1980 vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc)); 1856 return SUCCESS;
1857 retval = mptscsih_IssueTaskMgmt(hd,
1858 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1859 vdevice->vtarget->channel, 0, 0, 0,
1860 mptscsih_get_tm_timeout(ioc));
1981 1861
1982 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n", 1862 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
1983 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1863 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
2001mptscsih_host_reset(struct scsi_cmnd *SCpnt) 1881mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2002{ 1882{
2003 MPT_SCSI_HOST * hd; 1883 MPT_SCSI_HOST * hd;
2004 int retval; 1884 int status = SUCCESS;
2005 MPT_ADAPTER *ioc; 1885 MPT_ADAPTER *ioc;
1886 int retval;
2006 1887
2007 /* If we can't locate the host to reset, then we failed. */ 1888 /* If we can't locate the host to reset, then we failed. */
2008 if ((hd = shost_priv(SCpnt->device->host)) == NULL){ 1889 if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2021 /* If our attempts to reset the host failed, then return a failed 1902 /* If our attempts to reset the host failed, then return a failed
2022 * status. The host will be taken off line by the SCSI mid-layer. 1903 * status. The host will be taken off line by the SCSI mid-layer.
2023 */ 1904 */
2024 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { 1905 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
2025 retval = FAILED; 1906 if (retval < 0)
2026 } else { 1907 status = FAILED;
2027 /* Make sure TM pending is cleared and TM state is set to 1908 else
2028 * NONE. 1909 status = SUCCESS;
2029 */
2030 retval = 0;
2031 hd->tmPending = 0;
2032 hd->tmState = TM_STATE_NONE;
2033 }
2034 1910
2035 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n", 1911 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
2036 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1912 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
2037 1913
2038 return retval; 1914 return status;
2039} 1915}
2040 1916
2041/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2042/**
2043 * mptscsih_tm_pending_wait - wait for pending task management request to complete
2044 * @hd: Pointer to MPT host structure.
2045 *
2046 * Returns {SUCCESS,FAILED}.
2047 */
2048static int 1917static int
2049mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) 1918mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
1919 SCSITaskMgmtReply_t *pScsiTmReply)
2050{ 1920{
2051 unsigned long flags; 1921 u16 iocstatus;
2052 int loop_count = 4 * 10; /* Wait 10 seconds */ 1922 u32 termination_count;
2053 int status = FAILED; 1923 int retval;
2054 MPT_ADAPTER *ioc = hd->ioc;
2055 1924
2056 do { 1925 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
2057 spin_lock_irqsave(&ioc->FreeQlock, flags); 1926 retval = FAILED;
2058 if (hd->tmState == TM_STATE_NONE) { 1927 goto out;
2059 hd->tmState = TM_STATE_IN_PROGRESS; 1928 }
2060 hd->tmPending = 1;
2061 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2062 status = SUCCESS;
2063 break;
2064 }
2065 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2066 msleep(250);
2067 } while (--loop_count);
2068 1929
2069 return status; 1930 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2070}
2071 1931
2072/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1932 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2073/** 1933 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2074 * mptscsih_tm_wait_for_completion - wait for completion of TM task
2075 * @hd: Pointer to MPT host structure.
2076 * @timeout: timeout value
2077 *
2078 * Returns {SUCCESS,FAILED}.
2079 */
2080static int
2081mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
2082{
2083 unsigned long flags;
2084 int loop_count = 4 * timeout;
2085 int status = FAILED;
2086 MPT_ADAPTER *ioc = hd->ioc;
2087 1934
2088 do { 1935 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2089 spin_lock_irqsave(&ioc->FreeQlock, flags); 1936 "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
2090 if(hd->tmPending == 0) { 1937 "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
2091 status = SUCCESS; 1938 "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
2092 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 1939 pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
2093 break; 1940 le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
2094 } 1941 termination_count));
2095 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2096 msleep(250);
2097 } while (--loop_count);
2098 1942
2099 return status; 1943 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
1944 pScsiTmReply->ResponseCode)
1945 mptscsih_taskmgmt_response_code(ioc,
1946 pScsiTmReply->ResponseCode);
1947
1948 if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
1949 retval = 0;
1950 goto out;
1951 }
1952
1953 retval = FAILED;
1954 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1955 if (termination_count == 1)
1956 retval = 0;
1957 goto out;
1958 }
1959
1960 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1961 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1962 retval = 0;
1963
1964 out:
1965 return retval;
2100} 1966}
2101 1967
2102/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1968/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2103static void 1969void
2104mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) 1970mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2105{ 1971{
2106 char *desc; 1972 char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2134 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", 2000 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
2135 ioc->name, response_code, desc); 2001 ioc->name, response_code, desc);
2136} 2002}
2003EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
2137 2004
2138/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2005/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2139/** 2006/**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2150 * Returns 1 indicating alloc'd request frame ptr should be freed. 2017 * Returns 1 indicating alloc'd request frame ptr should be freed.
2151 **/ 2018 **/
2152int 2019int
2153mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2020mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
2021 MPT_FRAME_HDR *mr)
2154{ 2022{
2155 SCSITaskMgmtReply_t *pScsiTmReply; 2023 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2156 SCSITaskMgmt_t *pScsiTmReq; 2024 "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
2157 MPT_SCSI_HOST *hd;
2158 unsigned long flags;
2159 u16 iocstatus;
2160 u8 tmType;
2161 u32 termination_count;
2162
2163 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
2164 ioc->name, mf, mr));
2165 if (!ioc->sh) {
2166 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2167 "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
2168 return 1;
2169 }
2170
2171 if (mr == NULL) {
2172 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2173 "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
2174 return 1;
2175 }
2176
2177 hd = shost_priv(ioc->sh);
2178 pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
2179 pScsiTmReq = (SCSITaskMgmt_t*)mf;
2180 tmType = pScsiTmReq->TaskType;
2181 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2182 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2183 2025
2184 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && 2026 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2185 pScsiTmReply->ResponseCode)
2186 mptscsih_taskmgmt_response_code(ioc,
2187 pScsiTmReply->ResponseCode);
2188 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2189 2027
2190#ifdef CONFIG_FUSION_LOGGING 2028 if (!mr)
2191 if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
2192 (ioc->debug_level & MPT_DEBUG_TM ))
2193 printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
2194 "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
2195 "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
2196 pScsiTmReply->TargetID, pScsiTmReq->TaskType,
2197 le16_to_cpu(pScsiTmReply->IOCStatus),
2198 le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
2199 le32_to_cpu(pScsiTmReply->TerminationCount));
2200#endif
2201 if (!iocstatus) {
2202 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
2203 hd->abortSCpnt = NULL;
2204 goto out; 2029 goto out;
2205 }
2206
2207 /* Error? (anything non-zero?) */
2208
2209 /* clear flags and continue.
2210 */
2211 switch (tmType) {
2212
2213 case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2214 if (termination_count == 1)
2215 iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
2216 hd->abortSCpnt = NULL;
2217 break;
2218
2219 case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
2220
2221 /* If an internal command is present
2222 * or the TM failed - reload the FW.
2223 * FC FW may respond FAILED to an ABORT
2224 */
2225 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
2226 hd->cmdPtr)
2227 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
2228 printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
2229 break;
2230
2231 case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2232 default:
2233 break;
2234 }
2235 2030
2031 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2032 memcpy(ioc->taskmgmt_cmds.reply, mr,
2033 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
2236 out: 2034 out:
2237 spin_lock_irqsave(&ioc->FreeQlock, flags); 2035 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
2238 hd->tmPending = 0; 2036 mpt_clear_taskmgmt_in_progress_flag(ioc);
2239 hd->tmState = TM_STATE_NONE; 2037 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2240 hd->tm_iocstatus = iocstatus; 2038 complete(&ioc->taskmgmt_cmds.done);
2241 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2039 return 1;
2242 2040 }
2243 return 1; 2041 return 0;
2244} 2042}
2245 2043
2246/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2044/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2290,8 +2088,10 @@ int
2290mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) 2088mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2291{ 2089{
2292 struct inactive_raid_component_info *component_info; 2090 struct inactive_raid_component_info *component_info;
2293 int i; 2091 int i, j;
2092 RaidPhysDiskPage1_t *phys_disk;
2294 int rc = 0; 2093 int rc = 0;
2094 int num_paths;
2295 2095
2296 if (!ioc->raid_data.pIocPg3) 2096 if (!ioc->raid_data.pIocPg3)
2297 goto out; 2097 goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2303 } 2103 }
2304 } 2104 }
2305 2105
2106 if (ioc->bus_type != SAS)
2107 goto out;
2108
2109 /*
2110 * Check if dual path
2111 */
2112 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2113 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2114 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2115 if (num_paths < 2)
2116 continue;
2117 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2118 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2119 if (!phys_disk)
2120 continue;
2121 if ((mpt_raid_phys_disk_pg1(ioc,
2122 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2123 phys_disk))) {
2124 kfree(phys_disk);
2125 continue;
2126 }
2127 for (j = 0; j < num_paths; j++) {
2128 if ((phys_disk->Path[j].Flags &
2129 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2130 continue;
2131 if ((phys_disk->Path[j].Flags &
2132 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2133 continue;
2134 if ((id == phys_disk->Path[j].PhysDiskID) &&
2135 (channel == phys_disk->Path[j].PhysDiskBus)) {
2136 rc = 1;
2137 kfree(phys_disk);
2138 goto out;
2139 }
2140 }
2141 kfree(phys_disk);
2142 }
2143
2144
2306 /* 2145 /*
2307 * Check inactive list for matching phys disks 2146 * Check inactive list for matching phys disks
2308 */ 2147 */
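The dual-path check above sizes RaidPhysDiskPage1_t with offsetof() plus num_paths trailing RAID_PHYS_DISK1_PATH entries, the usual idiom for a fixed header followed by a variable-length tail. A stand-alone sketch of that allocation pattern, using hypothetical structure names rather than the MPI page layout:

	#include <stddef.h>
	#include <stdlib.h>

	struct path_entry { unsigned int flags, phys_disk_id, phys_disk_bus; };

	struct pg1 {
		unsigned int num_paths;
		struct path_entry Path[1];	/* variable-length tail */
	};

	static struct pg1 *alloc_pg1(unsigned int num_paths)
	{
		/* bytes up to Path[], plus one zeroed entry per path */
		return calloc(1, offsetof(struct pg1, Path) +
				 num_paths * sizeof(struct path_entry));
	}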
@@ -2327,8 +2166,10 @@ u8
2327mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 2166mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2328{ 2167{
2329 struct inactive_raid_component_info *component_info; 2168 struct inactive_raid_component_info *component_info;
2330 int i; 2169 int i, j;
2170 RaidPhysDiskPage1_t *phys_disk;
2331 int rc = -ENXIO; 2171 int rc = -ENXIO;
2172 int num_paths;
2332 2173
2333 if (!ioc->raid_data.pIocPg3) 2174 if (!ioc->raid_data.pIocPg3)
2334 goto out; 2175 goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2340 } 2181 }
2341 } 2182 }
2342 2183
2184 if (ioc->bus_type != SAS)
2185 goto out;
2186
2187 /*
2188 * Check if dual path
2189 */
2190 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2191 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2192 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2193 if (num_paths < 2)
2194 continue;
2195 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2196 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2197 if (!phys_disk)
2198 continue;
2199 if ((mpt_raid_phys_disk_pg1(ioc,
2200 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2201 phys_disk))) {
2202 kfree(phys_disk);
2203 continue;
2204 }
2205 for (j = 0; j < num_paths; j++) {
2206 if ((phys_disk->Path[j].Flags &
2207 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2208 continue;
2209 if ((phys_disk->Path[j].Flags &
2210 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2211 continue;
2212 if ((id == phys_disk->Path[j].PhysDiskID) &&
2213 (channel == phys_disk->Path[j].PhysDiskBus)) {
2214 rc = phys_disk->PhysDiskNum;
2215 kfree(phys_disk);
2216 goto out;
2217 }
2218 }
2219 kfree(phys_disk);
2220 }
2221
2343 /* 2222 /*
2344 * Check inactive list for matching phys disks 2223 * Check inactive list for matching phys disks
2345 */ 2224 */
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2457 sdev->ppr, sdev->inquiry_len)); 2336 sdev->ppr, sdev->inquiry_len));
2458 2337
2459 vdevice->configured_lun = 1; 2338 vdevice->configured_lun = 1;
2460 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2461 2339
2462 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2340 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2463 "Queue depth=%d, tflags=%x\n", 2341 "Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2469 ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2347 ioc->name, vtarget->negoFlags, vtarget->maxOffset,
2470 vtarget->minSyncFactor)); 2348 vtarget->minSyncFactor));
2471 2349
2350 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2472 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2351 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2473 "tagged %d, simple %d, ordered %d\n", 2352 "tagged %d, simple %d, ordered %d\n",
2474 ioc->name,sdev->tagged_supported, sdev->simple_tags, 2353 ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2542} 2421}
2543 2422
2544/** 2423/**
2545 * mptscsih_get_scsi_lookup 2424 * mptscsih_get_scsi_lookup - retrieves scmd entry
2546 * @ioc: Pointer to MPT_ADAPTER structure 2425 * @ioc: Pointer to MPT_ADAPTER structure
2547 * @i: index into the array 2426 * @i: index into the array
2548 * 2427 *
2549 * retrieves scmd entry from ScsiLookup[] array list
2550 *
2551 * Returns the scsi_cmd pointer 2428 * Returns the scsi_cmd pointer
2552 **/ 2429 */
2553static struct scsi_cmnd * 2430struct scsi_cmnd *
2554mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) 2431mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2555{ 2432{
2556 unsigned long flags; 2433 unsigned long flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2562 2439
2563 return scmd; 2440 return scmd;
2564} 2441}
2442EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
2565 2443
2566/** 2444/**
2567 * mptscsih_getclear_scsi_lookup 2445 * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
2568 * @ioc: Pointer to MPT_ADAPTER structure 2446 * @ioc: Pointer to MPT_ADAPTER structure
2569 * @i: index into the array 2447 * @i: index into the array
2570 * 2448 *
2571 * retrieves and clears scmd entry from ScsiLookup[] array list
2572 *
2573 * Returns the scsi_cmd pointer 2449 * Returns the scsi_cmd pointer
2450 *
2574 **/ 2451 **/
2575static struct scsi_cmnd * 2452static struct scsi_cmnd *
2576mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) 2453mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
2635mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 2512mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2636{ 2513{
2637 MPT_SCSI_HOST *hd; 2514 MPT_SCSI_HOST *hd;
2638 unsigned long flags;
2639 2515
2640 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2641 ": IOC %s_reset routed to SCSI host driver!\n",
2642 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
2643 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
2644
2645 /* If a FW reload request arrives after base installed but
2646 * before all scsi hosts have been attached, then an alt_ioc
2647 * may have a NULL sh pointer.
2648 */
2649 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) 2516 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
2650 return 0; 2517 return 0;
2651 else
2652 hd = shost_priv(ioc->sh);
2653
2654 if (reset_phase == MPT_IOC_SETUP_RESET) {
2655 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
2656
2657 /* Clean Up:
2658 * 1. Set Hard Reset Pending Flag
2659 * All new commands go to doneQ
2660 */
2661 hd->resetPending = 1;
2662
2663 } else if (reset_phase == MPT_IOC_PRE_RESET) {
2664 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
2665 2518
2666 /* 2. Flush running commands 2519 hd = shost_priv(ioc->sh);
2667 * Clean ScsiLookup (and associated memory) 2520 switch (reset_phase) {
2668 * AND clean mytaskQ 2521 case MPT_IOC_SETUP_RESET:
2669 */ 2522 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2670 2523 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
2671 /* 2b. Reply to OS all known outstanding I/O commands. 2524 break;
2672 */ 2525 case MPT_IOC_PRE_RESET:
2526 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2527 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
2673 mptscsih_flush_running_cmds(hd); 2528 mptscsih_flush_running_cmds(hd);
2674 2529 break;
2675 /* 2c. If there was an internal command that 2530 case MPT_IOC_POST_RESET:
2676 * has not completed, configuration or io request, 2531 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2677 * free these resources. 2532 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
2678 */ 2533 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
2679 if (hd->cmdPtr) { 2534 ioc->internal_cmds.status |=
2680 del_timer(&hd->timer); 2535 MPT_MGMT_STATUS_DID_IOCRESET;
2681 mpt_free_msg_frame(ioc, hd->cmdPtr); 2536 complete(&ioc->internal_cmds.done);
2682 }
2683
2684 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
2685
2686 } else {
2687 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
2688
2689 /* Once a FW reload begins, all new OS commands are
2690 * redirected to the doneQ w/ a reset status.
2691 * Init all control structures.
2692 */
2693
2694 /* 2. Chain Buffer initialization
2695 */
2696
2697 /* 4. Renegotiate to all devices, if SPI
2698 */
2699
2700 /* 5. Enable new commands to be posted
2701 */
2702 spin_lock_irqsave(&ioc->FreeQlock, flags);
2703 hd->tmPending = 0;
2704 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2705 hd->resetPending = 0;
2706 hd->tmState = TM_STATE_NONE;
2707
2708 /* 6. If there was an internal command,
2709 * wake this process up.
2710 */
2711 if (hd->cmdPtr) {
2712 /*
2713 * Wake up the original calling thread
2714 */
2715 hd->pLocal = &hd->localReply;
2716 hd->pLocal->completion = MPT_SCANDV_DID_RESET;
2717 hd->scandv_wait_done = 1;
2718 wake_up(&hd->scandv_waitq);
2719 hd->cmdPtr = NULL;
2720 } 2537 }
2721 2538 break;
2722 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name)); 2539 default:
2723 2540 break;
2724 } 2541 }
2725
2726 return 1; /* currently means nothing really */ 2542 return 1; /* currently means nothing really */
2727} 2543}
2728 2544
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2730int 2546int
2731mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2547mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2732{ 2548{
2733 MPT_SCSI_HOST *hd;
2734 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; 2549 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2735 2550
2736 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2551 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2737 ioc->name, event)); 2552 "MPT event (=%02Xh) routed to SCSI host driver!\n",
2738 2553 ioc->name, event));
2739 if (ioc->sh == NULL ||
2740 ((hd = shost_priv(ioc->sh)) == NULL))
2741 return 1;
2742
2743 switch (event) {
2744 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2745 /* FIXME! */
2746 break;
2747 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2748 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2749 if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
2750 hd->soft_resets++;
2751 break;
2752 case MPI_EVENT_LOGOUT: /* 09 */
2753 /* FIXME! */
2754 break;
2755
2756 case MPI_EVENT_RESCAN: /* 06 */
2757 break;
2758
2759 /*
2760 * CHECKME! Don't think we need to do
2761 * anything for these, but...
2762 */
2763 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
2764 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
2765 /*
2766 * CHECKME! Falling thru...
2767 */
2768 break;
2769
2770 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2771 break;
2772 2554
2773 case MPI_EVENT_NONE: /* 00 */ 2555 if ((event == MPI_EVENT_IOC_BUS_RESET ||
2774 case MPI_EVENT_LOG_DATA: /* 01 */ 2556 event == MPI_EVENT_EXT_BUS_RESET) &&
2775 case MPI_EVENT_STATE_CHANGE: /* 02 */ 2557 (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
2776 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 2558 ioc->soft_resets++;
2777 default:
2778 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
2779 ioc->name, event));
2780 break;
2781 }
2782 2559
2783 return 1; /* currently means nothing really */ 2560 return 1; /* currently means nothing really */
2784} 2561}
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2809 * Used ONLY for DV and other internal commands. 2586 * Used ONLY for DV and other internal commands.
2810 */ 2587 */
2811int 2588int
2812mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2589mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2590 MPT_FRAME_HDR *reply)
2813{ 2591{
2814 MPT_SCSI_HOST *hd;
2815 SCSIIORequest_t *pReq; 2592 SCSIIORequest_t *pReq;
2816 int completionCode; 2593 SCSIIOReply_t *pReply;
2594 u8 cmd;
2817 u16 req_idx; 2595 u16 req_idx;
2596 u8 *sense_data;
2597 int sz;
2818 2598
2819 hd = shost_priv(ioc->sh); 2599 ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2820 2600 ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
2821 if ((mf == NULL) || 2601 if (!reply)
2822 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { 2602 goto out;
2823 printk(MYIOC_s_ERR_FMT
2824 "ScanDvComplete, %s req frame ptr! (=%p)\n",
2825 ioc->name, mf?"BAD":"NULL", (void *) mf);
2826 goto wakeup;
2827 }
2828
2829 del_timer(&hd->timer);
2830 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
2831 mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
2832 pReq = (SCSIIORequest_t *) mf;
2833 2603
2834 if (mf != hd->cmdPtr) { 2604 pReply = (SCSIIOReply_t *) reply;
2835 printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", 2605 pReq = (SCSIIORequest_t *) req;
2836 ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); 2606 ioc->internal_cmds.completion_code =
2607 mptscsih_get_completion_code(ioc, req, reply);
2608 ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2609 memcpy(ioc->internal_cmds.reply, reply,
2610 min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
2611 cmd = reply->u.hdr.Function;
2612 if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
2613 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
2614 (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
2615 req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
2616 sense_data = ((u8 *)ioc->sense_buf_pool +
2617 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2618 sz = min_t(int, pReq->SenseBufferLength,
2619 MPT_SENSE_BUFFER_ALLOC);
2620 memcpy(ioc->internal_cmds.sense, sense_data, sz);
2837 } 2621 }
2838 hd->cmdPtr = NULL; 2622 out:
2839 2623 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
2840 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", 2624 return 0;
2841 ioc->name, mf, mr, req_idx)); 2625 ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2842 2626 complete(&ioc->internal_cmds.done);
2843 hd->pLocal = &hd->localReply;
2844 hd->pLocal->scsiStatus = 0;
2845
2846 /* If target struct exists, clear sense valid flag.
2847 */
2848 if (mr == NULL) {
2849 completionCode = MPT_SCANDV_GOOD;
2850 } else {
2851 SCSIIOReply_t *pReply;
2852 u16 status;
2853 u8 scsi_status;
2854
2855 pReply = (SCSIIOReply_t *) mr;
2856
2857 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2858 scsi_status = pReply->SCSIStatus;
2859
2860
2861 switch(status) {
2862
2863 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2864 completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
2865 break;
2866
2867 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2868 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2869 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2870 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2871 completionCode = MPT_SCANDV_DID_RESET;
2872 break;
2873
2874 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2875 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2876 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2877 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2878 ConfigReply_t *pr = (ConfigReply_t *)mr;
2879 completionCode = MPT_SCANDV_GOOD;
2880 hd->pLocal->header.PageVersion = pr->Header.PageVersion;
2881 hd->pLocal->header.PageLength = pr->Header.PageLength;
2882 hd->pLocal->header.PageNumber = pr->Header.PageNumber;
2883 hd->pLocal->header.PageType = pr->Header.PageType;
2884
2885 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2886 /* If the RAID Volume request is successful,
2887 * return GOOD, else indicate that
2888 * some type of error occurred.
2889 */
2890 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
2891 if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
2892 completionCode = MPT_SCANDV_GOOD;
2893 else
2894 completionCode = MPT_SCANDV_SOME_ERROR;
2895 memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
2896
2897 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
2898 u8 *sense_data;
2899 int sz;
2900
2901 /* save sense data in global structure
2902 */
2903 completionCode = MPT_SCANDV_SENSE;
2904 hd->pLocal->scsiStatus = scsi_status;
2905 sense_data = ((u8 *)ioc->sense_buf_pool +
2906 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2907
2908 sz = min_t(int, pReq->SenseBufferLength,
2909 SCSI_STD_SENSE_BYTES);
2910 memcpy(hd->pLocal->sense, sense_data, sz);
2911
2912 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n",
2913 ioc->name, sense_data));
2914 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2915 if (pReq->CDB[0] == INQUIRY)
2916 completionCode = MPT_SCANDV_ISSUE_SENSE;
2917 else
2918 completionCode = MPT_SCANDV_DID_RESET;
2919 }
2920 else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2921 completionCode = MPT_SCANDV_DID_RESET;
2922 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2923 completionCode = MPT_SCANDV_DID_RESET;
2924 else {
2925 completionCode = MPT_SCANDV_GOOD;
2926 hd->pLocal->scsiStatus = scsi_status;
2927 }
2928 break;
2929
2930 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2931 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2932 completionCode = MPT_SCANDV_DID_RESET;
2933 else
2934 completionCode = MPT_SCANDV_SOME_ERROR;
2935 break;
2936
2937 default:
2938 completionCode = MPT_SCANDV_SOME_ERROR;
2939 break;
2940
2941 } /* switch(status) */
2942
2943 } /* end of address reply case */
2944
2945 hd->pLocal->completion = completionCode;
2946
2947 /* MF and RF are freed in mpt_interrupt
2948 */
2949wakeup:
2950 /* Free Chain buffers (will never chain) in scan or dv */
2951 //mptscsih_freeChainBuffers(ioc, req_idx);
2952
2953 /*
2954 * Wake up the original calling thread
2955 */
2956 hd->scandv_wait_done = 1;
2957 wake_up(&hd->scandv_waitq);
2958
2959 return 1; 2627 return 1;
2960} 2628}
2961 2629
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
3004 return; 2672 return;
3005} 2673}
3006 2674
2675/**
2676 * mptscsih_get_completion_code - map reply status to a MPT_SCANDV_* completion code
2677 * @ioc: Pointer to MPT_ADAPTER structure
2678 * @req: Pointer to original MPT request frame
2679 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
2680 *
2681 **/
2682static int
2683mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2684 MPT_FRAME_HDR *reply)
2685{
2686 SCSIIOReply_t *pReply;
2687 MpiRaidActionReply_t *pr;
2688 u8 scsi_status;
2689 u16 status;
2690 int completion_code;
2691
2692 pReply = (SCSIIOReply_t *)reply;
2693 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2694 scsi_status = pReply->SCSIStatus;
2695
2696 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2697 "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
2698 "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
2699 scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
2700
2701 switch (status) {
2702
2703 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2704 completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
2705 break;
2706
2707 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2708 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2709 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2710 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2711 completion_code = MPT_SCANDV_DID_RESET;
2712 break;
2713
2714 case MPI_IOCSTATUS_BUSY:
2715 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2716 completion_code = MPT_SCANDV_BUSY;
2717 break;
2718
2719 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2720 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2721 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2722 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2723 completion_code = MPT_SCANDV_GOOD;
2724 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2725 pr = (MpiRaidActionReply_t *)reply;
2726 if (le16_to_cpu(pr->ActionStatus) ==
2727 MPI_RAID_ACTION_ASTATUS_SUCCESS)
2728 completion_code = MPT_SCANDV_GOOD;
2729 else
2730 completion_code = MPT_SCANDV_SOME_ERROR;
2731 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
2732 completion_code = MPT_SCANDV_SENSE;
2733 else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2734 if (req->u.scsireq.CDB[0] == INQUIRY)
2735 completion_code = MPT_SCANDV_ISSUE_SENSE;
2736 else
2737 completion_code = MPT_SCANDV_DID_RESET;
2738 } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2739 completion_code = MPT_SCANDV_DID_RESET;
2740 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2741 completion_code = MPT_SCANDV_DID_RESET;
2742 else if (scsi_status == MPI_SCSI_STATUS_BUSY)
2743 completion_code = MPT_SCANDV_BUSY;
2744 else
2745 completion_code = MPT_SCANDV_GOOD;
2746 break;
2747
2748 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2749 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2750 completion_code = MPT_SCANDV_DID_RESET;
2751 else
2752 completion_code = MPT_SCANDV_SOME_ERROR;
2753 break;
2754 default:
2755 completion_code = MPT_SCANDV_SOME_ERROR;
2756 break;
2757
2758 } /* switch(status) */
2759
2760 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2761 " completionCode set to %08xh\n", ioc->name, completion_code));
2762 return completion_code;
2763}
3007 2764
3008/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2765/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3009/** 2766/**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3030{ 2787{
3031 MPT_FRAME_HDR *mf; 2788 MPT_FRAME_HDR *mf;
3032 SCSIIORequest_t *pScsiReq; 2789 SCSIIORequest_t *pScsiReq;
3033 SCSIIORequest_t ReqCopy;
3034 int my_idx, ii, dir; 2790 int my_idx, ii, dir;
3035 int rc, cmdTimeout; 2791 int timeout;
3036 int in_isr;
3037 char cmdLen; 2792 char cmdLen;
3038 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; 2793 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3039 char cmd = io->cmd; 2794 u8 cmd = io->cmd;
3040 MPT_ADAPTER *ioc = hd->ioc; 2795 MPT_ADAPTER *ioc = hd->ioc;
2796 int ret = 0;
2797 unsigned long timeleft;
2798 unsigned long flags;
3041 2799
3042 in_isr = in_interrupt(); 2800 /* don't send internal command during diag reset */
3043 if (in_isr) { 2801 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3044 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", 2802 if (ioc->ioc_reset_in_progress) {
3045 ioc->name)); 2803 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3046 return -EPERM; 2804 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2805 "%s: busy with host reset\n", ioc->name, __func__));
2806 return MPT_SCANDV_BUSY;
3047 } 2807 }
2808 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3048 2809
2810 mutex_lock(&ioc->internal_cmds.mutex);
3049 2811
3050 /* Set command specific information 2812 /* Set command specific information
3051 */ 2813 */
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3055 dir = MPI_SCSIIO_CONTROL_READ; 2817 dir = MPI_SCSIIO_CONTROL_READ;
3056 CDB[0] = cmd; 2818 CDB[0] = cmd;
3057 CDB[4] = io->size; 2819 CDB[4] = io->size;
3058 cmdTimeout = 10; 2820 timeout = 10;
3059 break; 2821 break;
3060 2822
3061 case TEST_UNIT_READY: 2823 case TEST_UNIT_READY:
3062 cmdLen = 6; 2824 cmdLen = 6;
3063 dir = MPI_SCSIIO_CONTROL_READ; 2825 dir = MPI_SCSIIO_CONTROL_READ;
3064 cmdTimeout = 10; 2826 timeout = 10;
3065 break; 2827 break;
3066 2828
3067 case START_STOP: 2829 case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3069 dir = MPI_SCSIIO_CONTROL_READ; 2831 dir = MPI_SCSIIO_CONTROL_READ;
3070 CDB[0] = cmd; 2832 CDB[0] = cmd;
3071 CDB[4] = 1; /*Spin up the disk */ 2833 CDB[4] = 1; /*Spin up the disk */
3072 cmdTimeout = 15; 2834 timeout = 15;
3073 break; 2835 break;
3074 2836
3075 case REQUEST_SENSE: 2837 case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3077 CDB[0] = cmd; 2839 CDB[0] = cmd;
3078 CDB[4] = io->size; 2840 CDB[4] = io->size;
3079 dir = MPI_SCSIIO_CONTROL_READ; 2841 dir = MPI_SCSIIO_CONTROL_READ;
3080 cmdTimeout = 10; 2842 timeout = 10;
3081 break; 2843 break;
3082 2844
3083 case READ_BUFFER: 2845 case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3096 CDB[6] = (io->size >> 16) & 0xFF; 2858 CDB[6] = (io->size >> 16) & 0xFF;
3097 CDB[7] = (io->size >> 8) & 0xFF; 2859 CDB[7] = (io->size >> 8) & 0xFF;
3098 CDB[8] = io->size & 0xFF; 2860 CDB[8] = io->size & 0xFF;
3099 cmdTimeout = 10; 2861 timeout = 10;
3100 break; 2862 break;
3101 2863
3102 case WRITE_BUFFER: 2864 case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3111 CDB[6] = (io->size >> 16) & 0xFF; 2873 CDB[6] = (io->size >> 16) & 0xFF;
3112 CDB[7] = (io->size >> 8) & 0xFF; 2874 CDB[7] = (io->size >> 8) & 0xFF;
3113 CDB[8] = io->size & 0xFF; 2875 CDB[8] = io->size & 0xFF;
3114 cmdTimeout = 10; 2876 timeout = 10;
3115 break; 2877 break;
3116 2878
3117 case RESERVE: 2879 case RESERVE:
3118 cmdLen = 6; 2880 cmdLen = 6;
3119 dir = MPI_SCSIIO_CONTROL_READ; 2881 dir = MPI_SCSIIO_CONTROL_READ;
3120 CDB[0] = cmd; 2882 CDB[0] = cmd;
3121 cmdTimeout = 10; 2883 timeout = 10;
3122 break; 2884 break;
3123 2885
3124 case RELEASE: 2886 case RELEASE:
3125 cmdLen = 6; 2887 cmdLen = 6;
3126 dir = MPI_SCSIIO_CONTROL_READ; 2888 dir = MPI_SCSIIO_CONTROL_READ;
3127 CDB[0] = cmd; 2889 CDB[0] = cmd;
3128 cmdTimeout = 10; 2890 timeout = 10;
3129 break; 2891 break;
3130 2892
3131 case SYNCHRONIZE_CACHE: 2893 case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3133 dir = MPI_SCSIIO_CONTROL_READ; 2895 dir = MPI_SCSIIO_CONTROL_READ;
3134 CDB[0] = cmd; 2896 CDB[0] = cmd;
3135// CDB[1] = 0x02; /* set immediate bit */ 2897// CDB[1] = 0x02; /* set immediate bit */
3136 cmdTimeout = 10; 2898 timeout = 10;
3137 break; 2899 break;
3138 2900
3139 default: 2901 default:
3140 /* Error Case */ 2902 /* Error Case */
3141 return -EFAULT; 2903 ret = -EFAULT;
2904 goto out;
3142 } 2905 }
3143 2906
3144 /* Get and Populate a free Frame 2907 /* Get and Populate a free Frame
2908 * MsgContext set in mpt_get_msg_frame call
3145 */ 2909 */
3146 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 2910 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
3147 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", 2911 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
3148 ioc->name)); 2912 ioc->name, __func__));
3149 return -EBUSY; 2913 ret = MPT_SCANDV_BUSY;
2914 goto out;
3150 } 2915 }
3151 2916
3152 pScsiReq = (SCSIIORequest_t *) mf; 2917 pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3172 2937
3173 pScsiReq->Reserved = 0; 2938 pScsiReq->Reserved = 0;
3174 2939
3175 pScsiReq->MsgFlags = mpt_msg_flags(); 2940 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
3176 /* MsgContext set in mpt_get_msg_frame call */ 2941 /* MsgContext set in mpt_get_msg_frame call */
3177 2942
3178 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN); 2943 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3184 2949
3185 if (cmd == REQUEST_SENSE) { 2950 if (cmd == REQUEST_SENSE) {
3186 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); 2951 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
3187 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n", 2952 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3188 ioc->name, cmd)); 2953 "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
3189 } 2954 }
3190 2955
3191 for (ii=0; ii < 16; ii++) 2956 for (ii = 0; ii < 16; ii++)
3192 pScsiReq->CDB[ii] = CDB[ii]; 2957 pScsiReq->CDB[ii] = CDB[ii];
3193 2958
3194 pScsiReq->DataLength = cpu_to_le32(io->size); 2959 pScsiReq->DataLength = cpu_to_le32(io->size);
3195 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma 2960 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
3196 + (my_idx * MPT_SENSE_BUFFER_ALLOC)); 2961 + (my_idx * MPT_SENSE_BUFFER_ALLOC));
3197 2962
3198 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", 2963 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3199 ioc->name, cmd, io->channel, io->id, io->lun)); 2964 "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
2965 ioc->name, __func__, cmd, io->channel, io->id, io->lun));
3200 2966
3201 if (dir == MPI_SCSIIO_CONTROL_READ) { 2967 if (dir == MPI_SCSIIO_CONTROL_READ)
3202 mpt_add_sge((char *) &pScsiReq->SGL, 2968 ioc->add_sge((char *) &pScsiReq->SGL,
3203 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, 2969 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
3204 io->data_dma); 2970 else
3205 } else { 2971 ioc->add_sge((char *) &pScsiReq->SGL,
3206 mpt_add_sge((char *) &pScsiReq->SGL, 2972 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
3207 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
3208 io->data_dma);
3209 }
3210
3211 /* The ISR will free the request frame, but we need
3212 * the information to initialize the target. Duplicate.
3213 */
3214 memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
3215
3216 /* Issue this command after:
3217 * finish init
3218 * add timer
3219 * Wait until the reply has been received
3220 * ScsiScanDvCtx callback function will
3221 * set hd->pLocal;
3222 * set scandv_wait_done and call wake_up
3223 */
3224 hd->pLocal = NULL;
3225 hd->timer.expires = jiffies + HZ*cmdTimeout;
3226 hd->scandv_wait_done = 0;
3227
3228 /* Save cmd pointer, for resource free if timeout or
3229 * FW reload occurs
3230 */
3231 hd->cmdPtr = mf;
3232 2973
3233 add_timer(&hd->timer); 2974 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
3234 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 2975 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
3235 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 2976 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
3236 2977 timeout*HZ);
3237 if (hd->pLocal) { 2978 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
3238 rc = hd->pLocal->completion; 2979 ret = MPT_SCANDV_DID_RESET;
3239 hd->pLocal->skip = 0; 2980 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3240 2981 "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
3241 /* Always set fatal error codes in some cases. 2982 cmd));
3242 */ 2983 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
3243 if (rc == MPT_SCANDV_SELECTION_TIMEOUT) 2984 mpt_free_msg_frame(ioc, mf);
3244 rc = -ENXIO; 2985 goto out;
3245 else if (rc == MPT_SCANDV_SOME_ERROR) 2986 }
3246 rc = -rc; 2987 if (!timeleft) {
3247 } else { 2988 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
3248 rc = -EFAULT; 2989 ioc->name, __func__);
3249 /* This should never happen. */ 2990 mpt_HardResetHandler(ioc, CAN_SLEEP);
3250 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", 2991 mpt_free_msg_frame(ioc, mf);
3251 ioc->name)); 2992 }
2993 goto out;
3252 } 2994 }
3253 2995
3254 return rc; 2996 ret = ioc->internal_cmds.completion_code;
2997 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
2998 ioc->name, __func__, ret));
2999
3000 out:
3001 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
3002 mutex_unlock(&ioc->internal_cmds.mutex);
3003 return ret;
3255} 3004}
3256 3005
3257/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3006/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
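mptscsih_do_cmd() now serializes internal commands behind internal_cmds.mutex and blocks on a completion rather than the old scandv_waitq/timer pair, with mptscsih_scandv_complete() recording status and calling complete(). A minimal sketch of that submit/complete handshake using the stock completion API; the frame handling is elided and the names below are illustrative, not taken from the driver:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/mutex.h>

	struct cmd_ctx {
		struct mutex		mutex;
		struct completion	done;
		int			status;	/* e.g. MPT_MGMT_STATUS_COMMAND_GOOD */
	};

	/* submit side, cf. mptscsih_do_cmd() */
	static int submit_and_wait(struct cmd_ctx *ctx, unsigned long secs)
	{
		int ret;

		mutex_lock(&ctx->mutex);
		ctx->status = 0;
		init_completion(&ctx->done);
		/* ... build and post the request frame to the IOC here ... */
		if (!wait_for_completion_timeout(&ctx->done, secs * HZ))
			ret = -ETIME;	/* the driver escalates to a hard reset */
		else
			ret = ctx->status;
		mutex_unlock(&ctx->mutex);
		return ret;
	}

	/* reply side, cf. mptscsih_scandv_complete() */
	static void cmd_reply_seen(struct cmd_ctx *ctx, int status)
	{
		ctx->status = status;
		complete(&ctx->done);
	}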
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
3491 &dev_attr_debug_level, 3240 &dev_attr_debug_level,
3492 NULL, 3241 NULL,
3493}; 3242};
3243
3494EXPORT_SYMBOL(mptscsih_host_attrs); 3244EXPORT_SYMBOL(mptscsih_host_attrs);
3495 3245
3496EXPORT_SYMBOL(mptscsih_remove); 3246EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
3516EXPORT_SYMBOL(mptscsih_ioc_reset); 3266EXPORT_SYMBOL(mptscsih_ioc_reset);
3517EXPORT_SYMBOL(mptscsih_change_queue_depth); 3267EXPORT_SYMBOL(mptscsih_change_queue_depth);
3518EXPORT_SYMBOL(mptscsih_timer_expired); 3268EXPORT_SYMBOL(mptscsih_timer_expired);
3519EXPORT_SYMBOL(mptscsih_TMHandler);
3520 3269
3521/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3270/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 319aa3033371..eb3f677528ac 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -60,6 +60,7 @@
60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) 60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
61#define MPT_SCANDV_ISSUE_SENSE (0x00000010) 61#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
62#define MPT_SCANDV_FALLBACK (0x00000020) 62#define MPT_SCANDV_FALLBACK (0x00000020)
63#define MPT_SCANDV_BUSY (0x00000040)
63 64
64#define MPT_SCANDV_MAX_RETRIES (10) 65#define MPT_SCANDV_MAX_RETRIES (10)
65 66
@@ -89,6 +90,7 @@
89 90
90#endif 91#endif
91 92
93
92typedef struct _internal_cmd { 94typedef struct _internal_cmd {
93 char *data; /* data pointer */ 95 char *data; /* data pointer */
94 dma_addr_t data_dma; /* data dma address */ 96 dma_addr_t data_dma; /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
112extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); 114extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
113extern const char * mptscsih_info(struct Scsi_Host *SChost); 115extern const char * mptscsih_info(struct Scsi_Host *SChost);
114extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); 116extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
117extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
118 u8 id, int lun, int ctx2abort, ulong timeout);
115extern void mptscsih_slave_destroy(struct scsi_device *device); 119extern void mptscsih_slave_destroy(struct scsi_device *device);
116extern int mptscsih_slave_configure(struct scsi_device *device); 120extern int mptscsih_slave_configure(struct scsi_device *device);
117extern int mptscsih_abort(struct scsi_cmnd * SCpnt); 121extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
126extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
127extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); 131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
128extern void mptscsih_timer_expired(unsigned long data); 132extern void mptscsih_timer_expired(unsigned long data);
129extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
130extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 133extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
131extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 134extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
132extern struct device_attribute *mptscsih_host_attrs[]; 135extern struct device_attribute *mptscsih_host_attrs[];
136extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
137extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 61620144e49c..c5b808fd55ba 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | 300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
301 (IOCPage4Ptr->Header.PageLength + ii) * 4; 301 (IOCPage4Ptr->Header.PageLength + ii) * 4;
302 302
303 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); 303 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
304 304
305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", 306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0; 614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
615} 615}
616 616
617static int 617int
618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) 618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
619{ 619{
620 MPT_ADAPTER *ioc = hd->ioc;
620 MpiRaidActionRequest_t *pReq; 621 MpiRaidActionRequest_t *pReq;
621 MPT_FRAME_HDR *mf; 622 MPT_FRAME_HDR *mf;
622 MPT_ADAPTER *ioc = hd->ioc; 623 int ret;
624 unsigned long timeleft;
625
626 mutex_lock(&ioc->internal_cmds.mutex);
623 627
624 /* Get and Populate a free Frame 628 /* Get and Populate a free Frame
625 */ 629 */
626 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 630 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
627 ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", 631 dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
628 ioc->name)); 632 "%s: no msg frames!\n", ioc->name, __func__));
629 return -EAGAIN; 633 ret = -EAGAIN;
634 goto out;
630 } 635 }
631 pReq = (MpiRaidActionRequest_t *)mf; 636 pReq = (MpiRaidActionRequest_t *)mf;
632 if (quiesce) 637 if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
643 pReq->Reserved2 = 0; 648 pReq->Reserved2 = 0;
644 pReq->ActionDataWord = 0; /* Reserved for this action */ 649 pReq->ActionDataWord = 0; /* Reserved for this action */
645 650
646 mpt_add_sge((char *)&pReq->ActionDataSGE, 651 ioc->add_sge((char *)&pReq->ActionDataSGE,
647 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); 652 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
648 653
649 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", 654 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
650 ioc->name, pReq->Action, channel, id)); 655 ioc->name, pReq->Action, channel, id));
651 656
652 hd->pLocal = NULL; 657 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
653 hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
654 hd->scandv_wait_done = 0;
655
656 /* Save cmd pointer, for resource free if timeout or
657 * FW reload occurs
658 */
659 hd->cmdPtr = mf;
660
661 add_timer(&hd->timer);
662 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 658 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
663 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 659 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
660 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
661 ret = -ETIME;
662 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
663 ioc->name, __func__));
664 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
665 goto out;
666 if (!timeleft) {
667 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
668 ioc->name, __func__);
669 mpt_HardResetHandler(ioc, CAN_SLEEP);
670 mpt_free_msg_frame(ioc, mf);
671 }
672 goto out;
673 }
664 674
665 if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0)) 675 ret = ioc->internal_cmds.completion_code;
666 return -1;
667 676
668 return 0; 677 out:
678 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
679 mutex_unlock(&ioc->internal_cmds.mutex);
680 return ret;
669} 681}
670 682
671static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, 683static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1423 * A slightly different algorithm is required for 1435 * A slightly different algorithm is required for
1424 * 64bit SGEs. 1436 * 64bit SGEs.
1425 */ 1437 */
1426 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1438 scale = ioc->req_sz/ioc->SGE_size;
1427 if (sizeof(dma_addr_t) == sizeof(u64)) { 1439 if (ioc->sg_addr_size == sizeof(u64)) {
1428 numSGE = (scale - 1) * 1440 numSGE = (scale - 1) *
1429 (ioc->facts.MaxChainDepth-1) + scale + 1441 (ioc->facts.MaxChainDepth-1) + scale +
1430 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1442 (ioc->req_sz - 60) / ioc->SGE_size;
1431 sizeof(u32));
1432 } else { 1443 } else {
1433 numSGE = 1 + (scale - 1) * 1444 numSGE = 1 + (scale - 1) *
1434 (ioc->facts.MaxChainDepth-1) + scale + 1445 (ioc->facts.MaxChainDepth-1) + scale +
1435 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1446 (ioc->req_sz - 64) / ioc->SGE_size;
1436 sizeof(u32));
1437 } 1447 }
1438 1448
1439 if (numSGE < sh->sg_tablesize) { 1449 if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1464 1474
1465 /* Clear the TM flags 1475 /* Clear the TM flags
1466 */ 1476 */
1467 hd->tmPending = 0;
1468 hd->tmState = TM_STATE_NONE;
1469 hd->resetPending = 0;
1470 hd->abortSCpnt = NULL; 1477 hd->abortSCpnt = NULL;
1471 1478
1472 /* Clear the pointer used to store 1479 /* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1493 mpt_saf_te)); 1500 mpt_saf_te));
1494 ioc->spi_data.noQas = 0; 1501 ioc->spi_data.noQas = 0;
1495 1502
1496 init_waitqueue_head(&hd->scandv_waitq);
1497 hd->scandv_wait_done = 0;
1498 hd->last_queue_full = 0; 1503 hd->last_queue_full = 0;
1499 hd->spi_pending = 0; 1504 hd->spi_pending = 0;
1500 1505
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1514 * issue internal bus reset 1519 * issue internal bus reset
1515 */ 1520 */
1516 if (ioc->spi_data.bus_reset) 1521 if (ioc->spi_data.bus_reset)
1517 mptscsih_TMHandler(hd, 1522 mptscsih_IssueTaskMgmt(hd,
1518 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1523 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1519 0, 0, 0, 0, 5); 1524 0, 0, 0, 0, 5);
1520 1525
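With mptscsih_TMHandler() removed from the exports, the probe path calls the newly exported mptscsih_IssueTaskMgmt() directly. Matching the prototype added to mptscsih.h above (hd, type, channel, id, lun, ctx2abort, timeout), the internal bus reset issued here reads:

	if (ioc->spi_data.bus_reset)
		mptscsih_IssueTaskMgmt(hd,
		    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
		    0, 0, 0, 0, 5);	/* channel 0, id 0, lun 0, no ctx2abort, timeout 5 */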
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..335d4c78a775 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
426 struct request_queue *q = req->q; 426 struct request_queue *q = req->q;
427 unsigned long flags; 427 unsigned long flags;
428 428
429 if (blk_end_request(req, error, nr_bytes)) { 429 if (blk_end_request(req, error, nr_bytes))
430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
431
432 if (blk_pc_request(req))
433 leftover = req->data_len;
434
435 if (error) 430 if (error)
436 blk_end_request(req, -EIO, leftover); 431 blk_end_request_all(req, -EIO);
437 }
438 432
439 spin_lock_irqsave(q->queue_lock, flags); 433 spin_lock_irqsave(q->queue_lock, flags);
440 434
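blk_end_request() returns true when part of the request is still outstanding. The old code then computed the leftover byte count by hand (hard_nr_sectors shifted to bytes, or data_len for packet commands) and failed it with a second blk_end_request(); blk_end_request_all() does that bookkeeping itself, so the error path collapses to:

	if (blk_end_request(req, error, nr_bytes))
		if (error)
			blk_end_request_all(req, -EIO);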
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
761 break; 755 break;
762 756
763 case CACHE_SMARTFETCH: 757 case CACHE_SMARTFETCH:
764 if (req->nr_sectors > 16) 758 if (blk_rq_sectors(req) > 16)
765 ctl_flags = 0x201F0008; 759 ctl_flags = 0x201F0008;
766 else 760 else
767 ctl_flags = 0x001F0000; 761 ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
781 ctl_flags = 0x001F0010; 775 ctl_flags = 0x001F0010;
782 break; 776 break;
783 case CACHE_SMARTBACK: 777 case CACHE_SMARTBACK:
784 if (req->nr_sectors > 16) 778 if (blk_rq_sectors(req) > 16)
785 ctl_flags = 0x001F0004; 779 ctl_flags = 0x001F0004;
786 else 780 else
787 ctl_flags = 0x001F0010; 781 ctl_flags = 0x001F0010;
788 break; 782 break;
789 case CACHE_SMARTTHROUGH: 783 case CACHE_SMARTTHROUGH:
790 if (req->nr_sectors > 16) 784 if (blk_rq_sectors(req) > 16)
791 ctl_flags = 0x001F0004; 785 ctl_flags = 0x001F0004;
792 else 786 else
793 ctl_flags = 0x001F0010; 787 ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
800 if (c->adaptec) { 794 if (c->adaptec) {
801 u8 cmd[10]; 795 u8 cmd[10];
802 u32 scsi_flags; 796 u32 scsi_flags;
803 u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 797 u16 hwsec;
804 798
799 hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
805 memset(cmd, 0, 10); 800 memset(cmd, 0, 10);
806 801
807 sgl_offset = SGL_OFFSET_12; 802 sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
827 822
828 *mptr++ = cpu_to_le32(scsi_flags); 823 *mptr++ = cpu_to_le32(scsi_flags);
829 824
830 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 825 *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
831 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 826 *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
832 827
833 memcpy(mptr, cmd, 10); 828 memcpy(mptr, cmd, 10);
834 mptr += 4; 829 mptr += 4;
835 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 830 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
836 } else 831 } else
837#endif 832#endif
838 { 833 {
839 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); 834 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
840 *mptr++ = cpu_to_le32(ctl_flags); 835 *mptr++ = cpu_to_le32(ctl_flags);
841 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 836 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
842 *mptr++ = 837 *mptr++ =
843 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 838 cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
844 *mptr++ = 839 *mptr++ =
845 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 840 cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
846 } 841 }
847 842
848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 843 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
883 struct request *req; 878 struct request *req;
884 879
885 while (!blk_queue_plugged(q)) { 880 while (!blk_queue_plugged(q)) {
886 req = elv_next_request(q); 881 req = blk_peek_request(q);
887 if (!req) 882 if (!req)
888 break; 883 break;
889 884
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
896 891
897 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 892 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
898 if (!i2o_block_transfer(req)) { 893 if (!i2o_block_transfer(req)) {
899 blkdev_dequeue_request(req); 894 blk_start_request(req);
900 continue; 895 continue;
901 } else 896 } else
902 osm_info("transfer error\n"); 897 osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
922 blk_stop_queue(q); 917 blk_stop_queue(q);
923 break; 918 break;
924 } 919 }
925 } else 920 } else {
926 end_request(req, 0); 921 blk_start_request(req);
922 __blk_end_request_all(req, -EIO);
923 }
927 } 924 }
928}; 925};
929 926
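The dispatch loop drops elv_next_request(), blkdev_dequeue_request() and end_request() for their 2.6.31 replacements: the driver peeks at the head of the queue, starts (dequeues) a request only once it is actually being issued, and explicitly fails requests it cannot handle. Condensed to its control flow, the loop produced by the two hunks above looks roughly like this (request_is_serviceable() is a placeholder for the device and queue-depth checks, and the stop-queue handling is trimmed):

	while (!blk_queue_plugged(q)) {
		req = blk_peek_request(q);	/* was elv_next_request(q) */
		if (!req)
			break;

		if (request_is_serviceable(req)) {	/* placeholder check */
			if (!i2o_block_transfer(req)) {
				blk_start_request(req);	/* was blkdev_dequeue_request(req) */
				continue;
			}
			osm_info("transfer error\n");
			break;	/* real code re-queues and stops the queue here */
		} else {
			blk_start_request(req);			/* dequeue it ... */
			__blk_end_request_all(req, -EIO);	/* ... and fail it (was end_request(req, 0)) */
		}
	}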
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
1082 */ 1079 */
1083 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1080 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1084 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1081 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1085 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); 1082 blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
1086 } else 1083 } else
1087 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1084 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1088 1085
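Finally, probe-time setup uses the renamed setter: blk_queue_hardsect_size() became blk_queue_logical_block_size(), the write-side counterpart of the queue_logical_block_size() read used in i2o_block_transfer() above:

	/* was blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); */
	blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));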