-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas    23
-rw-r--r--  drivers/message/fusion/mptbase.c            115
-rw-r--r--  drivers/message/fusion/mptbase.h              2
-rw-r--r--  drivers/message/fusion/mptctl.c             241
-rw-r--r--  drivers/message/fusion/mptctl.h               4
-rw-r--r--  drivers/message/fusion/mptscsih.c             2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c                 76
-rw-r--r--  drivers/s390/scsi/zfcp_def.h                 13
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c                 82
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h                  5
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c                 80
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c                15
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c        4
-rw-r--r--  drivers/scsi/3w-9xxx.c                        7
-rw-r--r--  drivers/scsi/aacraid/aachba.c               217
-rw-r--r--  drivers/scsi/aacraid/aacraid.h               18
-rw-r--r--  drivers/scsi/aacraid/commctrl.c              22
-rw-r--r--  drivers/scsi/aacraid/comminit.c              12
-rw-r--r--  drivers/scsi/aacraid/commsup.c               50
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c                 2
-rw-r--r--  drivers/scsi/aacraid/linit.c                 50
-rw-r--r--  drivers/scsi/gdth.c                           2
-rw-r--r--  drivers/scsi/ipr.c                           49
-rw-r--r--  drivers/scsi/ipr.h                            5
-rw-r--r--  drivers/scsi/iscsi_tcp.c                     78
-rw-r--r--  drivers/scsi/iscsi_tcp.h                      4
-rw-r--r--  drivers/scsi/megaraid.c                       2
-rw-r--r--  drivers/scsi/megaraid.h                       2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c        101
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h         53
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c             276
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h               44
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h               27
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c               1
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c               1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c                4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c              108
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c                43
-rw-r--r--  drivers/scsi/qla2xxx/qla_rscn.c               2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c              963
-rw-r--r--  drivers/scsi/scsi_lib.c                      59
-rw-r--r--  drivers/scsi/scsi_scan.c                     26
-rw-r--r--  drivers/scsi/scsi_sysfs.c                     9
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c         260
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c           2
-rw-r--r--  include/scsi/iscsi_if.h                       3
-rw-r--r--  include/scsi/scsi.h                           2
-rw-r--r--  include/scsi/scsi_transport_iscsi.h          34
48 files changed, 2388 insertions, 812 deletions
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index f8c16cbf56ba..2dafa63bd370 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,26 @@
11 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
22 Current Version : 00.00.02.04
33 Older Version : 00.00.02.04
4
5i. Support for 1078 type (ppc IOP) controller, device id : 0x60 added.
6 During initialization, depending on the device id, the template members
7 are initialized with function pointers specific to the ppc or
8 xscale controllers.
9
10 -Sumant Patro <Sumant.Patro@lsil.com>
11
121 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro
13 <Sumant.Patro@lsil.com>
142 Current Version : 00.00.02.04
153 Older Version : 00.00.02.02
16i. Register 16 byte CDB capability with scsi midlayer
17
18 "Ths patch properly registers the 16 byte command length capability of the
19 megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas
20 hardware supports 16 byte CDB's."
21
22 -Joshua Giles <joshua_giles@dell.com>
23
11 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 241 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
22 Current Version : 00.00.02.02 252 Current Version : 00.00.02.02
33 Older Version : 00.00.02.01 263 Older Version : 00.00.02.01
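The megaraid_sas changelog entry above describes selecting the template members at init time, with function pointers specific to the ppc (device id 0x60, 1078 type) or xscale controllers. A stand-alone C sketch of that dispatch pattern; every name below is illustrative, not one of the driver's actual symbols:

#include <stdio.h>

/* Hypothetical instance template: one set of function pointers per controller family. */
struct instance_template {
	void (*fire_cmd)(const char *cdb);
};

static void fire_cmd_xscale(const char *cdb) { printf("xscale path: %s\n", cdb); }
static void fire_cmd_ppc(const char *cdb)    { printf("ppc IOP path: %s\n", cdb); }

static const struct instance_template xscale_tmpl = { .fire_cmd = fire_cmd_xscale };
static const struct instance_template ppc_tmpl    = { .fire_cmd = fire_cmd_ppc };

/* During probe, pick the template from the PCI device id; 0x60 is the 1078 (ppc IOP) id per the log above. */
static const struct instance_template *select_template(unsigned short device_id)
{
	return (device_id == 0x60) ? &ppc_tmpl : &xscale_tmpl;
}

int main(void)
{
	const struct instance_template *t = select_template(0x60);
	t->fire_cmd("INQUIRY");
	return 0;
}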
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9a2c7605d49c..642a61b6d0a4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -452,8 +452,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
452 } else if (func == MPI_FUNCTION_EVENT_ACK) { 452 } else if (func == MPI_FUNCTION_EVENT_ACK) {
453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", 453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n",
454 ioc->name)); 454 ioc->name));
455 } else if (func == MPI_FUNCTION_CONFIG || 455 } else if (func == MPI_FUNCTION_CONFIG) {
456 func == MPI_FUNCTION_TOOLBOX) {
457 CONFIGPARMS *pCfg; 456 CONFIGPARMS *pCfg;
458 unsigned long flags; 457 unsigned long flags;
459 458
@@ -5327,115 +5326,6 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5327} 5326}
5328 5327
5329/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5328/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5330/**
5331 * mpt_toolbox - Generic function to issue toolbox message
5332 * @ioc - Pointer to an adapter structure
5333 * @cfg - Pointer to a toolbox structure. Struct contains
5334 * action, page address, direction, physical address
5335 * and pointer to a configuration page header
5336 * Page header is updated.
5337 *
5338 * Returns 0 for success
5339 * -EPERM if not allowed due to ISR context
5340 * -EAGAIN if no msg frames currently available
5341 * -EFAULT for non-successful reply or no reply (timeout)
5342 */
5343int
5344mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5345{
5346 ToolboxIstwiReadWriteRequest_t *pReq;
5347 MPT_FRAME_HDR *mf;
5348 struct pci_dev *pdev;
5349 unsigned long flags;
5350 int rc;
5351 u32 flagsLength;
5352 int in_isr;
5353
5354 /* Prevent calling wait_event() (below), if caller happens
5355 * to be in ISR context, because that is fatal!
5356 */
5357 in_isr = in_interrupt();
5358 if (in_isr) {
5359 dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n",
5360 ioc->name));
5361 return -EPERM;
5362 }
5363
5364 /* Get and Populate a free Frame
5365 */
5366 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5367 dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n",
5368 ioc->name));
5369 return -EAGAIN;
5370 }
5371 pReq = (ToolboxIstwiReadWriteRequest_t *)mf;
5372 pReq->Tool = pCfg->action;
5373 pReq->Reserved = 0;
5374 pReq->ChainOffset = 0;
5375 pReq->Function = MPI_FUNCTION_TOOLBOX;
5376 pReq->Reserved1 = 0;
5377 pReq->Reserved2 = 0;
5378 pReq->MsgFlags = 0;
5379 pReq->Flags = pCfg->dir;
5380 pReq->BusNum = 0;
5381 pReq->Reserved3 = 0;
5382 pReq->NumAddressBytes = 0x01;
5383 pReq->Reserved4 = 0;
5384 pReq->DataLength = cpu_to_le16(0x04);
5385 pdev = ioc->pcidev;
5386 if (pdev->devfn & 1)
5387 pReq->DeviceAddr = 0xB2;
5388 else
5389 pReq->DeviceAddr = 0xB0;
5390 pReq->Addr1 = 0;
5391 pReq->Addr2 = 0;
5392 pReq->Addr3 = 0;
5393 pReq->Reserved5 = 0;
5394
5395 /* Add a SGE to the config request.
5396 */
5397
5398 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4;
5399
5400 mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr);
5401
5402 dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n",
5403 ioc->name, pReq->Tool));
5404
5405 /* Append pCfg pointer to end of mf
5406 */
5407 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5408
5409 /* Initalize the timer
5410 */
5411 init_timer(&pCfg->timer);
5412 pCfg->timer.data = (unsigned long) ioc;
5413 pCfg->timer.function = mpt_timer_expired;
5414 pCfg->wait_done = 0;
5415
5416 /* Set the timer; ensure 10 second minimum */
5417 if (pCfg->timeout < 10)
5418 pCfg->timer.expires = jiffies + HZ*10;
5419 else
5420 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5421
5422 /* Add to end of Q, set timer and then issue this command */
5423 spin_lock_irqsave(&ioc->FreeQlock, flags);
5424 list_add_tail(&pCfg->linkage, &ioc->configQ);
5425 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5426
5427 add_timer(&pCfg->timer);
5428 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5429 wait_event(mpt_waitq, pCfg->wait_done);
5430
5431 /* mf has been freed - do not access */
5432
5433 rc = pCfg->status;
5434
5435 return rc;
5436}
5437
5438/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5439/* 5329/*
5440 * mpt_timer_expired - Call back for timer process. 5330 * mpt_timer_expired - Call back for timer process.
5441 * Used only internal config functionality. 5331 * Used only internal config functionality.
@@ -6142,7 +6032,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6142 if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 6032 if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
6143 int idx; 6033 int idx;
6144 6034
6145 idx = ioc->eventContext % ioc->eventLogSize; 6035 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
6146 6036
6147 ioc->events[idx].event = event; 6037 ioc->events[idx].event = event;
6148 ioc->events[idx].eventContext = ioc->eventContext; 6038 ioc->events[idx].eventContext = ioc->eventContext;
@@ -6540,7 +6430,6 @@ EXPORT_SYMBOL(mpt_lan_index);
6540EXPORT_SYMBOL(mpt_stm_index); 6430EXPORT_SYMBOL(mpt_stm_index);
6541EXPORT_SYMBOL(mpt_HardResetHandler); 6431EXPORT_SYMBOL(mpt_HardResetHandler);
6542EXPORT_SYMBOL(mpt_config); 6432EXPORT_SYMBOL(mpt_config);
6543EXPORT_SYMBOL(mpt_toolbox);
6544EXPORT_SYMBOL(mpt_findImVolumes); 6433EXPORT_SYMBOL(mpt_findImVolumes);
6545EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6434EXPORT_SYMBOL(mpt_read_ioc_pg_3);
6546EXPORT_SYMBOL(mpt_alloc_fw_memory); 6435EXPORT_SYMBOL(mpt_alloc_fw_memory);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index ea2649ecad1f..723d54300953 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -616,6 +616,7 @@ typedef struct _MPT_ADAPTER
616 * increments by 32 bytes 616 * increments by 32 bytes
617 */ 617 */
618 int errata_flag_1064; 618 int errata_flag_1064;
619 int aen_event_read_flag; /* flag to indicate event log was read*/
619 u8 FirstWhoInit; 620 u8 FirstWhoInit;
620 u8 upload_fw; /* If set, do a fw upload */ 621 u8 upload_fw; /* If set, do a fw upload */
621 u8 reload_fw; /* Force a FW Reload on next reset */ 622 u8 reload_fw; /* Force a FW Reload on next reset */
@@ -1026,7 +1027,6 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
1026extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); 1027extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
1027extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 1028extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
1028extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); 1029extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1029extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1030extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); 1030extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
1031extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 1031extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
1032extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 1032extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index bdf709987982..9b64e07400da 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -136,6 +136,12 @@ static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
136 */ 136 */
137static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); 137static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
138 138
139/*
140 * Event Handler function
141 */
142static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
143struct fasync_struct *async_queue=NULL;
144
139/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
140/* 146/*
141 * Scatter gather list (SGL) sizes and limits... 147 * Scatter gather list (SGL) sizes and limits...
@@ -385,18 +391,18 @@ static int mptctl_bus_reset(MPT_IOCTL *ioctl)
385 } 391 }
386 392
387 /* Now wait for the command to complete */ 393 /* Now wait for the command to complete */
388 ii = wait_event_interruptible_timeout(mptctl_wait, 394 ii = wait_event_timeout(mptctl_wait,
389 ioctl->wait_done == 1, 395 ioctl->wait_done == 1,
390 HZ*5 /* 5 second timeout */); 396 HZ*5 /* 5 second timeout */);
391 397
392 if(ii <=0 && (ioctl->wait_done != 1 )) { 398 if(ii <=0 && (ioctl->wait_done != 1 )) {
399 mpt_free_msg_frame(hd->ioc, mf);
393 ioctl->wait_done = 0; 400 ioctl->wait_done = 0;
394 retval = -1; /* return failure */ 401 retval = -1; /* return failure */
395 } 402 }
396 403
397mptctl_bus_reset_done: 404mptctl_bus_reset_done:
398 405
399 mpt_free_msg_frame(hd->ioc, mf);
400 mptctl_free_tm_flags(ioctl->ioc); 406 mptctl_free_tm_flags(ioctl->ioc);
401 return retval; 407 return retval;
402} 408}
@@ -472,6 +478,69 @@ mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
472} 478}
473 479
474/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 480/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
481/* ASYNC Event Notification Support */
482static int
483mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
484{
485 u8 event;
486
487 event = le32_to_cpu(pEvReply->Event) & 0xFF;
488
489 dctlprintk(("%s() called\n", __FUNCTION__));
490 if(async_queue == NULL)
491 return 1;
492
493 /* Raise SIGIO for persistent events.
494 * TODO - this define is not in MPI spec yet,
495 * but they plan to set it to 0x21
496 */
497 if (event == 0x21 ) {
498 ioc->aen_event_read_flag=1;
499 dctlprintk(("Raised SIGIO to application\n"));
500 devtprintk(("Raised SIGIO to application\n"));
501 kill_fasync(&async_queue, SIGIO, POLL_IN);
502 return 1;
503 }
504
505 /* This flag is set after SIGIO was raised, and
506 * remains set until the application has read
507 * the event log via ioctl=MPTEVENTREPORT
508 */
509 if(ioc->aen_event_read_flag)
510 return 1;
511
512 /* Signal only for the events that are
513 * requested for by the application
514 */
515 if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
516 ioc->aen_event_read_flag=1;
517 dctlprintk(("Raised SIGIO to application\n"));
518 devtprintk(("Raised SIGIO to application\n"));
519 kill_fasync(&async_queue, SIGIO, POLL_IN);
520 }
521 return 1;
522}
523
524static int
525mptctl_fasync(int fd, struct file *filep, int mode)
526{
527 MPT_ADAPTER *ioc;
528
529 list_for_each_entry(ioc, &ioc_list, list)
530 ioc->aen_event_read_flag=0;
531
532 dctlprintk(("%s() called\n", __FUNCTION__));
533 return fasync_helper(fd, filep, mode, &async_queue);
534}
535
536static int
537mptctl_release(struct inode *inode, struct file *filep)
538{
539 dctlprintk(("%s() called\n", __FUNCTION__));
540 return fasync_helper(-1, filep, 0, &async_queue);
541}
542
543/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
475/* 544/*
476 * MPT ioctl handler 545 * MPT ioctl handler
477 * cmd - specify the particular IOCTL command to be issued 546 * cmd - specify the particular IOCTL command to be issued
@@ -674,22 +743,23 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
674 u16 iocstat; 743 u16 iocstat;
675 pFWDownloadReply_t ReplyMsg = NULL; 744 pFWDownloadReply_t ReplyMsg = NULL;
676 745
677 dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); 746 dctlprintk(("mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
678 747
679 dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); 748 dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf));
680 dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 749 dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen));
681 dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); 750 dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc));
682 751
683 if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { 752 if (mpt_verify_adapter(ioc, &iocp) < 0) {
684 dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n", 753 dctlprintk(("ioctl_fwdl - ioc%d not found!\n",
685 __FILE__, __LINE__, ioc)); 754 ioc));
686 return -ENODEV; /* (-6) No such device or address */ 755 return -ENODEV; /* (-6) No such device or address */
687 } 756 } else {
688 757
689 /* Valid device. Get a message frame and construct the FW download message. 758 /* Valid device. Get a message frame and construct the FW download message.
690 */ 759 */
691 if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 760 if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
692 return -EAGAIN; 761 return -EAGAIN;
762 }
693 dlmsg = (FWDownload_t*) mf; 763 dlmsg = (FWDownload_t*) mf;
694 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; 764 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
695 sgOut = (char *) (ptsge + 1); 765 sgOut = (char *) (ptsge + 1);
@@ -702,7 +772,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
702 dlmsg->ChainOffset = 0; 772 dlmsg->ChainOffset = 0;
703 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; 773 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
704 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; 774 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
705 dlmsg->MsgFlags = 0; 775 if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
776 dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
777 else
778 dlmsg->MsgFlags = 0;
779
706 780
707 /* Set up the Transaction SGE. 781 /* Set up the Transaction SGE.
708 */ 782 */
@@ -754,7 +828,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
754 goto fwdl_out; 828 goto fwdl_out;
755 } 829 }
756 830
757 dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 831 dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags));
758 832
759 /* 833 /*
760 * Parse SG list, copying sgl itself, 834 * Parse SG list, copying sgl itself,
@@ -803,11 +877,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
803 /* 877 /*
804 * Finally, perform firmware download. 878 * Finally, perform firmware download.
805 */ 879 */
806 iocp->ioctl->wait_done = 0; 880 ReplyMsg = NULL;
807 mpt_put_msg_frame(mptctl_id, iocp, mf); 881 mpt_put_msg_frame(mptctl_id, iocp, mf);
808 882
809 /* Now wait for the command to complete */ 883 /* Now wait for the command to complete */
810 ret = wait_event_interruptible_timeout(mptctl_wait, 884 ret = wait_event_timeout(mptctl_wait,
811 iocp->ioctl->wait_done == 1, 885 iocp->ioctl->wait_done == 1,
812 HZ*60); 886 HZ*60);
813 887
@@ -1145,7 +1219,9 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1145 /* Fill in the data and return the structure to the calling 1219 /* Fill in the data and return the structure to the calling
1146 * program 1220 * program
1147 */ 1221 */
1148 if (ioc->bus_type == FC) 1222 if (ioc->bus_type == SAS)
1223 karg->adapterType = MPT_IOCTL_INTERFACE_SAS;
1224 else if (ioc->bus_type == FC)
1149 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1225 karg->adapterType = MPT_IOCTL_INTERFACE_FC;
1150 else 1226 else
1151 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; 1227 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
@@ -1170,12 +1246,11 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1170 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1246 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1171 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1247 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1172 } else if (cim_rev == 2) { 1248 } else if (cim_rev == 2) {
1173 /* Get the PCI bus, device, function and segment ID numbers 1249 /* Get the PCI bus, device, function and segment ID numbers
1174 for the IOC */ 1250 for the IOC */
1175 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1251 karg->pciInfo.u.bits.busNumber = pdev->bus->number;
1176 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1252 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1177 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1253 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1178 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1179 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1254 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
1180 } 1255 }
1181 1256
@@ -1500,7 +1575,7 @@ mptctl_eventquery (unsigned long arg)
1500 return -ENODEV; 1575 return -ENODEV;
1501 } 1576 }
1502 1577
1503 karg.eventEntries = ioc->eventLogSize; 1578 karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
1504 karg.eventTypes = ioc->eventTypes; 1579 karg.eventTypes = ioc->eventTypes;
1505 1580
1506 /* Copy the data from kernel memory to user memory 1581 /* Copy the data from kernel memory to user memory
@@ -1550,7 +1625,6 @@ mptctl_eventenable (unsigned long arg)
1550 memset(ioc->events, 0, sz); 1625 memset(ioc->events, 0, sz);
1551 ioc->alloc_total += sz; 1626 ioc->alloc_total += sz;
1552 1627
1553 ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE;
1554 ioc->eventContext = 0; 1628 ioc->eventContext = 0;
1555 } 1629 }
1556 1630
@@ -1590,7 +1664,7 @@ mptctl_eventreport (unsigned long arg)
1590 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1664 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
1591 1665
1592 1666
1593 max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; 1667 max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
1594 1668
1595 /* If fewer than 1 event is requested, there must have 1669 /* If fewer than 1 event is requested, there must have
1596 * been some type of error. 1670 * been some type of error.
@@ -1598,6 +1672,9 @@ mptctl_eventreport (unsigned long arg)
1598 if ((max < 1) || !ioc->events) 1672 if ((max < 1) || !ioc->events)
1599 return -ENODATA; 1673 return -ENODATA;
1600 1674
1675 /* reset this flag so SIGIO can restart */
1676 ioc->aen_event_read_flag=0;
1677
1601 /* Copy the data from kernel memory to user memory 1678 /* Copy the data from kernel memory to user memory
1602 */ 1679 */
1603 numBytes = max * sizeof(MPT_IOCTL_EVENTS); 1680 numBytes = max * sizeof(MPT_IOCTL_EVENTS);
@@ -1817,6 +1894,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1817 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1894 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1818 case MPI_FUNCTION_FW_DOWNLOAD: 1895 case MPI_FUNCTION_FW_DOWNLOAD:
1819 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1896 case MPI_FUNCTION_FC_PRIMITIVE_SEND:
1897 case MPI_FUNCTION_TOOLBOX:
1898 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
1820 break; 1899 break;
1821 1900
1822 case MPI_FUNCTION_SCSI_IO_REQUEST: 1901 case MPI_FUNCTION_SCSI_IO_REQUEST:
@@ -1837,7 +1916,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1837 goto done_free_mem; 1916 goto done_free_mem;
1838 } 1917 }
1839 1918
1840 pScsiReq->MsgFlags = mpt_msg_flags(); 1919 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1920 pScsiReq->MsgFlags |= mpt_msg_flags();
1921
1841 1922
1842 /* verify that app has not requested 1923 /* verify that app has not requested
1843 * more sense data than driver 1924 * more sense data than driver
@@ -1888,6 +1969,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1888 } 1969 }
1889 break; 1970 break;
1890 1971
1972 case MPI_FUNCTION_SMP_PASSTHROUGH:
1973 /* Check mf->PassthruFlags to determine if
1974 * transfer is ImmediateMode or not.
1975 * Immediate mode returns data in the ReplyFrame.
1976 * Else, we are sending request and response data
1977 * in two SGLs at the end of the mf.
1978 */
1979 break;
1980
1981 case MPI_FUNCTION_SATA_PASSTHROUGH:
1982 if (!ioc->sh) {
1983 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1984 "SCSI driver is not loaded. \n",
1985 __FILE__, __LINE__);
1986 rc = -EFAULT;
1987 goto done_free_mem;
1988 }
1989 break;
1990
1891 case MPI_FUNCTION_RAID_ACTION: 1991 case MPI_FUNCTION_RAID_ACTION:
1892 /* Just add a SGE 1992 /* Just add a SGE
1893 */ 1993 */
@@ -1900,7 +2000,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1900 int scsidir = MPI_SCSIIO_CONTROL_READ; 2000 int scsidir = MPI_SCSIIO_CONTROL_READ;
1901 int dataSize; 2001 int dataSize;
1902 2002
1903 pScsiReq->MsgFlags = mpt_msg_flags(); 2003 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
2004 pScsiReq->MsgFlags |= mpt_msg_flags();
2005
1904 2006
1905 /* verify that app has not requested 2007 /* verify that app has not requested
1906 * more sense data than driver 2008 * more sense data than driver
@@ -2130,7 +2232,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2130 2232
2131 /* Now wait for the command to complete */ 2233 /* Now wait for the command to complete */
2132 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2234 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2133 timeout = wait_event_interruptible_timeout(mptctl_wait, 2235 timeout = wait_event_timeout(mptctl_wait,
2134 ioc->ioctl->wait_done == 1, 2236 ioc->ioctl->wait_done == 1,
2135 HZ*timeout); 2237 HZ*timeout);
2136 2238
@@ -2246,13 +2348,16 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2246 hp_host_info_t __user *uarg = (void __user *) arg; 2348 hp_host_info_t __user *uarg = (void __user *) arg;
2247 MPT_ADAPTER *ioc; 2349 MPT_ADAPTER *ioc;
2248 struct pci_dev *pdev; 2350 struct pci_dev *pdev;
2249 char *pbuf; 2351 char *pbuf=NULL;
2250 dma_addr_t buf_dma; 2352 dma_addr_t buf_dma;
2251 hp_host_info_t karg; 2353 hp_host_info_t karg;
2252 CONFIGPARMS cfg; 2354 CONFIGPARMS cfg;
2253 ConfigPageHeader_t hdr; 2355 ConfigPageHeader_t hdr;
2254 int iocnum; 2356 int iocnum;
2255 int rc, cim_rev; 2357 int rc, cim_rev;
2358 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2359 MPT_FRAME_HDR *mf = NULL;
2360 MPIHeader_t *mpi_hdr;
2256 2361
2257 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2362 dctlprintk((": mptctl_hp_hostinfo called.\n"));
2258 /* Reset long to int. Should affect IA64 and SPARC only 2363 /* Reset long to int. Should affect IA64 and SPARC only
@@ -2370,7 +2475,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2370 2475
2371 karg.base_io_addr = pci_resource_start(pdev, 0); 2476 karg.base_io_addr = pci_resource_start(pdev, 0);
2372 2477
2373 if (ioc->bus_type == FC) 2478 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2374 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2479 karg.bus_phys_width = HP_BUS_WIDTH_UNK;
2375 else 2480 else
2376 karg.bus_phys_width = HP_BUS_WIDTH_16; 2481 karg.bus_phys_width = HP_BUS_WIDTH_16;
@@ -2388,20 +2493,67 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2388 } 2493 }
2389 } 2494 }
2390 2495
2391 cfg.pageAddr = 0; 2496 /*
2392 cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2497 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2393 cfg.dir = MPI_TB_ISTWI_FLAGS_READ; 2498 */
2394 cfg.timeout = 10; 2499 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2500 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
2501 ioc->name,__FUNCTION__));
2502 goto out;
2503 }
2504
2505 IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
2506 mpi_hdr = (MPIHeader_t *) mf;
2507 memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
2508 IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
2509 IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
2510 IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
2511 IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
2512 IstwiRWRequest->NumAddressBytes = 0x01;
2513 IstwiRWRequest->DataLength = cpu_to_le16(0x04);
2514 if (pdev->devfn & 1)
2515 IstwiRWRequest->DeviceAddr = 0xB2;
2516 else
2517 IstwiRWRequest->DeviceAddr = 0xB0;
2518
2395 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2519 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2396 if (pbuf) { 2520 if (!pbuf)
2397 cfg.physAddr = buf_dma; 2521 goto out;
2398 if ((mpt_toolbox(ioc, &cfg)) == 0) { 2522 mpt_add_sge((char *)&IstwiRWRequest->SGL,
2399 karg.rsvd = *(u32 *)pbuf; 2523 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2400 } 2524
2401 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2525 ioc->ioctl->wait_done = 0;
2402 pbuf = NULL; 2526 mpt_put_msg_frame(mptctl_id, ioc, mf);
2527
2528 rc = wait_event_timeout(mptctl_wait,
2529 ioc->ioctl->wait_done == 1,
2530 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */);
2531
2532 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) {
2533 /*
2534 * Now we need to reset the board
2535 */
2536 mpt_free_msg_frame(ioc, mf);
2537 mptctl_timeout_expired(ioc->ioctl);
2538 goto out;
2403 } 2539 }
2404 2540
2541 /*
2542 *ISTWI Data Definition
2543 * pbuf[0] = FW_VERSION = 0x4
2544 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on
2545 * the config, you should be seeing one out of these three values
2546 * pbuf[2] = Drive Installed Map = bit pattern depend on which
2547 * bays have drives in them
2548 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2549 */
2550 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID)
2551 karg.rsvd = *(u32 *)pbuf;
2552
2553 out:
2554 if (pbuf)
2555 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2556
2405 /* Copy the data from kernel memory to user memory 2557 /* Copy the data from kernel memory to user memory
2406 */ 2558 */
2407 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { 2559 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
@@ -2459,7 +2611,7 @@ mptctl_hp_targetinfo(unsigned long arg)
2459 2611
2460 /* There is nothing to do for FCP parts. 2612 /* There is nothing to do for FCP parts.
2461 */ 2613 */
2462 if (ioc->bus_type == FC) 2614 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2463 return 0; 2615 return 0;
2464 2616
2465 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) 2617 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
@@ -2569,6 +2721,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2569static struct file_operations mptctl_fops = { 2721static struct file_operations mptctl_fops = {
2570 .owner = THIS_MODULE, 2722 .owner = THIS_MODULE,
2571 .llseek = no_llseek, 2723 .llseek = no_llseek,
2724 .release = mptctl_release,
2725 .fasync = mptctl_fasync,
2572 .unlocked_ioctl = mptctl_ioctl, 2726 .unlocked_ioctl = mptctl_ioctl,
2573#ifdef CONFIG_COMPAT 2727#ifdef CONFIG_COMPAT
2574 .compat_ioctl = compat_mpctl_ioctl, 2728 .compat_ioctl = compat_mpctl_ioctl,
@@ -2813,6 +2967,11 @@ static int __init mptctl_init(void)
2813 /* FIXME! */ 2967 /* FIXME! */
2814 } 2968 }
2815 2969
2970 if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) {
2971 devtprintk((KERN_INFO MYNAM
2972 ": Registered for IOC event notifications\n"));
2973 }
2974
2816 return 0; 2975 return 0;
2817 2976
2818out_fail: 2977out_fail:
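The mptctl.c changes above add .fasync/.release handlers so that persistent RAID events raise SIGIO via kill_fasync() to a registered application, which then drains the event log with the MPTEVENTREPORT ioctl (that path clears aen_event_read_flag so the signal can re-arm). A minimal user-space consumer of that mechanism could look like the sketch below; /dev/mptctl is the usual node for this driver, and the actual MPTEVENTREPORT call is left as a comment because its argument structure is not shown in this patch.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t event_pending;

static void sigio_handler(int sig)
{
	(void)sig;
	event_pending = 1;	/* driver raised SIGIO via kill_fasync() */
}

int main(void)
{
	int fd = open("/dev/mptctl", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());			/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); /* invokes the driver's fasync hook */

	/* Wait for one notification, then drain and exit. */
	while (!event_pending)
		pause();

	/* Drain the log with the MPTEVENTREPORT ioctl here; the driver clears
	 * aen_event_read_flag in that path so the next event re-raises SIGIO. */

	close(fd);
	return 0;
}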
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index 518996e03481..a2f8a97992e6 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -169,8 +169,10 @@ struct mpt_ioctl_pci_info2 {
169 * Read only. 169 * Read only.
170 * Data starts at offset 0xC 170 * Data starts at offset 0xC
171 */ 171 */
172#define MPT_IOCTL_INTERFACE_FC (0x01)
173#define MPT_IOCTL_INTERFACE_SCSI (0x00) 172#define MPT_IOCTL_INTERFACE_SCSI (0x00)
173#define MPT_IOCTL_INTERFACE_FC (0x01)
174#define MPT_IOCTL_INTERFACE_FC_IP (0x02)
175#define MPT_IOCTL_INTERFACE_SAS (0x03)
174#define MPT_IOCTL_VERSION_LENGTH (32) 176#define MPT_IOCTL_VERSION_LENGTH (32)
175 177
176struct mpt_ioctl_iocinfo { 178struct mpt_ioctl_iocinfo {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 05789e505464..4fee6befc93d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2489,7 +2489,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2489 int idx; 2489 int idx;
2490 MPT_ADAPTER *ioc = hd->ioc; 2490 MPT_ADAPTER *ioc = hd->ioc;
2491 2491
2492 idx = ioc->eventContext % ioc->eventLogSize; 2492 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
2494 ioc->events[idx].eventContext = ioc->eventContext; 2494 ioc->events[idx].eventContext = ioc->eventContext;
2495 2495
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 4d7d47cf2394..a5f2ba9a8fdb 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -710,10 +710,9 @@ static inline void
710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
711 struct zfcp_adapter *adapter, 711 struct zfcp_adapter *adapter,
712 struct scsi_cmnd *scsi_cmnd, 712 struct scsi_cmnd *scsi_cmnd,
713 struct zfcp_fsf_req *new_fsf_req) 713 struct zfcp_fsf_req *fsf_req,
714 struct zfcp_fsf_req *old_fsf_req)
714{ 715{
715 struct zfcp_fsf_req *fsf_req =
716 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
717 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 716 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
718 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 717 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
719 unsigned long flags; 718 unsigned long flags;
@@ -727,19 +726,20 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
727 if (offset == 0) { 726 if (offset == 0) {
728 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 727 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
729 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 728 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
730 if (scsi_cmnd->device) { 729 if (scsi_cmnd != NULL) {
731 rec->scsi_id = scsi_cmnd->device->id; 730 if (scsi_cmnd->device) {
732 rec->scsi_lun = scsi_cmnd->device->lun; 731 rec->scsi_id = scsi_cmnd->device->id;
732 rec->scsi_lun = scsi_cmnd->device->lun;
733 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd,
738 min((int)scsi_cmnd->cmd_len,
739 ZFCP_DBF_SCSI_OPCODE));
740 rec->scsi_retries = scsi_cmnd->retries;
741 rec->scsi_allowed = scsi_cmnd->allowed;
733 } 742 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode,
738 &scsi_cmnd->cmnd,
739 min((int)scsi_cmnd->cmd_len,
740 ZFCP_DBF_SCSI_OPCODE));
741 rec->scsi_retries = scsi_cmnd->retries;
742 rec->scsi_allowed = scsi_cmnd->allowed;
743 if (fsf_req != NULL) { 743 if (fsf_req != NULL) {
744 fcp_rsp = (struct fcp_rsp_iu *) 744 fcp_rsp = (struct fcp_rsp_iu *)
745 &(fsf_req->qtcb->bottom.io.fcp_rsp); 745 &(fsf_req->qtcb->bottom.io.fcp_rsp);
@@ -772,15 +772,8 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
772 rec->fsf_seqno = fsf_req->seq_no; 772 rec->fsf_seqno = fsf_req->seq_no;
773 rec->fsf_issued = fsf_req->issued; 773 rec->fsf_issued = fsf_req->issued;
774 } 774 }
775 if (new_fsf_req != NULL) { 775 rec->type.old_fsf_reqid =
776 rec->type.new_fsf_req.fsf_reqid = 776 (unsigned long) old_fsf_req;
777 (unsigned long)
778 new_fsf_req;
779 rec->type.new_fsf_req.fsf_seqno =
780 new_fsf_req->seq_no;
781 rec->type.new_fsf_req.fsf_issued =
782 new_fsf_req->issued;
783 }
784 } else { 777 } else {
785 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 778 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
786 dump->total_size = buflen; 779 dump->total_size = buflen;
@@ -801,19 +794,21 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
801inline void 794inline void
802zfcp_scsi_dbf_event_result(const char *tag, int level, 795zfcp_scsi_dbf_event_result(const char *tag, int level,
803 struct zfcp_adapter *adapter, 796 struct zfcp_adapter *adapter,
804 struct scsi_cmnd *scsi_cmnd) 797 struct scsi_cmnd *scsi_cmnd,
798 struct zfcp_fsf_req *fsf_req)
805{ 799{
806 _zfcp_scsi_dbf_event_common("rslt", 800 _zfcp_scsi_dbf_event_common("rslt", tag, level,
807 tag, level, adapter, scsi_cmnd, NULL); 801 adapter, scsi_cmnd, fsf_req, NULL);
808} 802}
809 803
810inline void 804inline void
811zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 805zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
812 struct scsi_cmnd *scsi_cmnd, 806 struct scsi_cmnd *scsi_cmnd,
813 struct zfcp_fsf_req *new_fsf_req) 807 struct zfcp_fsf_req *new_fsf_req,
808 struct zfcp_fsf_req *old_fsf_req)
814{ 809{
815 _zfcp_scsi_dbf_event_common("abrt", 810 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
816 tag, 1, adapter, scsi_cmnd, new_fsf_req); 811 adapter, scsi_cmnd, new_fsf_req, old_fsf_req);
817} 812}
818 813
819inline void 814inline void
@@ -823,7 +818,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct zfcp_adapter *adapter = unit->port->adapter; 818 struct zfcp_adapter *adapter = unit->port->adapter;
824 819
825 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 820 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
826 tag, 1, adapter, scsi_cmnd, NULL); 821 tag, 1, adapter, scsi_cmnd, NULL, NULL);
827} 822}
828 823
829static int 824static int
@@ -856,6 +851,10 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
856 rec->scsi_retries); 851 rec->scsi_retries);
857 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 852 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
858 rec->scsi_allowed); 853 rec->scsi_allowed);
854 if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
855 len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx",
856 rec->type.old_fsf_reqid);
857 }
859 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 858 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
860 rec->fsf_reqid); 859 rec->fsf_reqid);
861 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", 860 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
@@ -883,21 +882,6 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
883 min((int)rec->type.fcp.sns_info_len, 882 min((int)rec->type.fcp.sns_info_len,
884 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 883 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
885 rec->type.fcp.sns_info_len); 884 rec->type.fcp.sns_info_len);
886 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
887 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
888 rec->type.new_fsf_req.fsf_reqid);
889 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
890 rec->type.new_fsf_req.fsf_seqno);
891 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
892 rec->type.new_fsf_req.fsf_issued);
893 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
894 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
895 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
896 rec->type.new_fsf_req.fsf_reqid);
897 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
898 rec->type.new_fsf_req.fsf_seqno);
899 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
900 rec->type.new_fsf_req.fsf_issued);
901 } 885 }
902 886
903 len += sprintf(out_buf + len, "\n"); 887 len += sprintf(out_buf + len, "\n");
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e260d19fa717..7f551d66f47f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -152,11 +152,6 @@ typedef u32 scsi_lun_t;
152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
154 154
155/* Retry 5 times every 2 second, then every minute */
156#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
157#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
158#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
159
160/* timeout value for "default timer" for fsf requests */ 155/* timeout value for "default timer" for fsf requests */
161#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 156#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
162 157
@@ -429,11 +424,7 @@ struct zfcp_scsi_dbf_record {
429 u32 fsf_seqno; 424 u32 fsf_seqno;
430 u64 fsf_issued; 425 u64 fsf_issued;
431 union { 426 union {
432 struct { 427 u64 old_fsf_reqid;
433 u64 fsf_reqid;
434 u32 fsf_seqno;
435 u64 fsf_issued;
436 } new_fsf_req;
437 struct { 428 struct {
438 u8 rsp_validity; 429 u8 rsp_validity;
439 u8 rsp_scsi_status; 430 u8 rsp_scsi_status;
@@ -915,8 +906,6 @@ struct zfcp_adapter {
915 wwn_t peer_wwnn; /* P2P peer WWNN */ 906 wwn_t peer_wwnn; /* P2P peer WWNN */
916 wwn_t peer_wwpn; /* P2P peer WWPN */ 907 wwn_t peer_wwpn; /* P2P peer WWPN */
917 u32 peer_d_id; /* P2P peer D_ID */ 908 u32 peer_d_id; /* P2P peer D_ID */
918 wwn_t physical_wwpn; /* WWPN of physical port */
919 u32 physical_s_id; /* local FC port ID */
920 struct ccw_device *ccw_device; /* S/390 ccw device */ 909 struct ccw_device *ccw_device; /* S/390 ccw device */
921 u8 fc_service_class; 910 u8 fc_service_class;
922 u32 hydra_version; /* Hydra version */ 911 u32 hydra_version; /* Hydra version */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index da947e662031..e3c4bdd29a60 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -2246,15 +2246,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2246{ 2246{
2247 int retval; 2247 int retval;
2248 2248
2249 if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2250 &erp_action->adapter->status)) &&
2251 (erp_action->adapter->adapter_features &
2252 FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2253 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2254 atomic_set(&erp_action->adapter->erp_counter, 0);
2255 return ZFCP_ERP_FAILED;
2256 }
2257
2258 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2249 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2259 if (retval == ZFCP_ERP_FAILED) 2250 if (retval == ZFCP_ERP_FAILED)
2260 return ZFCP_ERP_FAILED; 2251 return ZFCP_ERP_FAILED;
@@ -2266,13 +2257,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2266 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2257 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2267} 2258}
2268 2259
2269/*
2270 * function:
2271 *
2272 * purpose:
2273 *
2274 * returns:
2275 */
2276static int 2260static int
2277zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2261zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2278{ 2262{
@@ -2350,48 +2334,40 @@ static int
2350zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2334zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2351{ 2335{
2352 int ret; 2336 int ret;
2353 int retries; 2337 struct zfcp_adapter *adapter;
2354 int sleep;
2355 struct zfcp_adapter *adapter = erp_action->adapter;
2356 2338
2339 adapter = erp_action->adapter;
2357 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2340 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2358 2341
2359 retries = 0; 2342 write_lock(&adapter->erp_lock);
2360 do { 2343 zfcp_erp_action_to_running(erp_action);
2361 write_lock(&adapter->erp_lock); 2344 write_unlock(&adapter->erp_lock);
2362 zfcp_erp_action_to_running(erp_action);
2363 write_unlock(&adapter->erp_lock);
2364 zfcp_erp_timeout_init(erp_action);
2365 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2366 if (ret == -EOPNOTSUPP) {
2367 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2368 return ZFCP_ERP_SUCCEEDED;
2369 } else if (ret) {
2370 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2371 return ZFCP_ERP_FAILED;
2372 }
2373 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2374 2345
2375 down(&adapter->erp_ready_sem); 2346 zfcp_erp_timeout_init(erp_action);
2376 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2347 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2377 ZFCP_LOG_INFO("error: exchange of port data " 2348 if (ret == -EOPNOTSUPP) {
2378 "for adapter %s timed out\n", 2349 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2379 zfcp_get_busid_by_adapter(adapter)); 2350 return ZFCP_ERP_SUCCEEDED;
2380 break; 2351 } else if (ret) {
2381 } 2352 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2382 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2353 return ZFCP_ERP_FAILED;
2383 &adapter->status)) 2354 }
2384 break; 2355 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2385 2356
2386 if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2357 ret = ZFCP_ERP_SUCCEEDED;
2387 sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2358 down(&adapter->erp_ready_sem);
2388 retries++; 2359 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2389 } else 2360 ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
2390 sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2361 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2391 schedule_timeout(sleep); 2362 ret = ZFCP_ERP_FAILED;
2392 } while (1); 2363 }
2364 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
2365 ZFCP_LOG_INFO("error: exchange port data failed (adapter "
2366 "%s\n", zfcp_get_busid_by_adapter(adapter));
2367 ret = ZFCP_ERP_FAILED;
2368 }
2393 2369
2394 return ZFCP_ERP_SUCCEEDED; 2370 return ret;
2395} 2371}
2396 2372
2397/* 2373/*
@@ -3439,6 +3415,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3439 "(adapter %s, wwpn=0x%016Lx)\n", 3415 "(adapter %s, wwpn=0x%016Lx)\n",
3440 zfcp_get_busid_by_port(port), 3416 zfcp_get_busid_by_port(port),
3441 port->wwpn); 3417 port->wwpn);
3418 else
3419 scsi_flush_work(adapter->scsi_host);
3442 } 3420 }
3443 zfcp_port_put(port); 3421 zfcp_port_put(port);
3444 break; 3422 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c1ba7cf1b496..700f5402a978 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -194,9 +194,10 @@ extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
195 195
196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
197 struct scsi_cmnd *); 197 struct scsi_cmnd *,
198 struct zfcp_fsf_req *);
198extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 199extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
199 struct scsi_cmnd *, 200 struct scsi_cmnd *, struct zfcp_fsf_req *,
200 struct zfcp_fsf_req *); 201 struct zfcp_fsf_req *);
201extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 202extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
202 struct scsi_cmnd *); 203 struct scsi_cmnd *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9f0cb3d820c0..662ec571d73b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -388,6 +388,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
388 case FSF_PROT_LINK_DOWN: 388 case FSF_PROT_LINK_DOWN:
389 zfcp_fsf_link_down_info_eval(adapter, 389 zfcp_fsf_link_down_info_eval(adapter,
390 &prot_status_qual->link_down_info); 390 &prot_status_qual->link_down_info);
391 zfcp_erp_adapter_reopen(adapter, 0);
391 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 392 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
392 break; 393 break;
393 394
@@ -558,10 +559,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
558 559
559 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 560 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
560 561
561 if (link_down == NULL) { 562 if (link_down == NULL)
562 zfcp_erp_adapter_reopen(adapter, 0); 563 goto out;
563 return;
564 }
565 564
566 switch (link_down->error_code) { 565 switch (link_down->error_code) {
567 case FSF_PSQ_LINK_NO_LIGHT: 566 case FSF_PSQ_LINK_NO_LIGHT:
@@ -643,16 +642,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
643 link_down->explanation_code, 642 link_down->explanation_code,
644 link_down->vendor_specific_code); 643 link_down->vendor_specific_code);
645 644
646 switch (link_down->error_code) { 645 out:
647 case FSF_PSQ_LINK_NO_LIGHT: 646 zfcp_erp_adapter_failed(adapter);
648 case FSF_PSQ_LINK_WRAP_PLUG:
649 case FSF_PSQ_LINK_NO_FCP:
650 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
651 zfcp_erp_adapter_reopen(adapter, 0);
652 break;
653 default:
654 zfcp_erp_adapter_failed(adapter);
655 }
656} 647}
657 648
658/* 649/*
@@ -2304,6 +2295,35 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2304 return retval; 2295 return retval;
2305} 2296}
2306 2297
2298/**
2299 * zfcp_fsf_exchange_port_evaluate
2300 * @fsf_req: fsf_req which belongs to xchg port data request
2301 * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
2302 */
2303static void
2304zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2305{
2306 struct zfcp_adapter *adapter;
2307 struct fsf_qtcb *qtcb;
2308 struct fsf_qtcb_bottom_port *bottom, *data;
2309 struct Scsi_Host *shost;
2310
2311 adapter = fsf_req->adapter;
2312 qtcb = fsf_req->qtcb;
2313 bottom = &qtcb->bottom.port;
2314 shost = adapter->scsi_host;
2315
2316 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2317 if (data)
2318 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2319
2320 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2321 fc_host_permanent_port_name(shost) = bottom->wwpn;
2322 else
2323 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2324 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2325 fc_host_supported_speeds(shost) = bottom->supported_speed;
2326}
2307 2327
2308/** 2328/**
2309 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request 2329 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
@@ -2312,38 +2332,26 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2312static void 2332static void
2313zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2333zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2314{ 2334{
2315 struct zfcp_adapter *adapter = fsf_req->adapter; 2335 struct zfcp_adapter *adapter;
2316 struct Scsi_Host *shost = adapter->scsi_host; 2336 struct fsf_qtcb *qtcb;
2317 struct fsf_qtcb *qtcb = fsf_req->qtcb; 2337
2318 struct fsf_qtcb_bottom_port *bottom, *data; 2338 adapter = fsf_req->adapter;
2339 qtcb = fsf_req->qtcb;
2319 2340
2320 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2341 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2321 return; 2342 return;
2322 2343
2323 switch (qtcb->header.fsf_status) { 2344 switch (qtcb->header.fsf_status) {
2324 case FSF_GOOD: 2345 case FSF_GOOD:
2346 zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
2325 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2347 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2326
2327 bottom = &qtcb->bottom.port;
2328 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2329 if (data)
2330 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2331 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2332 fc_host_permanent_port_name(shost) = bottom->wwpn;
2333 else
2334 fc_host_permanent_port_name(shost) =
2335 fc_host_port_name(shost);
2336 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2337 fc_host_supported_speeds(shost) = bottom->supported_speed;
2338 break; 2348 break;
2339
2340 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2349 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2350 zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
2341 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2351 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2342
2343 zfcp_fsf_link_down_info_eval(adapter, 2352 zfcp_fsf_link_down_info_eval(adapter,
2344 &qtcb->header.fsf_status_qual.link_down_info); 2353 &qtcb->header.fsf_status_qual.link_down_info);
2345 break; 2354 break;
2346
2347 default: 2355 default:
2348 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2356 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2349 debug_event(adapter->erp_dbf, 0, 2357 debug_event(adapter->erp_dbf, 0,
@@ -4203,11 +4211,11 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4203 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4211 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4204 4212
4205 if (scpnt->result != 0) 4213 if (scpnt->result != 0)
4206 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); 4214 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req);
4207 else if (scpnt->retries > 0) 4215 else if (scpnt->retries > 0)
4208 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); 4216 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req);
4209 else 4217 else
4210 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); 4218 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req);
4211 4219
4212 /* cleanup pointer (need this especially for abort) */ 4220 /* cleanup pointer (need this especially for abort) */
4213 scpnt->host_scribble = NULL; 4221 scpnt->host_scribble = NULL;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e0803757c0fa..9f6b4d7a46f3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -242,7 +242,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
243 zfcp_scsi_dbf_event_result("fail", 4, 243 zfcp_scsi_dbf_event_result("fail", 4,
244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
245 scpnt); 245 scpnt, NULL);
246 /* return directly */ 246 /* return directly */
247 scpnt->scsi_done(scpnt); 247 scpnt->scsi_done(scpnt);
248} 248}
@@ -446,7 +446,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
447 if (!old_fsf_req) { 447 if (!old_fsf_req) {
448 write_unlock_irqrestore(&adapter->abort_lock, flags); 448 write_unlock_irqrestore(&adapter->abort_lock, flags);
449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); 449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL);
450 retval = SUCCESS; 450 retval = SUCCESS;
451 goto out; 451 goto out;
452 } 452 }
@@ -460,6 +460,8 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
460 adapter, unit, 0); 460 adapter, unit, 0);
461 if (!new_fsf_req) { 461 if (!new_fsf_req) {
462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
463 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
464 old_fsf_req);
463 retval = FAILED; 465 retval = FAILED;
464 goto out; 466 goto out;
465 } 467 }
@@ -470,13 +472,16 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
470 472
471 /* status should be valid since signals were not permitted */ 473 /* status should be valid since signals were not permitted */
472 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 474 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
473 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); 475 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
476 NULL);
474 retval = SUCCESS; 477 retval = SUCCESS;
475 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 478 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
476 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); 479 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req,
480 NULL);
477 retval = SUCCESS; 481 retval = SUCCESS;
478 } else { 482 } else {
479 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); 483 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req,
484 NULL);
480 retval = FAILED; 485 retval = FAILED;
481 } 486 }
482 zfcp_fsf_req_free(new_fsf_req); 487 zfcp_fsf_req_free(new_fsf_req);
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index dfc07370f412..b29ac25e07f3 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -55,8 +55,6 @@ ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
58ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
59ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
60ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 58ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
61ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 59ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
62ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 60ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
@@ -241,8 +239,6 @@ static struct attribute *zfcp_adapter_attrs[] = {
241 &dev_attr_peer_wwnn.attr, 239 &dev_attr_peer_wwnn.attr,
242 &dev_attr_peer_wwpn.attr, 240 &dev_attr_peer_wwpn.attr,
243 &dev_attr_peer_d_id.attr, 241 &dev_attr_peer_d_id.attr,
244 &dev_attr_physical_wwpn.attr,
245 &dev_attr_physical_s_id.attr,
246 &dev_attr_card_version.attr, 242 &dev_attr_card_version.attr,
247 &dev_attr_lic_version.attr, 243 &dev_attr_lic_version.attr,
248 &dev_attr_status.attr, 244 &dev_attr_status.attr,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 31c497542272..d9152d02088c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -61,6 +61,7 @@
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers. 63 2.26.02.004 - Add support for 9550SX controllers.
64 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64*/ 65*/
65 66
66#include <linux/module.h> 67#include <linux/module.h>
@@ -84,7 +85,7 @@
84#include "3w-9xxx.h" 85#include "3w-9xxx.h"
85 86
86/* Globals */ 87/* Globals */
87#define TW_DRIVER_VERSION "2.26.02.004" 88#define TW_DRIVER_VERSION "2.26.02.005"
88static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 89static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
89static unsigned int twa_device_extension_count; 90static unsigned int twa_device_extension_count;
90static int twa_major = -1; 91static int twa_major = -1;
@@ -1408,7 +1409,7 @@ static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int requ
1408 dma_addr_t mapping; 1409 dma_addr_t mapping;
1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1410 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410 struct pci_dev *pdev = tw_dev->tw_pci_dev; 1411 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1411 int retval = 0; 1412 dma_addr_t retval = 0;
1412 1413
1413 if (cmd->request_bufflen == 0) { 1414 if (cmd->request_bufflen == 0) {
1414 retval = 0; 1415 retval = 0;
@@ -1798,7 +1799,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1798 int i, sg_count; 1799 int i, sg_count;
1799 struct scsi_cmnd *srb = NULL; 1800 struct scsi_cmnd *srb = NULL;
1800 struct scatterlist *sglist = NULL; 1801 struct scatterlist *sglist = NULL;
1801 u32 buffaddr = 0x0; 1802 dma_addr_t buffaddr = 0x0;
1802 int retval = 1; 1803 int retval = 1;
1803 1804
1804 if (tw_dev->srb[request_id]) { 1805 if (tw_dev->srb[request_id]) {
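The 3w-9xxx changes above fix single-buffer mapping on systems with 4GB or more of memory: the DMA handle returned by the mapping call was being held in an int (and the raw-I/O buffer address in a u32), which truncates a 64-bit dma_addr_t. A minimal standalone sketch of the failure mode, with hypothetical names and a local typedef purely for illustration:

/* Sketch: why a 32-bit variable cannot hold a 64-bit DMA handle. */
typedef unsigned long long dma_addr_t;		/* 64-bit on affected configs */

static unsigned int buggy_store(dma_addr_t mapping)
{
	/* mapping == 0x123456000 (above 4GB) comes back as 0x23456000,
	 * so the controller would DMA to the wrong physical pages.     */
	return (unsigned int)mapping;
}

static dma_addr_t fixed_store(dma_addr_t mapping)
{
	return mapping;				/* full handle preserved */
}
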
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7139659dd952..a16f8ded8f1d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -173,10 +173,10 @@ int aac_get_config_status(struct aac_dev *dev)
173 int status = 0; 173 int status = 0;
174 struct fib * fibptr; 174 struct fib * fibptr;
175 175
176 if (!(fibptr = fib_alloc(dev))) 176 if (!(fibptr = aac_fib_alloc(dev)))
177 return -ENOMEM; 177 return -ENOMEM;
178 178
179 fib_init(fibptr); 179 aac_fib_init(fibptr);
180 { 180 {
181 struct aac_get_config_status *dinfo; 181 struct aac_get_config_status *dinfo;
182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
@@ -186,7 +186,7 @@ int aac_get_config_status(struct aac_dev *dev)
186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
187 } 187 }
188 188
189 status = fib_send(ContainerCommand, 189 status = aac_fib_send(ContainerCommand,
190 fibptr, 190 fibptr,
191 sizeof (struct aac_get_config_status), 191 sizeof (struct aac_get_config_status),
192 FsaNormal, 192 FsaNormal,
@@ -209,30 +209,30 @@ int aac_get_config_status(struct aac_dev *dev)
209 status = -EINVAL; 209 status = -EINVAL;
210 } 210 }
211 } 211 }
212 fib_complete(fibptr); 212 aac_fib_complete(fibptr);
213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
214 if (status >= 0) { 214 if (status >= 0) {
215 if (commit == 1) { 215 if (commit == 1) {
216 struct aac_commit_config * dinfo; 216 struct aac_commit_config * dinfo;
217 fib_init(fibptr); 217 aac_fib_init(fibptr);
218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 218 dinfo = (struct aac_commit_config *) fib_data(fibptr);
219 219
220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 220 dinfo->command = cpu_to_le32(VM_ContainerConfig);
221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
222 222
223 status = fib_send(ContainerCommand, 223 status = aac_fib_send(ContainerCommand,
224 fibptr, 224 fibptr,
225 sizeof (struct aac_commit_config), 225 sizeof (struct aac_commit_config),
226 FsaNormal, 226 FsaNormal,
227 1, 1, 227 1, 1,
228 NULL, NULL); 228 NULL, NULL);
229 fib_complete(fibptr); 229 aac_fib_complete(fibptr);
230 } else if (commit == 0) { 230 } else if (commit == 0) {
231 printk(KERN_WARNING 231 printk(KERN_WARNING
232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 232 "aac_get_config_status: Foreign device configurations are being ignored\n");
233 } 233 }
234 } 234 }
235 fib_free(fibptr); 235 aac_fib_free(fibptr);
236 return status; 236 return status;
237} 237}
238 238
@@ -255,15 +255,15 @@ int aac_get_containers(struct aac_dev *dev)
255 255
256 instance = dev->scsi_host_ptr->unique_id; 256 instance = dev->scsi_host_ptr->unique_id;
257 257
258 if (!(fibptr = fib_alloc(dev))) 258 if (!(fibptr = aac_fib_alloc(dev)))
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 fib_init(fibptr); 261 aac_fib_init(fibptr);
262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr);
263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 263 dinfo->command = cpu_to_le32(VM_ContainerConfig);
264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
265 265
266 status = fib_send(ContainerCommand, 266 status = aac_fib_send(ContainerCommand,
267 fibptr, 267 fibptr,
268 sizeof (struct aac_get_container_count), 268 sizeof (struct aac_get_container_count),
269 FsaNormal, 269 FsaNormal,
@@ -272,7 +272,7 @@ int aac_get_containers(struct aac_dev *dev)
272 if (status >= 0) { 272 if (status >= 0) {
273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
275 fib_complete(fibptr); 275 aac_fib_complete(fibptr);
276 } 276 }
277 277
278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
@@ -280,7 +280,7 @@ int aac_get_containers(struct aac_dev *dev)
280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
282 if (!fsa_dev_ptr) { 282 if (!fsa_dev_ptr) {
283 fib_free(fibptr); 283 aac_fib_free(fibptr);
284 return -ENOMEM; 284 return -ENOMEM;
285 } 285 }
286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
@@ -294,14 +294,14 @@ int aac_get_containers(struct aac_dev *dev)
294 294
295 fsa_dev_ptr[index].devname[0] = '\0'; 295 fsa_dev_ptr[index].devname[0] = '\0';
296 296
297 fib_init(fibptr); 297 aac_fib_init(fibptr);
298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 298 dinfo = (struct aac_query_mount *) fib_data(fibptr);
299 299
300 dinfo->command = cpu_to_le32(VM_NameServe); 300 dinfo->command = cpu_to_le32(VM_NameServe);
301 dinfo->count = cpu_to_le32(index); 301 dinfo->count = cpu_to_le32(index);
302 dinfo->type = cpu_to_le32(FT_FILESYS); 302 dinfo->type = cpu_to_le32(FT_FILESYS);
303 303
304 status = fib_send(ContainerCommand, 304 status = aac_fib_send(ContainerCommand,
305 fibptr, 305 fibptr,
306 sizeof (struct aac_query_mount), 306 sizeof (struct aac_query_mount),
307 FsaNormal, 307 FsaNormal,
@@ -319,7 +319,7 @@ int aac_get_containers(struct aac_dev *dev)
319 dinfo->count = cpu_to_le32(index); 319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS); 320 dinfo->type = cpu_to_le32(FT_FILESYS);
321 321
322 if (fib_send(ContainerCommand, 322 if (aac_fib_send(ContainerCommand,
323 fibptr, 323 fibptr,
324 sizeof(struct aac_query_mount), 324 sizeof(struct aac_query_mount),
325 FsaNormal, 325 FsaNormal,
@@ -347,7 +347,7 @@ int aac_get_containers(struct aac_dev *dev)
347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
348 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
349 } 349 }
350 fib_complete(fibptr); 350 aac_fib_complete(fibptr);
351 /* 351 /*
352 * If there are no more containers, then stop asking. 352 * If there are no more containers, then stop asking.
353 */ 353 */
@@ -355,7 +355,7 @@ int aac_get_containers(struct aac_dev *dev)
355 break; 355 break;
356 } 356 }
357 } 357 }
358 fib_free(fibptr); 358 aac_fib_free(fibptr);
359 return status; 359 return status;
360} 360}
361 361
@@ -413,8 +413,8 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
413 413
414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
415 415
416 fib_complete(fibptr); 416 aac_fib_complete(fibptr);
417 fib_free(fibptr); 417 aac_fib_free(fibptr);
418 scsicmd->scsi_done(scsicmd); 418 scsicmd->scsi_done(scsicmd);
419} 419}
420 420
@@ -430,10 +430,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
430 430
431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
432 432
433 if (!(cmd_fibcontext = fib_alloc(dev))) 433 if (!(cmd_fibcontext = aac_fib_alloc(dev)))
434 return -ENOMEM; 434 return -ENOMEM;
435 435
436 fib_init(cmd_fibcontext); 436 aac_fib_init(cmd_fibcontext);
437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
438 438
439 dinfo->command = cpu_to_le32(VM_ContainerConfig); 439 dinfo->command = cpu_to_le32(VM_ContainerConfig);
@@ -441,7 +441,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
441 dinfo->cid = cpu_to_le32(cid); 441 dinfo->cid = cpu_to_le32(cid);
442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
443 443
444 status = fib_send(ContainerCommand, 444 status = aac_fib_send(ContainerCommand,
445 cmd_fibcontext, 445 cmd_fibcontext,
446 sizeof (struct aac_get_name), 446 sizeof (struct aac_get_name),
447 FsaNormal, 447 FsaNormal,
@@ -455,14 +455,14 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
455 if (status == -EINPROGRESS) 455 if (status == -EINPROGRESS)
456 return 0; 456 return 0;
457 457
458 printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); 458 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
459 fib_complete(cmd_fibcontext); 459 aac_fib_complete(cmd_fibcontext);
460 fib_free(cmd_fibcontext); 460 aac_fib_free(cmd_fibcontext);
461 return -1; 461 return -1;
462} 462}
463 463
464/** 464/**
465 * probe_container - query a logical volume 465 * aac_probe_container - query a logical volume
466 * @dev: device to query 466 * @dev: device to query
467 * @cid: container identifier 467 * @cid: container identifier
468 * 468 *
@@ -470,7 +470,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
470 * is updated in the struct fsa_dev_info structure rather than returned. 470 * is updated in the struct fsa_dev_info structure rather than returned.
471 */ 471 */
472 472
473int probe_container(struct aac_dev *dev, int cid) 473int aac_probe_container(struct aac_dev *dev, int cid)
474{ 474{
475 struct fsa_dev_info *fsa_dev_ptr; 475 struct fsa_dev_info *fsa_dev_ptr;
476 int status; 476 int status;
@@ -482,10 +482,10 @@ int probe_container(struct aac_dev *dev, int cid)
482 fsa_dev_ptr = dev->fsa_dev; 482 fsa_dev_ptr = dev->fsa_dev;
483 instance = dev->scsi_host_ptr->unique_id; 483 instance = dev->scsi_host_ptr->unique_id;
484 484
485 if (!(fibptr = fib_alloc(dev))) 485 if (!(fibptr = aac_fib_alloc(dev)))
486 return -ENOMEM; 486 return -ENOMEM;
487 487
488 fib_init(fibptr); 488 aac_fib_init(fibptr);
489 489
490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 490 dinfo = (struct aac_query_mount *)fib_data(fibptr);
491 491
@@ -493,14 +493,14 @@ int probe_container(struct aac_dev *dev, int cid)
493 dinfo->count = cpu_to_le32(cid); 493 dinfo->count = cpu_to_le32(cid);
494 dinfo->type = cpu_to_le32(FT_FILESYS); 494 dinfo->type = cpu_to_le32(FT_FILESYS);
495 495
496 status = fib_send(ContainerCommand, 496 status = aac_fib_send(ContainerCommand,
497 fibptr, 497 fibptr,
498 sizeof(struct aac_query_mount), 498 sizeof(struct aac_query_mount),
499 FsaNormal, 499 FsaNormal,
500 1, 1, 500 1, 1,
501 NULL, NULL); 501 NULL, NULL);
502 if (status < 0) { 502 if (status < 0) {
503 printk(KERN_WARNING "aacraid: probe_container query failed.\n"); 503 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n");
504 goto error; 504 goto error;
505 } 505 }
506 506
@@ -512,7 +512,7 @@ int probe_container(struct aac_dev *dev, int cid)
512 dinfo->count = cpu_to_le32(cid); 512 dinfo->count = cpu_to_le32(cid);
513 dinfo->type = cpu_to_le32(FT_FILESYS); 513 dinfo->type = cpu_to_le32(FT_FILESYS);
514 514
515 if (fib_send(ContainerCommand, 515 if (aac_fib_send(ContainerCommand,
516 fibptr, 516 fibptr,
517 sizeof(struct aac_query_mount), 517 sizeof(struct aac_query_mount),
518 FsaNormal, 518 FsaNormal,
@@ -535,8 +535,8 @@ int probe_container(struct aac_dev *dev, int cid)
535 } 535 }
536 536
537error: 537error:
538 fib_complete(fibptr); 538 aac_fib_complete(fibptr);
539 fib_free(fibptr); 539 aac_fib_free(fibptr);
540 540
541 return status; 541 return status;
542} 542}
@@ -700,14 +700,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
700 struct aac_bus_info *command; 700 struct aac_bus_info *command;
701 struct aac_bus_info_response *bus_info; 701 struct aac_bus_info_response *bus_info;
702 702
703 if (!(fibptr = fib_alloc(dev))) 703 if (!(fibptr = aac_fib_alloc(dev)))
704 return -ENOMEM; 704 return -ENOMEM;
705 705
706 fib_init(fibptr); 706 aac_fib_init(fibptr);
707 info = (struct aac_adapter_info *) fib_data(fibptr); 707 info = (struct aac_adapter_info *) fib_data(fibptr);
708 memset(info,0,sizeof(*info)); 708 memset(info,0,sizeof(*info));
709 709
710 rcode = fib_send(RequestAdapterInfo, 710 rcode = aac_fib_send(RequestAdapterInfo,
711 fibptr, 711 fibptr,
712 sizeof(*info), 712 sizeof(*info),
713 FsaNormal, 713 FsaNormal,
@@ -716,8 +716,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
716 NULL); 716 NULL);
717 717
718 if (rcode < 0) { 718 if (rcode < 0) {
719 fib_complete(fibptr); 719 aac_fib_complete(fibptr);
720 fib_free(fibptr); 720 aac_fib_free(fibptr);
721 return rcode; 721 return rcode;
722 } 722 }
723 memcpy(&dev->adapter_info, info, sizeof(*info)); 723 memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -725,13 +725,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
726 struct aac_supplement_adapter_info * info; 726 struct aac_supplement_adapter_info * info;
727 727
728 fib_init(fibptr); 728 aac_fib_init(fibptr);
729 729
730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
731 731
732 memset(info,0,sizeof(*info)); 732 memset(info,0,sizeof(*info));
733 733
734 rcode = fib_send(RequestSupplementAdapterInfo, 734 rcode = aac_fib_send(RequestSupplementAdapterInfo,
735 fibptr, 735 fibptr,
736 sizeof(*info), 736 sizeof(*info),
737 FsaNormal, 737 FsaNormal,
@@ -748,7 +748,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
748 * GetBusInfo 748 * GetBusInfo
749 */ 749 */
750 750
751 fib_init(fibptr); 751 aac_fib_init(fibptr);
752 752
753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
754 754
@@ -761,7 +761,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
761 command->MethodId = cpu_to_le32(1); 761 command->MethodId = cpu_to_le32(1);
762 command->CtlCmd = cpu_to_le32(GetBusInfo); 762 command->CtlCmd = cpu_to_le32(GetBusInfo);
763 763
764 rcode = fib_send(ContainerCommand, 764 rcode = aac_fib_send(ContainerCommand,
765 fibptr, 765 fibptr,
766 sizeof (*bus_info), 766 sizeof (*bus_info),
767 FsaNormal, 767 FsaNormal,
@@ -891,8 +891,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
891 } 891 }
892 } 892 }
893 893
894 fib_complete(fibptr); 894 aac_fib_complete(fibptr);
895 fib_free(fibptr); 895 aac_fib_free(fibptr);
896 896
897 return rcode; 897 return rcode;
898} 898}
@@ -976,8 +976,8 @@ static void io_callback(void *context, struct fib * fibptr)
976 ? sizeof(scsicmd->sense_buffer) 976 ? sizeof(scsicmd->sense_buffer)
977 : sizeof(dev->fsa_dev[cid].sense_data)); 977 : sizeof(dev->fsa_dev[cid].sense_data));
978 } 978 }
979 fib_complete(fibptr); 979 aac_fib_complete(fibptr);
980 fib_free(fibptr); 980 aac_fib_free(fibptr);
981 981
982 scsicmd->scsi_done(scsicmd); 982 scsicmd->scsi_done(scsicmd);
983} 983}
@@ -1062,11 +1062,11 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1062 /* 1062 /*
1063 * Alocate and initialize a Fib 1063 * Alocate and initialize a Fib
1064 */ 1064 */
1065 if (!(cmd_fibcontext = fib_alloc(dev))) { 1065 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1066 return -1; 1066 return -1;
1067 } 1067 }
1068 1068
1069 fib_init(cmd_fibcontext); 1069 aac_fib_init(cmd_fibcontext);
1070 1070
1071 if (dev->raw_io_interface) { 1071 if (dev->raw_io_interface) {
1072 struct aac_raw_io *readcmd; 1072 struct aac_raw_io *readcmd;
@@ -1086,7 +1086,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1086 /* 1086 /*
1087 * Now send the Fib to the adapter 1087 * Now send the Fib to the adapter
1088 */ 1088 */
1089 status = fib_send(ContainerRawIo, 1089 status = aac_fib_send(ContainerRawIo,
1090 cmd_fibcontext, 1090 cmd_fibcontext,
1091 fibsize, 1091 fibsize,
1092 FsaNormal, 1092 FsaNormal,
@@ -1112,7 +1112,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1112 /* 1112 /*
1113 * Now send the Fib to the adapter 1113 * Now send the Fib to the adapter
1114 */ 1114 */
1115 status = fib_send(ContainerCommand64, 1115 status = aac_fib_send(ContainerCommand64,
1116 cmd_fibcontext, 1116 cmd_fibcontext,
1117 fibsize, 1117 fibsize,
1118 FsaNormal, 1118 FsaNormal,
@@ -1136,7 +1136,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1136 /* 1136 /*
1137 * Now send the Fib to the adapter 1137 * Now send the Fib to the adapter
1138 */ 1138 */
1139 status = fib_send(ContainerCommand, 1139 status = aac_fib_send(ContainerCommand,
1140 cmd_fibcontext, 1140 cmd_fibcontext,
1141 fibsize, 1141 fibsize,
1142 FsaNormal, 1142 FsaNormal,
@@ -1153,14 +1153,14 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1153 if (status == -EINPROGRESS) 1153 if (status == -EINPROGRESS)
1154 return 0; 1154 return 0;
1155 1155
1156 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); 1156 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
1157 /* 1157 /*
1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL
1159 */ 1159 */
1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1161 scsicmd->scsi_done(scsicmd); 1161 scsicmd->scsi_done(scsicmd);
1162 fib_complete(cmd_fibcontext); 1162 aac_fib_complete(cmd_fibcontext);
1163 fib_free(cmd_fibcontext); 1163 aac_fib_free(cmd_fibcontext);
1164 return 0; 1164 return 0;
1165} 1165}
1166 1166
@@ -1228,12 +1228,12 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1228 /* 1228 /*
1229 * Allocate and initialize a Fib then setup a BlockWrite command 1229 * Allocate and initialize a Fib then setup a BlockWrite command
1230 */ 1230 */
1231 if (!(cmd_fibcontext = fib_alloc(dev))) { 1231 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1232 scsicmd->result = DID_ERROR << 16; 1232 scsicmd->result = DID_ERROR << 16;
1233 scsicmd->scsi_done(scsicmd); 1233 scsicmd->scsi_done(scsicmd);
1234 return 0; 1234 return 0;
1235 } 1235 }
1236 fib_init(cmd_fibcontext); 1236 aac_fib_init(cmd_fibcontext);
1237 1237
1238 if (dev->raw_io_interface) { 1238 if (dev->raw_io_interface) {
1239 struct aac_raw_io *writecmd; 1239 struct aac_raw_io *writecmd;
@@ -1253,7 +1253,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1253 /* 1253 /*
1254 * Now send the Fib to the adapter 1254 * Now send the Fib to the adapter
1255 */ 1255 */
1256 status = fib_send(ContainerRawIo, 1256 status = aac_fib_send(ContainerRawIo,
1257 cmd_fibcontext, 1257 cmd_fibcontext,
1258 fibsize, 1258 fibsize,
1259 FsaNormal, 1259 FsaNormal,
@@ -1279,7 +1279,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1279 /* 1279 /*
1280 * Now send the Fib to the adapter 1280 * Now send the Fib to the adapter
1281 */ 1281 */
1282 status = fib_send(ContainerCommand64, 1282 status = aac_fib_send(ContainerCommand64,
1283 cmd_fibcontext, 1283 cmd_fibcontext,
1284 fibsize, 1284 fibsize,
1285 FsaNormal, 1285 FsaNormal,
@@ -1305,7 +1305,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1305 /* 1305 /*
1306 * Now send the Fib to the adapter 1306 * Now send the Fib to the adapter
1307 */ 1307 */
1308 status = fib_send(ContainerCommand, 1308 status = aac_fib_send(ContainerCommand,
1309 cmd_fibcontext, 1309 cmd_fibcontext,
1310 fibsize, 1310 fibsize,
1311 FsaNormal, 1311 FsaNormal,
@@ -1322,15 +1322,15 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1322 return 0; 1322 return 0;
1323 } 1323 }
1324 1324
1325 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); 1325 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
1326 /* 1326 /*
1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL
1328 */ 1328 */
1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1330 scsicmd->scsi_done(scsicmd); 1330 scsicmd->scsi_done(scsicmd);
1331 1331
1332 fib_complete(cmd_fibcontext); 1332 aac_fib_complete(cmd_fibcontext);
1333 fib_free(cmd_fibcontext); 1333 aac_fib_free(cmd_fibcontext);
1334 return 0; 1334 return 0;
1335} 1335}
1336 1336
@@ -1369,8 +1369,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1369 sizeof(cmd->sense_buffer))); 1369 sizeof(cmd->sense_buffer)));
1370 } 1370 }
1371 1371
1372 fib_complete(fibptr); 1372 aac_fib_complete(fibptr);
1373 fib_free(fibptr); 1373 aac_fib_free(fibptr);
1374 cmd->scsi_done(cmd); 1374 cmd->scsi_done(cmd);
1375} 1375}
1376 1376
@@ -1407,10 +1407,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1407 * Allocate and initialize a Fib 1407 * Allocate and initialize a Fib
1408 */ 1408 */
1409 if (!(cmd_fibcontext = 1409 if (!(cmd_fibcontext =
1410 fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1410 aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1411 return SCSI_MLQUEUE_HOST_BUSY; 1411 return SCSI_MLQUEUE_HOST_BUSY;
1412 1412
1413 fib_init(cmd_fibcontext); 1413 aac_fib_init(cmd_fibcontext);
1414 1414
1415 synchronizecmd = fib_data(cmd_fibcontext); 1415 synchronizecmd = fib_data(cmd_fibcontext);
1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
@@ -1422,7 +1422,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1422 /* 1422 /*
1423 * Now send the Fib to the adapter 1423 * Now send the Fib to the adapter
1424 */ 1424 */
1425 status = fib_send(ContainerCommand, 1425 status = aac_fib_send(ContainerCommand,
1426 cmd_fibcontext, 1426 cmd_fibcontext,
1427 sizeof(struct aac_synchronize), 1427 sizeof(struct aac_synchronize),
1428 FsaNormal, 1428 FsaNormal,
@@ -1437,9 +1437,9 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1437 return 0; 1437 return 0;
1438 1438
1439 printk(KERN_WARNING 1439 printk(KERN_WARNING
1440 "aac_synchronize: fib_send failed with status: %d.\n", status); 1440 "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
1441 fib_complete(cmd_fibcontext); 1441 aac_fib_complete(cmd_fibcontext);
1442 fib_free(cmd_fibcontext); 1442 aac_fib_free(cmd_fibcontext);
1443 return SCSI_MLQUEUE_HOST_BUSY; 1443 return SCSI_MLQUEUE_HOST_BUSY;
1444} 1444}
1445 1445
@@ -1465,7 +1465,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1465 * itself. 1465 * itself.
1466 */ 1466 */
1467 if (scmd_id(scsicmd) != host->this_id) { 1467 if (scmd_id(scsicmd) != host->this_id) {
1468 if ((scsicmd->device->channel == 0) ){ 1468 if ((scsicmd->device->channel == CONTAINER_CHANNEL)) {
1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
1470 scsicmd->result = DID_NO_CONNECT << 16; 1470 scsicmd->result = DID_NO_CONNECT << 16;
1471 scsicmd->scsi_done(scsicmd); 1471 scsicmd->scsi_done(scsicmd);
@@ -1488,7 +1488,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1488 case READ_CAPACITY: 1488 case READ_CAPACITY:
1489 case TEST_UNIT_READY: 1489 case TEST_UNIT_READY:
1490 spin_unlock_irq(host->host_lock); 1490 spin_unlock_irq(host->host_lock);
1491 probe_container(dev, cid); 1491 aac_probe_container(dev, cid);
1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1493 fsa_dev_ptr[cid].valid = 0; 1493 fsa_dev_ptr[cid].valid = 0;
1494 spin_lock_irq(host->host_lock); 1494 spin_lock_irq(host->host_lock);
@@ -1935,33 +1935,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1935 case SRB_STATUS_ERROR_RECOVERY: 1935 case SRB_STATUS_ERROR_RECOVERY:
1936 case SRB_STATUS_PENDING: 1936 case SRB_STATUS_PENDING:
1937 case SRB_STATUS_SUCCESS: 1937 case SRB_STATUS_SUCCESS:
1938 if(scsicmd->cmnd[0] == INQUIRY ){ 1938 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1939 u8 b;
1940 u8 b1;
1941 /* We can't expose disk devices because we can't tell whether they
1942 * are the raw container drives or stand alone drives. If they have
1943 * the removable bit set then we should expose them though.
1944 */
1945 b = (*(u8*)scsicmd->buffer)&0x1f;
1946 b1 = ((u8*)scsicmd->buffer)[1];
1947 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1948 || (b==TYPE_DISK && (b1&0x80)) ){
1949 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1950 /*
1951 * We will allow disk devices if in RAID/SCSI mode and
1952 * the channel is 2
1953 */
1954 } else if ((dev->raid_scsi_mode) &&
1955 (scmd_channel(scsicmd) == 2)) {
1956 scsicmd->result = DID_OK << 16 |
1957 COMMAND_COMPLETE << 8;
1958 } else {
1959 scsicmd->result = DID_NO_CONNECT << 16 |
1960 COMMAND_COMPLETE << 8;
1961 }
1962 } else {
1963 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1964 }
1965 break; 1939 break;
1966 case SRB_STATUS_DATA_OVERRUN: 1940 case SRB_STATUS_DATA_OVERRUN:
1967 switch(scsicmd->cmnd[0]){ 1941 switch(scsicmd->cmnd[0]){
@@ -1981,28 +1955,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1981 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1955 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1982 break; 1956 break;
1983 case INQUIRY: { 1957 case INQUIRY: {
1984 u8 b; 1958 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1985 u8 b1;
1986 /* We can't expose disk devices because we can't tell whether they
1987 * are the raw container drives or stand alone drives
1988 */
1989 b = (*(u8*)scsicmd->buffer)&0x0f;
1990 b1 = ((u8*)scsicmd->buffer)[1];
1991 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1992 || (b==TYPE_DISK && (b1&0x80)) ){
1993 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1994 /*
1995 * We will allow disk devices if in RAID/SCSI mode and
1996 * the channel is 2
1997 */
1998 } else if ((dev->raid_scsi_mode) &&
1999 (scmd_channel(scsicmd) == 2)) {
2000 scsicmd->result = DID_OK << 16 |
2001 COMMAND_COMPLETE << 8;
2002 } else {
2003 scsicmd->result = DID_NO_CONNECT << 16 |
2004 COMMAND_COMPLETE << 8;
2005 }
2006 break; 1959 break;
2007 } 1960 }
2008 default: 1961 default:
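Both hunks above drop the INQUIRY-based device filtering from aac_srb_callback(); the exposure decision moves out of the completion path (see the aac_slave_configure() rework in linit.c below). For reference, the removed checks decoded the first two bytes of standard INQUIRY data; a small sketch of that decoding, which follows SPC rather than anything aacraid-specific (helper name hypothetical):

/* Sketch: fields the removed check keyed off in the INQUIRY response. */
static int inquiry_is_nondisk_or_removable(const unsigned char *inq)
{
	unsigned char type      = inq[0] & 0x1f;	/* peripheral device type */
	unsigned char removable = inq[1] & 0x80;	/* RMB bit                */

	return type != 0x00 /* TYPE_DISK */ || removable;
}
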
@@ -2089,8 +2042,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2089 */ 2042 */
2090 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2043 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
2091 2044
2092 fib_complete(fibptr); 2045 aac_fib_complete(fibptr);
2093 fib_free(fibptr); 2046 aac_fib_free(fibptr);
2094 scsicmd->scsi_done(scsicmd); 2047 scsicmd->scsi_done(scsicmd);
2095} 2048}
2096 2049
@@ -2142,10 +2095,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2142 /* 2095 /*
2143 * Allocate and initialize a Fib then setup a BlockWrite command 2096 * Allocate and initialize a Fib then setup a BlockWrite command
2144 */ 2097 */
2145 if (!(cmd_fibcontext = fib_alloc(dev))) { 2098 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
2146 return -1; 2099 return -1;
2147 } 2100 }
2148 fib_init(cmd_fibcontext); 2101 aac_fib_init(cmd_fibcontext);
2149 2102
2150 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2103 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2151 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); 2104 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
@@ -2179,7 +2132,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2179 /* 2132 /*
2180 * Now send the Fib to the adapter 2133 * Now send the Fib to the adapter
2181 */ 2134 */
2182 status = fib_send(ScsiPortCommand64, cmd_fibcontext, 2135 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2183 fibsize, FsaNormal, 0, 1, 2136 fibsize, FsaNormal, 0, 1,
2184 (fib_callback) aac_srb_callback, 2137 (fib_callback) aac_srb_callback,
2185 (void *) scsicmd); 2138 (void *) scsicmd);
@@ -2201,7 +2154,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2201 /* 2154 /*
2202 * Now send the Fib to the adapter 2155 * Now send the Fib to the adapter
2203 */ 2156 */
2204 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2157 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2205 (fib_callback) aac_srb_callback, (void *) scsicmd); 2158 (fib_callback) aac_srb_callback, (void *) scsicmd);
2206 } 2159 }
2207 /* 2160 /*
@@ -2211,9 +2164,9 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2211 return 0; 2164 return 0;
2212 } 2165 }
2213 2166
2214 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); 2167 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
2215 fib_complete(cmd_fibcontext); 2168 aac_fib_complete(cmd_fibcontext);
2216 fib_free(cmd_fibcontext); 2169 aac_fib_free(cmd_fibcontext);
2217 2170
2218 return -1; 2171 return -1;
2219} 2172}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 66dbb6d2c506..2d430b7e8cf4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1774,16 +1774,16 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
1774struct scsi_cmnd; 1774struct scsi_cmnd;
1775 1775
1776const char *aac_driverinfo(struct Scsi_Host *); 1776const char *aac_driverinfo(struct Scsi_Host *);
1777struct fib *fib_alloc(struct aac_dev *dev); 1777struct fib *aac_fib_alloc(struct aac_dev *dev);
1778int fib_setup(struct aac_dev *dev); 1778int aac_fib_setup(struct aac_dev *dev);
1779void fib_map_free(struct aac_dev *dev); 1779void aac_fib_map_free(struct aac_dev *dev);
1780void fib_free(struct fib * context); 1780void aac_fib_free(struct fib * context);
1781void fib_init(struct fib * context); 1781void aac_fib_init(struct fib * context);
1782void aac_printf(struct aac_dev *dev, u32 val); 1782void aac_printf(struct aac_dev *dev, u32 val);
1783int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1783int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1786int fib_complete(struct fib * context); 1786int aac_fib_complete(struct fib * context);
1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1788struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1788struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1789int aac_get_config_status(struct aac_dev *dev); 1789int aac_get_config_status(struct aac_dev *dev);
@@ -1799,11 +1799,11 @@ unsigned int aac_command_normal(struct aac_queue * q);
1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1800int aac_command_thread(struct aac_dev * dev); 1800int aac_command_thread(struct aac_dev * dev);
1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1802int fib_adapter_complete(struct fib * fibptr, unsigned short size); 1802int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
1803struct aac_driver_ident* aac_get_driver_ident(int devtype); 1803struct aac_driver_ident* aac_get_driver_ident(int devtype);
1804int aac_get_adapter_info(struct aac_dev* dev); 1804int aac_get_adapter_info(struct aac_dev* dev);
1805int aac_send_shutdown(struct aac_dev *dev); 1805int aac_send_shutdown(struct aac_dev *dev);
1806int probe_container(struct aac_dev *dev, int cid); 1806int aac_probe_container(struct aac_dev *dev, int cid);
1807extern int numacb; 1807extern int numacb;
1808extern int acbsize; 1808extern int acbsize;
1809extern char aac_driver_version[]; 1809extern char aac_driver_version[];
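The aacraid.h hunk shows the whole FIB API gaining an aac_ prefix; the calls rewritten throughout aachba.c keep the same alloc/init/send/complete/free pattern. A condensed sketch of that lifecycle with the new names, mirroring aac_probe_container() above (error handling trimmed, payload setup elided):

/* Sketch: typical FIB round trip with the renamed helpers. */
static int example_container_command(struct aac_dev *dev)
{
	struct fib *fibptr;
	int status;

	fibptr = aac_fib_alloc(dev);			/* was fib_alloc()    */
	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);				/* was fib_init()     */

	/* ... fill fib_data(fibptr) with the command payload ... */

	status = aac_fib_send(ContainerCommand,		/* was fib_send()     */
			      fibptr, sizeof(struct aac_query_mount),
			      FsaNormal, 1, 1, NULL, NULL);

	aac_fib_complete(fibptr);			/* was fib_complete() */
	aac_fib_free(fibptr);				/* was fib_free()     */
	return status;
}
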
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 4fe79cd7c957..47fefca72695 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
63 unsigned size; 63 unsigned size;
64 int retval; 64 int retval;
65 65
66 fibptr = fib_alloc(dev); 66 fibptr = aac_fib_alloc(dev);
67 if(fibptr == NULL) { 67 if(fibptr == NULL) {
68 return -ENOMEM; 68 return -ENOMEM;
69 } 69 }
@@ -73,7 +73,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
73 * First copy in the header so that we can check the size field. 73 * First copy in the header so that we can check the size field.
74 */ 74 */
75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
76 fib_free(fibptr); 76 aac_fib_free(fibptr);
77 return -EFAULT; 77 return -EFAULT;
78 } 78 }
79 /* 79 /*
@@ -110,13 +110,13 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
110 */ 110 */
111 kfib->header.XferState = 0; 111 kfib->header.XferState = 0;
112 } else { 112 } else {
113 retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, 113 retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
114 le16_to_cpu(kfib->header.Size) , FsaNormal, 114 le16_to_cpu(kfib->header.Size) , FsaNormal,
115 1, 1, NULL, NULL); 115 1, 1, NULL, NULL);
116 if (retval) { 116 if (retval) {
117 goto cleanup; 117 goto cleanup;
118 } 118 }
119 if (fib_complete(fibptr) != 0) { 119 if (aac_fib_complete(fibptr) != 0) {
120 retval = -EINVAL; 120 retval = -EINVAL;
121 goto cleanup; 121 goto cleanup;
122 } 122 }
@@ -138,7 +138,7 @@ cleanup:
138 fibptr->hw_fib_pa = hw_fib_pa; 138 fibptr->hw_fib_pa = hw_fib_pa;
139 fibptr->hw_fib = hw_fib; 139 fibptr->hw_fib = hw_fib;
140 } 140 }
141 fib_free(fibptr); 141 aac_fib_free(fibptr);
142 return retval; 142 return retval;
143} 143}
144 144
@@ -464,10 +464,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
464 /* 464 /*
465 * Allocate and initialize a Fib then setup a BlockWrite command 465 * Allocate and initialize a Fib then setup a BlockWrite command
466 */ 466 */
467 if (!(srbfib = fib_alloc(dev))) { 467 if (!(srbfib = aac_fib_alloc(dev))) {
468 return -ENOMEM; 468 return -ENOMEM;
469 } 469 }
470 fib_init(srbfib); 470 aac_fib_init(srbfib);
471 471
472 srbcmd = (struct aac_srb*) fib_data(srbfib); 472 srbcmd = (struct aac_srb*) fib_data(srbfib);
473 473
@@ -601,7 +601,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
601 601
602 srbcmd->count = cpu_to_le32(byte_count); 602 srbcmd->count = cpu_to_le32(byte_count);
603 psg->count = cpu_to_le32(sg_indx+1); 603 psg->count = cpu_to_le32(sg_indx+1);
604 status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 604 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
605 } else { 605 } else {
606 struct user_sgmap* upsg = &user_srbcmd->sg; 606 struct user_sgmap* upsg = &user_srbcmd->sg;
607 struct sgmap* psg = &srbcmd->sg; 607 struct sgmap* psg = &srbcmd->sg;
@@ -649,7 +649,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
649 } 649 }
650 srbcmd->count = cpu_to_le32(byte_count); 650 srbcmd->count = cpu_to_le32(byte_count);
651 psg->count = cpu_to_le32(sg_indx+1); 651 psg->count = cpu_to_le32(sg_indx+1);
652 status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 652 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
653 } 653 }
654 654
655 if (status != 0){ 655 if (status != 0){
@@ -684,8 +684,8 @@ cleanup:
684 for(i=0; i <= sg_indx; i++){ 684 for(i=0; i <= sg_indx; i++){
685 kfree(sg_list[i]); 685 kfree(sg_list[i]);
686 } 686 }
687 fib_complete(srbfib); 687 aac_fib_complete(srbfib);
688 fib_free(srbfib); 688 aac_fib_free(srbfib);
689 689
690 return rcode; 690 return rcode;
691} 691}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 82821d331c07..1628d094943d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -185,17 +185,17 @@ int aac_send_shutdown(struct aac_dev * dev)
185 struct aac_close *cmd; 185 struct aac_close *cmd;
186 int status; 186 int status;
187 187
188 fibctx = fib_alloc(dev); 188 fibctx = aac_fib_alloc(dev);
189 if (!fibctx) 189 if (!fibctx)
190 return -ENOMEM; 190 return -ENOMEM;
191 fib_init(fibctx); 191 aac_fib_init(fibctx);
192 192
193 cmd = (struct aac_close *) fib_data(fibctx); 193 cmd = (struct aac_close *) fib_data(fibctx);
194 194
195 cmd->command = cpu_to_le32(VM_CloseAll); 195 cmd->command = cpu_to_le32(VM_CloseAll);
196 cmd->cid = cpu_to_le32(0xffffffff); 196 cmd->cid = cpu_to_le32(0xffffffff);
197 197
198 status = fib_send(ContainerCommand, 198 status = aac_fib_send(ContainerCommand,
199 fibctx, 199 fibctx,
200 sizeof(struct aac_close), 200 sizeof(struct aac_close),
201 FsaNormal, 201 FsaNormal,
@@ -203,8 +203,8 @@ int aac_send_shutdown(struct aac_dev * dev)
203 NULL, NULL); 203 NULL, NULL);
204 204
205 if (status == 0) 205 if (status == 0)
206 fib_complete(fibctx); 206 aac_fib_complete(fibctx);
207 fib_free(fibctx); 207 aac_fib_free(fibctx);
208 return status; 208 return status;
209} 209}
210 210
@@ -427,7 +427,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
427 /* 427 /*
428 * Initialize the list of fibs 428 * Initialize the list of fibs
429 */ 429 */
430 if(fib_setup(dev)<0){ 430 if (aac_fib_setup(dev) < 0) {
431 kfree(dev->queues); 431 kfree(dev->queues);
432 return NULL; 432 return NULL;
433 } 433 }
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 014cc8d54a9f..609fd19b1844 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -67,27 +67,27 @@ static int fib_map_alloc(struct aac_dev *dev)
67} 67}
68 68
69/** 69/**
70 * fib_map_free - free the fib objects 70 * aac_fib_map_free - free the fib objects
71 * @dev: Adapter to free 71 * @dev: Adapter to free
72 * 72 *
73 * Free the PCI mappings and the memory allocated for FIB blocks 73 * Free the PCI mappings and the memory allocated for FIB blocks
74 * on this adapter. 74 * on this adapter.
75 */ 75 */
76 76
77void fib_map_free(struct aac_dev *dev) 77void aac_fib_map_free(struct aac_dev *dev)
78{ 78{
79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
80} 80}
81 81
82/** 82/**
83 * fib_setup - setup the fibs 83 * aac_fib_setup - setup the fibs
84 * @dev: Adapter to set up 84 * @dev: Adapter to set up
85 * 85 *
86 * Allocate the PCI space for the fibs, map it and then intialise the 86 * Allocate the PCI space for the fibs, map it and then intialise the
87 * fib area, the unmapped fib data and also the free list 87 * fib area, the unmapped fib data and also the free list
88 */ 88 */
89 89
90int fib_setup(struct aac_dev * dev) 90int aac_fib_setup(struct aac_dev * dev)
91{ 91{
92 struct fib *fibptr; 92 struct fib *fibptr;
93 struct hw_fib *hw_fib_va; 93 struct hw_fib *hw_fib_va;
@@ -134,14 +134,14 @@ int fib_setup(struct aac_dev * dev)
134} 134}
135 135
136/** 136/**
137 * fib_alloc - allocate a fib 137 * aac_fib_alloc - allocate a fib
138 * @dev: Adapter to allocate the fib for 138 * @dev: Adapter to allocate the fib for
139 * 139 *
140 * Allocate a fib from the adapter fib pool. If the pool is empty we 140 * Allocate a fib from the adapter fib pool. If the pool is empty we
141 * return NULL. 141 * return NULL.
142 */ 142 */
143 143
144struct fib * fib_alloc(struct aac_dev *dev) 144struct fib *aac_fib_alloc(struct aac_dev *dev)
145{ 145{
146 struct fib * fibptr; 146 struct fib * fibptr;
147 unsigned long flags; 147 unsigned long flags;
@@ -170,14 +170,14 @@ struct fib * fib_alloc(struct aac_dev *dev)
170} 170}
171 171
172/** 172/**
173 * fib_free - free a fib 173 * aac_fib_free - free a fib
174 * @fibptr: fib to free up 174 * @fibptr: fib to free up
175 * 175 *
176 * Frees up a fib and places it on the appropriate queue 176 * Frees up a fib and places it on the appropriate queue
177 * (either free or timed out) 177 * (either free or timed out)
178 */ 178 */
179 179
180void fib_free(struct fib * fibptr) 180void aac_fib_free(struct fib *fibptr)
181{ 181{
182 unsigned long flags; 182 unsigned long flags;
183 183
@@ -188,7 +188,7 @@ void fib_free(struct fib * fibptr)
188 fibptr->dev->timeout_fib = fibptr; 188 fibptr->dev->timeout_fib = fibptr;
189 } else { 189 } else {
190 if (fibptr->hw_fib->header.XferState != 0) { 190 if (fibptr->hw_fib->header.XferState != 0) {
191 printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 191 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
192 (void*)fibptr, 192 (void*)fibptr,
193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 193 le32_to_cpu(fibptr->hw_fib->header.XferState));
194 } 194 }
@@ -199,13 +199,13 @@ void fib_free(struct fib * fibptr)
199} 199}
200 200
201/** 201/**
202 * fib_init - initialise a fib 202 * aac_fib_init - initialise a fib
203 * @fibptr: The fib to initialize 203 * @fibptr: The fib to initialize
204 * 204 *
205 * Set up the generic fib fields ready for use 205 * Set up the generic fib fields ready for use
206 */ 206 */
207 207
208void fib_init(struct fib *fibptr) 208void aac_fib_init(struct fib *fibptr)
209{ 209{
210 struct hw_fib *hw_fib = fibptr->hw_fib; 210 struct hw_fib *hw_fib = fibptr->hw_fib;
211 211
@@ -362,7 +362,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
362 */ 362 */
363 363
364/** 364/**
365 * fib_send - send a fib to the adapter 365 * aac_fib_send - send a fib to the adapter
366 * @command: Command to send 366 * @command: Command to send
367 * @fibptr: The fib 367 * @fibptr: The fib
368 * @size: Size of fib data area 368 * @size: Size of fib data area
@@ -378,7 +378,9 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
378 * response FIB is received from the adapter. 378 * response FIB is received from the adapter.
379 */ 379 */
380 380
381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
382 int priority, int wait, int reply, fib_callback callback,
383 void *callback_data)
382{ 384{
383 struct aac_dev * dev = fibptr->dev; 385 struct aac_dev * dev = fibptr->dev;
384 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
@@ -493,7 +495,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
493 q->numpending++; 495 q->numpending++;
494 *(q->headers.producer) = cpu_to_le32(index + 1); 496 *(q->headers.producer) = cpu_to_le32(index + 1);
495 spin_unlock_irqrestore(q->lock, qflags); 497 spin_unlock_irqrestore(q->lock, qflags);
496 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); 498 dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
497 if (!(nointr & aac_config.irq_mod)) 499 if (!(nointr & aac_config.irq_mod))
498 aac_adapter_notify(dev, AdapNormCmdQueue); 500 aac_adapter_notify(dev, AdapNormCmdQueue);
499 } 501 }
@@ -520,7 +522,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
520 list_del(&fibptr->queue); 522 list_del(&fibptr->queue);
521 spin_unlock_irqrestore(q->lock, qflags); 523 spin_unlock_irqrestore(q->lock, qflags);
522 if (wait == -1) { 524 if (wait == -1) {
523 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" 525 printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
524 "Usually a result of a PCI interrupt routing problem;\n" 526 "Usually a result of a PCI interrupt routing problem;\n"
525 "update mother board BIOS or consider utilizing one of\n" 527 "update mother board BIOS or consider utilizing one of\n"
526 "the SAFE mode kernel options (acpi, apic etc)\n"); 528 "the SAFE mode kernel options (acpi, apic etc)\n");
@@ -624,7 +626,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
624} 626}
625 627
626/** 628/**
627 * fib_adapter_complete - complete adapter issued fib 629 * aac_fib_adapter_complete - complete adapter issued fib
628 * @fibptr: fib to complete 630 * @fibptr: fib to complete
629 * @size: size of fib 631 * @size: size of fib
630 * 632 *
@@ -632,7 +634,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
632 * the adapter. 634 * the adapter.
633 */ 635 */
634 636
635int fib_adapter_complete(struct fib * fibptr, unsigned short size) 637int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
636{ 638{
637 struct hw_fib * hw_fib = fibptr->hw_fib; 639 struct hw_fib * hw_fib = fibptr->hw_fib;
638 struct aac_dev * dev = fibptr->dev; 640 struct aac_dev * dev = fibptr->dev;
@@ -683,20 +685,20 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
683 } 685 }
684 else 686 else
685 { 687 {
686 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); 688 printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
687 BUG(); 689 BUG();
688 } 690 }
689 return 0; 691 return 0;
690} 692}
691 693
692/** 694/**
693 * fib_complete - fib completion handler 695 * aac_fib_complete - fib completion handler
694 * @fib: FIB to complete 696 * @fib: FIB to complete
695 * 697 *
696 * Will do all necessary work to complete a FIB. 698 * Will do all necessary work to complete a FIB.
697 */ 699 */
698 700
699int fib_complete(struct fib * fibptr) 701int aac_fib_complete(struct fib *fibptr)
700{ 702{
701 struct hw_fib * hw_fib = fibptr->hw_fib; 703 struct hw_fib * hw_fib = fibptr->hw_fib;
702 704
@@ -995,14 +997,14 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
995 if (!dev || !dev->scsi_host_ptr) 997 if (!dev || !dev->scsi_host_ptr)
996 return; 998 return;
997 /* 999 /*
998 * force reload of disk info via probe_container 1000 * force reload of disk info via aac_probe_container
999 */ 1001 */
1000 if ((device_config_needed == CHANGE) 1002 if ((device_config_needed == CHANGE)
1001 && (dev->fsa_dev[container].valid == 1)) 1003 && (dev->fsa_dev[container].valid == 1))
1002 dev->fsa_dev[container].valid = 2; 1004 dev->fsa_dev[container].valid = 2;
1003 if ((device_config_needed == CHANGE) || 1005 if ((device_config_needed == CHANGE) ||
1004 (device_config_needed == ADD)) 1006 (device_config_needed == ADD))
1005 probe_container(dev, container); 1007 aac_probe_container(dev, container);
1006 device = scsi_device_lookup(dev->scsi_host_ptr, 1008 device = scsi_device_lookup(dev->scsi_host_ptr,
1007 CONTAINER_TO_CHANNEL(container), 1009 CONTAINER_TO_CHANNEL(container),
1008 CONTAINER_TO_ID(container), 1010 CONTAINER_TO_ID(container),
@@ -1104,7 +1106,7 @@ int aac_command_thread(struct aac_dev * dev)
1104 /* Handle Driver Notify Events */ 1106 /* Handle Driver Notify Events */
1105 aac_handle_aif(dev, fib); 1107 aac_handle_aif(dev, fib);
1106 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1108 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1107 fib_adapter_complete(fib, (u16)sizeof(u32)); 1109 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1108 } else { 1110 } else {
1109 struct list_head *entry; 1111 struct list_head *entry;
1110 /* The u32 here is important and intended. We are using 1112 /* The u32 here is important and intended. We are using
@@ -1241,7 +1243,7 @@ int aac_command_thread(struct aac_dev * dev)
1241 * Set the status of this FIB 1243 * Set the status of this FIB
1242 */ 1244 */
1243 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1245 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1244 fib_adapter_complete(fib, sizeof(u32)); 1246 aac_fib_adapter_complete(fib, sizeof(u32));
1245 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1247 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1246 /* Free up the remaining resources */ 1248 /* Free up the remaining resources */
1247 hw_fib_p = hw_fib_pool; 1249 hw_fib_p = hw_fib_pool;
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 439948ef8251..f6bcb9486f85 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -206,7 +206,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
206 * Set the status of this FIB 206 * Set the status of this FIB
207 */ 207 */
208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
209 fib_adapter_complete(fib, sizeof(u32)); 209 aac_fib_adapter_complete(fib, sizeof(u32));
210 spin_lock_irqsave(q->lock, flags); 210 spin_lock_irqsave(q->lock, flags);
211 } 211 }
212 } 212 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0bf5f9a943e8..271617890562 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -385,17 +385,45 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
385 385
386static int aac_slave_configure(struct scsi_device *sdev) 386static int aac_slave_configure(struct scsi_device *sdev)
387{ 387{
388 struct Scsi_Host *host = sdev->host; 388 if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
389 sdev->skip_ms_page_8 = 1;
390 sdev->skip_ms_page_3f = 1;
391 }
392 if ((sdev->type == TYPE_DISK) &&
393 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
394 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
395 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
396 sdev->no_uld_attach = 1;
397 }
398 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
399 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
400 struct scsi_device * dev;
401 struct Scsi_Host *host = sdev->host;
402 unsigned num_lsu = 0;
403 unsigned num_one = 0;
404 unsigned depth;
389 405
390 if (sdev->tagged_supported) 406 __shost_for_each_device(dev, host) {
391 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); 407 if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
392 else 408 (sdev_channel(dev) == CONTAINER_CHANNEL))
409 ++num_lsu;
410 else
411 ++num_one;
412 }
413 if (num_lsu == 0)
414 ++num_lsu;
415 depth = (host->can_queue - num_one) / num_lsu;
416 if (depth > 256)
417 depth = 256;
418 else if (depth < 2)
419 depth = 2;
420 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
421 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
422 AAC_OPT_NEW_COMM))
423 blk_queue_max_segment_size(sdev->request_queue, 65536);
424 } else
393 scsi_adjust_queue_depth(sdev, 0, 1); 425 scsi_adjust_queue_depth(sdev, 0, 1);
394 426
395 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options
396 & AAC_OPT_NEW_COMM))
397 blk_queue_max_segment_size(sdev->request_queue, 65536);
398
399 return 0; 427 return 0;
400} 428}
401 429
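The reworked aac_slave_configure() above hides physical disks from the upper-layer drivers via no_uld_attach unless the controller is in RAID/SCSI mode on channel 2, and derives a per-container queue depth by reserving one command slot for every non-container device, dividing the remaining host can_queue among the container ("lsu") devices, and clamping the result to the range 2..256. The arithmetic in isolation, as a standalone sketch (function name and example numbers are hypothetical):

/* Sketch: queue depth shared among container devices.
 * e.g. can_queue = 512, 3 containers, 5 other devices
 *      -> depth = (512 - 5) / 3 = 169                    */
static unsigned int container_queue_depth(unsigned int can_queue,
					  unsigned int num_lsu,
					  unsigned int num_one)
{
	unsigned int depth;

	if (num_lsu == 0)
		num_lsu = 1;
	depth = (can_queue - num_one) / num_lsu;
	if (depth > 256)
		depth = 256;
	else if (depth < 2)
		depth = 2;
	return depth;
}
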
@@ -870,7 +898,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
870 898
871 /* 899 /*
872 * max channel will be the physical channels plus 1 virtual channel 900 * max channel will be the physical channels plus 1 virtual channel
873 * all containers are on the virtual channel 0 901 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
874 * physical channels are address by their actual physical number+1 902 * physical channels are address by their actual physical number+1
875 */ 903 */
876 if (aac->nondasd_support == 1) 904 if (aac->nondasd_support == 1)
@@ -913,7 +941,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
913 aac_adapter_disable_int(aac); 941 aac_adapter_disable_int(aac);
914 free_irq(pdev->irq, aac); 942 free_irq(pdev->irq, aac);
915 out_unmap: 943 out_unmap:
916 fib_map_free(aac); 944 aac_fib_map_free(aac);
917 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 945 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
918 kfree(aac->queues); 946 kfree(aac->queues);
919 iounmap(aac->regs.sa); 947 iounmap(aac->regs.sa);
@@ -947,7 +975,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
947 975
948 aac_send_shutdown(aac); 976 aac_send_shutdown(aac);
949 aac_adapter_disable_int(aac); 977 aac_adapter_disable_int(aac);
950 fib_map_free(aac); 978 aac_fib_map_free(aac);
951 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 979 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
952 aac->comm_phys); 980 aac->comm_phys);
953 kfree(aac->queues); 981 kfree(aac->queues);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index bd3ffdf6c800..62e3cda859af 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2816,7 +2816,7 @@ static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
2816 } 2816 }
2817#endif 2817#endif
2818 2818
2819 } else { 2819 } else if (scp->request_bufflen) {
2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2820 scp->SCp.Status = GDTH_MAP_SINGLE;
2821 scp->SCp.Message = (read_write == 1 ? 2821 scp->SCp.Message = (read_write == 1 ?
2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
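
Editorial note: the gdth change only sets up a single-buffer DMA mapping when the command actually carries data; commands with request_bufflen == 0 (TEST UNIT READY and similar) previously went through the mapping path with a zero length. A minimal hedged sketch of the guarded pattern, using the field names from the hunk (the pci_map_single() call that follows in the real code is omitted, and the helper name is hypothetical):

/* Sketch: pick the mapping strategy for a command's data buffer, skipping
 * the single-buffer map entirely when there is no data phase. */
static void example_map_request(Scsi_Cmnd *scp, int read_write)
{
	if (scp->use_sg) {
		/* scatter-gather path, handled earlier in the real code */
	} else if (scp->request_bufflen) {
		scp->SCp.Status  = GDTH_MAP_SINGLE;
		scp->SCp.Message = (read_write == 1 ?
				    PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
	}
	/* else: no data to transfer, nothing to map or unmap later */
}
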
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 27acf78cf8d8..2bba5e55d7bc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4236,35 +4236,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4236} 4236}
4237 4237
4238/** 4238/**
4239 * ipr_save_ioafp_mode_select - Save adapters mode select data
4240 * @ioa_cfg: ioa config struct
4241 * @scsi_cmd: scsi command struct
4242 *
4243 * This function saves mode select data for the adapter to
4244 * use following an adapter reset.
4245 *
4246 * Return value:
4247 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4248 **/
4249static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4250 struct scsi_cmnd *scsi_cmd)
4251{
4252 if (!ioa_cfg->saved_mode_pages) {
4253 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4254 GFP_ATOMIC);
4255 if (!ioa_cfg->saved_mode_pages) {
4256 dev_err(&ioa_cfg->pdev->dev,
4257 "IOA mode select buffer allocation failed\n");
4258 return SCSI_MLQUEUE_HOST_BUSY;
4259 }
4260 }
4261
4262 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4263 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4264 return 0;
4265}
4266
4267/**
4268 * ipr_queuecommand - Queue a mid-layer request 4239 * ipr_queuecommand - Queue a mid-layer request
4269 * @scsi_cmd: scsi command struct 4240 * @scsi_cmd: scsi command struct
4270 * @done: done function 4241 * @done: done function
@@ -4338,9 +4309,6 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4338 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4309 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4310 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4340 4311
4341 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4342 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4343
4344 if (likely(rc == 0)) 4312 if (likely(rc == 0))
4345 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 4313 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4346 4314
@@ -4829,17 +4797,11 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4829 int length; 4797 int length;
4830 4798
4831 ENTER; 4799 ENTER;
4832 if (ioa_cfg->saved_mode_pages) { 4800 ipr_scsi_bus_speed_limit(ioa_cfg);
4833 memcpy(mode_pages, ioa_cfg->saved_mode_pages, 4801 ipr_check_term_power(ioa_cfg, mode_pages);
4834 ioa_cfg->saved_mode_page_len); 4802 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4835 length = ioa_cfg->saved_mode_page_len; 4803 length = mode_pages->hdr.length + 1;
4836 } else { 4804 mode_pages->hdr.length = 0;
4837 ipr_scsi_bus_speed_limit(ioa_cfg);
4838 ipr_check_term_power(ioa_cfg, mode_pages);
4839 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4840 length = mode_pages->hdr.length + 1;
4841 mode_pages->hdr.length = 0;
4842 }
4843 4805
4844 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4806 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4845 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 4807 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
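
Editorial note: with the saved-mode-pages cache removed, the page 28 MODE SELECT is always rebuilt from the data just fetched by MODE SENSE. The length fix-up above follows the SCSI framing rules: the mode parameter header's length field counts the bytes that follow it, so the total transfer is hdr.length + 1, and the field is zeroed because it is reserved in MODE SELECT data. A hedged sketch of just that fix-up (the ipr_mode_pages layout is assumed from the surrounding code):

/* Sketch: derive the MODE SELECT transfer length from the header returned
 * by MODE SENSE and clear the length field before sending it back.
 * Assumes hdr.length is the mode data length, which excludes itself. */
static u32 example_mode_select_len(struct ipr_mode_pages *mode_pages)
{
	u32 length = mode_pages->hdr.length + 1;	/* length byte + payload */

	mode_pages->hdr.length = 0;			/* reserved on MODE SELECT */
	return length;
}
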
@@ -5969,7 +5931,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5969 } 5931 }
5970 5932
5971 ipr_free_dump(ioa_cfg); 5933 ipr_free_dump(ioa_cfg);
5972 kfree(ioa_cfg->saved_mode_pages);
5973 kfree(ioa_cfg->trace); 5934 kfree(ioa_cfg->trace);
5974} 5935}
5975 5936
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b639332131f1..fd360bfe56dd 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.1" 39#define IPR_DRIVER_VERSION "2.1.2"
40#define IPR_DRIVER_DATE "(November 15, 2005)" 40#define IPR_DRIVER_DATE "(February 8, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1000,7 +1000,6 @@ struct ipr_ioa_cfg {
1000 struct Scsi_Host *host; 1000 struct Scsi_Host *host;
1001 struct pci_dev *pdev; 1001 struct pci_dev *pdev;
1002 struct ipr_sglist *ucode_sglist; 1002 struct ipr_sglist *ucode_sglist;
1003 struct ipr_mode_pages *saved_mode_pages;
1004 u8 saved_mode_page_len; 1003 u8 saved_mode_page_len;
1005 1004
1006 struct work_struct work_q; 1005 struct work_struct work_q;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 780bfcc67096..ff79e68b347c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -146,7 +146,7 @@ iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
146 spin_unlock_irqrestore(&session->lock, flags); 146 spin_unlock_irqrestore(&session->lock, flags);
147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 147 set_bit(SUSPEND_BIT, &conn->suspend_tx);
148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 148 set_bit(SUSPEND_BIT, &conn->suspend_rx);
149 iscsi_conn_error(iscsi_handle(conn), err); 149 iscsi_conn_error(conn->cls_conn, err);
150} 150}
151 151
152static inline int 152static inline int
@@ -244,12 +244,10 @@ iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 244 if (sc->sc_data_direction == DMA_TO_DEVICE) {
245 struct iscsi_data_task *dtask, *n; 245 struct iscsi_data_task *dtask, *n;
246 /* WRITE: cleanup Data-Out's if any */ 246 /* WRITE: cleanup Data-Out's if any */
247 spin_lock(&conn->lock);
248 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 247 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
249 list_del(&dtask->item); 248 list_del(&dtask->item);
250 mempool_free(dtask, ctask->datapool); 249 mempool_free(dtask, ctask->datapool);
251 } 250 }
252 spin_unlock(&conn->lock);
253 } 251 }
254 ctask->xmstate = XMSTATE_IDLE; 252 ctask->xmstate = XMSTATE_IDLE;
255 ctask->r2t = NULL; 253 ctask->r2t = NULL;
@@ -689,7 +687,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
689 break; 687 break;
690 688
691 if (!conn->in.datalen) { 689 if (!conn->in.datalen) {
692 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 690 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
693 NULL, 0); 691 NULL, 0);
694 if (conn->login_mtask != mtask) { 692 if (conn->login_mtask != mtask) {
695 spin_lock(&session->lock); 693 spin_lock(&session->lock);
@@ -737,7 +735,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
737 if (!conn->in.datalen) { 735 if (!conn->in.datalen) {
738 struct iscsi_mgmt_task *mtask; 736 struct iscsi_mgmt_task *mtask;
739 737
740 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 738 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
741 NULL, 0); 739 NULL, 0);
742 mtask = (struct iscsi_mgmt_task *) 740 mtask = (struct iscsi_mgmt_task *)
743 session->mgmt_cmds[conn->in.itt - 741 session->mgmt_cmds[conn->in.itt -
@@ -761,7 +759,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
761 rc = iscsi_check_assign_cmdsn(session, 759 rc = iscsi_check_assign_cmdsn(session,
762 (struct iscsi_nopin*)hdr); 760 (struct iscsi_nopin*)hdr);
763 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 761 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
764 rc = iscsi_recv_pdu(iscsi_handle(conn), 762 rc = iscsi_recv_pdu(conn->cls_conn,
765 hdr, NULL, 0); 763 hdr, NULL, 0);
766 } else 764 } else
767 rc = ISCSI_ERR_PROTO; 765 rc = ISCSI_ERR_PROTO;
@@ -1044,7 +1042,7 @@ iscsi_data_recv(struct iscsi_conn *conn)
1044 goto exit; 1042 goto exit;
1045 } 1043 }
1046 1044
1047 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, 1045 rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr,
1048 conn->data, conn->in.datalen); 1046 conn->data, conn->in.datalen);
1049 1047
1050 if (!rc && conn->datadgst_en && 1048 if (!rc && conn->datadgst_en &&
@@ -2428,19 +2426,20 @@ iscsi_pool_free(struct iscsi_queue *q, void **items)
2428} 2426}
2429 2427
2430static struct iscsi_cls_conn * 2428static struct iscsi_cls_conn *
2431iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) 2429iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
2432{ 2430{
2431 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2433 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2432 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2434 struct iscsi_conn *conn; 2433 struct iscsi_conn *conn;
2435 struct iscsi_cls_conn *cls_conn; 2434 struct iscsi_cls_conn *cls_conn;
2436 2435
2437 cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), 2436 cls_conn = iscsi_create_conn(cls_session, conn_idx);
2438 conn_idx);
2439 if (!cls_conn) 2437 if (!cls_conn)
2440 return NULL; 2438 return NULL;
2441 conn = cls_conn->dd_data; 2439 conn = cls_conn->dd_data;
2440 memset(conn, 0, sizeof(*conn));
2442 2441
2443 memset(conn, 0, sizeof(struct iscsi_conn)); 2442 conn->cls_conn = cls_conn;
2444 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2443 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2445 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2444 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2446 conn->id = conn_idx; 2445 conn->id = conn_idx;
@@ -2452,8 +2451,6 @@ iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx)
2452 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2451 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2453 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2452 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2454 2453
2455 spin_lock_init(&conn->lock);
2456
2457 /* initialize general xmit PDU commands queue */ 2454 /* initialize general xmit PDU commands queue */
2458 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), 2455 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2459 GFP_KERNEL, NULL); 2456 GFP_KERNEL, NULL);
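
Editorial note: the connection-creation hunks above switch iscsi_tcp over to the iscsi class objects. iscsi_create_conn() hands back a struct iscsi_cls_conn whose dd_data area is the transport's private per-connection storage, and the driver now keeps a back-pointer so later upcalls such as iscsi_conn_error() and iscsi_recv_pdu() can be passed the class object directly. A stripped-down sketch of that pattern (the function name is hypothetical; the structures are as in the hunks):

/* Sketch of the class-object pattern adopted above: allocate the class
 * connection, treat its dd_data as the driver's private iscsi_conn, and
 * remember the class pointer for later notifications. */
static struct iscsi_cls_conn *
example_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_create_conn(cls_session, conn_idx);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;	/* transport-private area */
	memset(conn, 0, sizeof(*conn));
	conn->cls_conn = cls_conn;	/* used by iscsi_conn_error() et al. */
	conn->id = conn_idx;
	return cls_conn;
}
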
@@ -2625,11 +2622,13 @@ iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn)
2625} 2622}
2626 2623
2627static int 2624static int
2628iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, 2625iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2629 uint32_t transport_fd, int is_leading) 2626 struct iscsi_cls_conn *cls_conn, uint32_t transport_fd,
2627 int is_leading)
2630{ 2628{
2631 struct iscsi_session *session = iscsi_ptr(sessionh); 2629 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2632 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); 2630 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2631 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data;
2633 struct sock *sk; 2632 struct sock *sk;
2634 struct socket *sock; 2633 struct socket *sock;
2635 int err; 2634 int err;
@@ -2703,9 +2702,9 @@ iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2703} 2702}
2704 2703
2705static int 2704static int
2706iscsi_conn_start(iscsi_connh_t connh) 2705iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2707{ 2706{
2708 struct iscsi_conn *conn = iscsi_ptr(connh); 2707 struct iscsi_conn *conn = cls_conn->dd_data;
2709 struct iscsi_session *session = conn->session; 2708 struct iscsi_session *session = conn->session;
2710 struct sock *sk; 2709 struct sock *sk;
2711 2710
@@ -2754,9 +2753,9 @@ iscsi_conn_start(iscsi_connh_t connh)
2754} 2753}
2755 2754
2756static void 2755static void
2757iscsi_conn_stop(iscsi_connh_t connh, int flag) 2756iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2758{ 2757{
2759 struct iscsi_conn *conn = iscsi_ptr(connh); 2758 struct iscsi_conn *conn = cls_conn->dd_data;
2760 struct iscsi_session *session = conn->session; 2759 struct iscsi_session *session = conn->session;
2761 struct sock *sk; 2760 struct sock *sk;
2762 unsigned long flags; 2761 unsigned long flags;
@@ -3253,9 +3252,9 @@ static struct scsi_host_template iscsi_sht = {
3253 3252
3254static struct iscsi_transport iscsi_tcp_transport; 3253static struct iscsi_transport iscsi_tcp_transport;
3255 3254
3256static struct Scsi_Host * 3255static struct iscsi_cls_session *
3257iscsi_session_create(struct scsi_transport_template *scsit, 3256iscsi_session_create(struct scsi_transport_template *scsit,
3258 uint32_t initial_cmdsn) 3257 uint32_t initial_cmdsn, uint32_t *sid)
3259{ 3258{
3260 struct Scsi_Host *shost; 3259 struct Scsi_Host *shost;
3261 struct iscsi_session *session; 3260 struct iscsi_session *session;
@@ -3268,13 +3267,14 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3268 session = iscsi_hostdata(shost->hostdata); 3267 session = iscsi_hostdata(shost->hostdata);
3269 memset(session, 0, sizeof(struct iscsi_session)); 3268 memset(session, 0, sizeof(struct iscsi_session));
3270 session->host = shost; 3269 session->host = shost;
3271 session->state = ISCSI_STATE_LOGGED_IN; 3270 session->state = ISCSI_STATE_FREE;
3272 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3271 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3273 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3272 session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3274 session->cmdsn = initial_cmdsn; 3273 session->cmdsn = initial_cmdsn;
3275 session->exp_cmdsn = initial_cmdsn + 1; 3274 session->exp_cmdsn = initial_cmdsn + 1;
3276 session->max_cmdsn = initial_cmdsn + 1; 3275 session->max_cmdsn = initial_cmdsn + 1;
3277 session->max_r2t = 1; 3276 session->max_r2t = 1;
3277 *sid = shost->host_no;
3278 3278
3279 /* initialize SCSI PDU commands pool */ 3279 /* initialize SCSI PDU commands pool */
3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -3311,22 +3311,24 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3311 if (iscsi_r2tpool_alloc(session)) 3311 if (iscsi_r2tpool_alloc(session))
3312 goto r2tpool_alloc_fail; 3312 goto r2tpool_alloc_fail;
3313 3313
3314 return shost; 3314 return hostdata_session(shost->hostdata);
3315 3315
3316r2tpool_alloc_fail: 3316r2tpool_alloc_fail:
3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3318 kfree(session->mgmt_cmds[cmd_i]->data); 3318 kfree(session->mgmt_cmds[cmd_i]->data);
3319 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3320immdata_alloc_fail: 3319immdata_alloc_fail:
3320 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3321mgmtpool_alloc_fail: 3321mgmtpool_alloc_fail:
3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3323cmdpool_alloc_fail: 3323cmdpool_alloc_fail:
3324 iscsi_transport_destroy_session(shost);
3324 return NULL; 3325 return NULL;
3325} 3326}
3326 3327
3327static void 3328static void
3328iscsi_session_destroy(struct Scsi_Host *shost) 3329iscsi_session_destroy(struct iscsi_cls_session *cls_session)
3329{ 3330{
3331 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3330 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3332 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3331 int cmd_i; 3333 int cmd_i;
3332 struct iscsi_data_task *dtask, *n; 3334 struct iscsi_data_task *dtask, *n;
@@ -3350,10 +3352,10 @@ iscsi_session_destroy(struct Scsi_Host *shost)
3350} 3352}
3351 3353
3352static int 3354static int
3353iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, 3355iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
3354 uint32_t value) 3356 uint32_t value)
3355{ 3357{
3356 struct iscsi_conn *conn = iscsi_ptr(connh); 3358 struct iscsi_conn *conn = cls_conn->dd_data;
3357 struct iscsi_session *session = conn->session; 3359 struct iscsi_session *session = conn->session;
3358 3360
3359 spin_lock_bh(&session->lock); 3361 spin_lock_bh(&session->lock);
@@ -3495,9 +3497,10 @@ iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3495} 3497}
3496 3498
3497static int 3499static int
3498iscsi_session_get_param(struct Scsi_Host *shost, 3500iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3499 enum iscsi_param param, uint32_t *value) 3501 enum iscsi_param param, uint32_t *value)
3500{ 3502{
3503 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3501 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3504 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3502 3505
3503 switch(param) { 3506 switch(param) {
@@ -3539,9 +3542,10 @@ iscsi_session_get_param(struct Scsi_Host *shost,
3539} 3542}
3540 3543
3541static int 3544static int
3542iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) 3545iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3546 enum iscsi_param param, uint32_t *value)
3543{ 3547{
3544 struct iscsi_conn *conn = data; 3548 struct iscsi_conn *conn = cls_conn->dd_data;
3545 3549
3546 switch(param) { 3550 switch(param) {
3547 case ISCSI_PARAM_MAX_RECV_DLENGTH: 3551 case ISCSI_PARAM_MAX_RECV_DLENGTH:
@@ -3564,9 +3568,9 @@ iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value)
3564} 3568}
3565 3569
3566static void 3570static void
3567iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) 3571iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
3568{ 3572{
3569 struct iscsi_conn *conn = iscsi_ptr(connh); 3573 struct iscsi_conn *conn = cls_conn->dd_data;
3570 3574
3571 stats->txdata_octets = conn->txdata_octets; 3575 stats->txdata_octets = conn->txdata_octets;
3572 stats->rxdata_octets = conn->rxdata_octets; 3576 stats->rxdata_octets = conn->rxdata_octets;
@@ -3587,10 +3591,10 @@ iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3587} 3591}
3588 3592
3589static int 3593static int
3590iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, 3594iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
3591 uint32_t data_size) 3595 char *data, uint32_t data_size)
3592{ 3596{
3593 struct iscsi_conn *conn = iscsi_ptr(connh); 3597 struct iscsi_conn *conn = cls_conn->dd_data;
3594 int rc; 3598 int rc;
3595 3599
3596 mutex_lock(&conn->xmitmutex); 3600 mutex_lock(&conn->xmitmutex);
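
Editorial note: across the remaining iscsi_tcp callbacks the opaque iscsi_connh_t/iscsi_sessionh_t handles are replaced by the class pointers themselves, and each callback digs its private state back out of them. The two navigation idioms used throughout the hunks are sketched below (callback names are hypothetical; iscsi_session_to_shost() and iscsi_hostdata() are used exactly as above):

/* Per-connection callbacks: private data lives in cls_conn->dd_data. */
static int example_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	conn->c_stage = ISCSI_CONN_STARTED;	/* illustrative only */
	return 0;
}

/* Per-session callbacks: the class session wraps the Scsi_Host, and the
 * driver's iscsi_session sits in the host's hostdata area. */
static int example_session_get(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);

	return session->cmds_max;		/* illustrative only */
}
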
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f95e61b76f70..ba26741ac154 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -113,7 +113,10 @@ struct iscsi_tcp_recv {
113 int datadgst; 113 int datadgst;
114}; 114};
115 115
116struct iscsi_cls_conn;
117
116struct iscsi_conn { 118struct iscsi_conn {
119 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
117 struct iscsi_hdr hdr; /* header placeholder */ 120 struct iscsi_hdr hdr; /* header placeholder */
118 char hdrext[4*sizeof(__u16) + 121 char hdrext[4*sizeof(__u16) +
119 sizeof(__u32)]; 122 sizeof(__u32)];
@@ -143,7 +146,6 @@ struct iscsi_conn {
143 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
144 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
145 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
146 spinlock_t lock; /* FIXME: to be removed */
147 149
148 /* old values for socket callbacks */ 150 /* old values for socket callbacks */
149 void (*old_data_ready)(struct sock *, int); 151 void (*old_data_ready)(struct sock *, int);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index d101a8a6f4e8..7144674bc8e6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -5049,7 +5049,7 @@ static struct pci_device_id megaraid_pci_tbl[] = {
5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
5050 5050
5051static struct pci_driver megaraid_pci_driver = { 5051static struct pci_driver megaraid_pci_driver = {
5052 .name = "megaraid", 5052 .name = "megaraid_legacy",
5053 .id_table = megaraid_pci_tbl, 5053 .id_table = megaraid_pci_tbl,
5054 .probe = megaraid_probe_one, 5054 .probe = megaraid_probe_one,
5055 .remove = __devexit_p(megaraid_remove_one), 5055 .remove = __devexit_p(megaraid_remove_one),
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4b3e0d6e5afa..4b75fe619d9c 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -5,7 +5,7 @@
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6 6
7#define MEGARAID_VERSION \ 7#define MEGARAID_VERSION \
8 "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" 8 "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
9 9
10/* 10/*
11 * Driver features - change the values to enable or disable features in the 11 * Driver features - change the values to enable or disable features in the
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a487f414960e..7de267e14458 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.02 13 * Version : v00.00.02.04
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -60,6 +60,12 @@ static struct pci_device_id megasas_pci_table[] = {
60 PCI_ANY_ID, 60 PCI_ANY_ID,
61 }, 61 },
62 { 62 {
63 PCI_VENDOR_ID_LSI_LOGIC,
64 PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
63 PCI_VENDOR_ID_DELL, 69 PCI_VENDOR_ID_DELL,
64 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP 70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP
65 PCI_ANY_ID, 71 PCI_ANY_ID,
@@ -199,6 +205,86 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
199*/ 205*/
200 206
201/** 207/**
208* The following functions are defined for ppc (deviceid : 0x60)
209* controllers
210*/
211
212/**
213 * megasas_enable_intr_ppc - Enables interrupts
214 * @regs: MFI register set
215 */
216static inline void
217megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
218{
219 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
220
221 writel(~0x80000004, &(regs)->outbound_intr_mask);
222
223 /* Dummy readl to force pci flush */
224 readl(&regs->outbound_intr_mask);
225}
226
227/**
228 * megasas_read_fw_status_reg_ppc - returns the current FW status value
229 * @regs: MFI register set
230 */
231static u32
232megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
233{
234 return readl(&(regs)->outbound_scratch_pad);
235}
236
237/**
238 * megasas_clear_interrupt_ppc - Check & clear interrupt
239 * @regs: MFI register set
240 */
241static int
242megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
243{
244 u32 status;
245 /*
246 * Check if it is our interrupt
247 */
248 status = readl(&regs->outbound_intr_status);
249
250 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
251 return 1;
252 }
253
254 /*
255 * Clear the interrupt by writing back the same value
256 */
257 writel(status, &regs->outbound_doorbell_clear);
258
259 return 0;
260}
261/**
262 * megasas_fire_cmd_ppc - Sends command to the FW
263 * @frame_phys_addr : Physical address of cmd
264 * @frame_count : Number of frames for the command
265 * @regs : MFI register set
266 */
267static inline void
268megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
269{
270 writel((frame_phys_addr | (frame_count<<1))|1,
271 &(regs)->inbound_queue_port);
272}
273
274static struct megasas_instance_template megasas_instance_template_ppc = {
275
276 .fire_cmd = megasas_fire_cmd_ppc,
277 .enable_intr = megasas_enable_intr_ppc,
278 .clear_intr = megasas_clear_intr_ppc,
279 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
280};
281
282/**
283* This is the end of set of functions & definitions
284* specific to ppc (deviceid : 0x60) controllers
285*/
286
287/**
202 * megasas_disable_intr - Disables interrupts 288 * megasas_disable_intr - Disables interrupts
203 * @regs: MFI register set 289 * @regs: MFI register set
204 */ 290 */
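
Editorial note: the 1078 (ppc IOP) helpers above differ from the xscale ones mainly in where the interrupt lives: the status is read from outbound_intr_status, ownership is checked against MFI_REPLY_1078_MESSAGE_INTERRUPT, and the acknowledge is a write to outbound_doorbell_clear rather than back to the status register. Because every IOP-specific detail is reached through the instance template, the shared interrupt path can stay generic. A hedged sketch of such a dispatcher (the handler name is an assumption, not lifted from this patch; completion processing is elided):

/* Sketch: an IOP-agnostic interrupt handler that defers all register-level
 * details to the per-controller template installed at init time. */
static irqreturn_t example_megasas_isr(int irq, void *devp)
{
	struct megasas_instance *instance = devp;

	/* clear_intr() returns non-zero when the interrupt is not ours */
	if (instance->instancet->clear_intr(instance->reg_set))
		return IRQ_NONE;

	/* ... walk the reply queue and complete frames here ... */
	return IRQ_HANDLED;
}
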
@@ -1607,7 +1693,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1607 1693
1608 reg_set = instance->reg_set; 1694 reg_set = instance->reg_set;
1609 1695
1610 instance->instancet = &megasas_instance_template_xscale; 1696 switch(instance->pdev->device)
1697 {
1698 case PCI_DEVICE_ID_LSI_SAS1078R:
1699 instance->instancet = &megasas_instance_template_ppc;
1700 break;
1701 case PCI_DEVICE_ID_LSI_SAS1064R:
1702 case PCI_DEVICE_ID_DELL_PERC5:
1703 default:
1704 instance->instancet = &megasas_instance_template_xscale;
1705 break;
1706 }
1611 1707
1612 /* 1708 /*
1613 * We expect the FW state to be READY 1709 * We expect the FW state to be READY
@@ -1983,6 +2079,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
1983 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 2079 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1984 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 2080 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1985 host->max_lun = MEGASAS_MAX_LUN; 2081 host->max_lun = MEGASAS_MAX_LUN;
2082 host->max_cmd_len = 16;
1986 2083
1987 /* 2084 /*
1988 * Notify the mid-layer about the new controller 2085 * Notify the mid-layer about the new controller
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index d6d166c0663f..89639f0c38ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.02.02" 21#define MEGASAS_VERSION "00.00.02.04"
22#define MEGASAS_RELDATE "Jan 23, 2006" 22#define MEGASAS_RELDATE "Feb 03, 2006"
23#define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" 23#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006"
24/* 24/*
25 * ===================================== 25 * =====================================
26 * MegaRAID SAS MFI firmware definitions 26 * MegaRAID SAS MFI firmware definitions
@@ -553,31 +553,46 @@ struct megasas_ctrl_info {
553#define MFI_OB_INTR_STATUS_MASK 0x00000002 553#define MFI_OB_INTR_STATUS_MASK 0x00000002
554#define MFI_POLL_TIMEOUT_SECS 10 554#define MFI_POLL_TIMEOUT_SECS 10
555 555
556#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060
558
556struct megasas_register_set { 559struct megasas_register_set {
560 u32 reserved_0[4]; /*0000h*/
557 561
558 u32 reserved_0[4]; /*0000h */ 562 u32 inbound_msg_0; /*0010h*/
563 u32 inbound_msg_1; /*0014h*/
564 u32 outbound_msg_0; /*0018h*/
565 u32 outbound_msg_1; /*001Ch*/
559 566
560 u32 inbound_msg_0; /*0010h */ 567 u32 inbound_doorbell; /*0020h*/
561 u32 inbound_msg_1; /*0014h */ 568 u32 inbound_intr_status; /*0024h*/
562 u32 outbound_msg_0; /*0018h */ 569 u32 inbound_intr_mask; /*0028h*/
563 u32 outbound_msg_1; /*001Ch */
564 570
565 u32 inbound_doorbell; /*0020h */ 571 u32 outbound_doorbell; /*002Ch*/
566 u32 inbound_intr_status; /*0024h */ 572 u32 outbound_intr_status; /*0030h*/
567 u32 inbound_intr_mask; /*0028h */ 573 u32 outbound_intr_mask; /*0034h*/
568 574
569 u32 outbound_doorbell; /*002Ch */ 575 u32 reserved_1[2]; /*0038h*/
570 u32 outbound_intr_status; /*0030h */
571 u32 outbound_intr_mask; /*0034h */
572 576
573 u32 reserved_1[2]; /*0038h */ 577 u32 inbound_queue_port; /*0040h*/
578 u32 outbound_queue_port; /*0044h*/
574 579
575 u32 inbound_queue_port; /*0040h */ 580 u32 reserved_2[22]; /*0048h*/
576 u32 outbound_queue_port; /*0044h */
577 581
578 u32 reserved_2; /*004Ch */ 582 u32 outbound_doorbell_clear; /*00A0h*/
579 583
580 u32 index_registers[1004]; /*0050h */ 584 u32 reserved_3[3]; /*00A4h*/
585
586 u32 outbound_scratch_pad ; /*00B0h*/
587
588 u32 reserved_4[3]; /*00B4h*/
589
590 u32 inbound_low_queue_port ; /*00C0h*/
591
592 u32 inbound_high_queue_port ; /*00C4h*/
593
594 u32 reserved_5; /*00C8h*/
595 u32 index_registers[820]; /*00CCh*/
581 596
582} __attribute__ ((packed)); 597} __attribute__ ((packed));
583 598
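
Editorial note: the reworked megasas_register_set now spells out the 1078 MMIO map — the doorbell-clear register at offset 0xA0, the firmware scratch pad at 0xB0 and the low/high inbound queue ports at 0xC0/0xC4 — with the reserved arrays sized so the named members land on those offsets. When editing a packed register struct like this it is easy to get a pad array off by one word; a compile-time check such as the following (not part of the patch, offsets taken from the comments above) catches that:

/* Sketch: compile-time verification that the padded struct really places
 * the registers at the documented MMIO offsets.  Not part of the patch. */
static inline void example_check_register_layout(void)
{
	BUILD_BUG_ON(offsetof(struct megasas_register_set,
			      inbound_queue_port) != 0x40);
	BUILD_BUG_ON(offsetof(struct megasas_register_set,
			      outbound_doorbell_clear) != 0xA0);
	BUILD_BUG_ON(offsetof(struct megasas_register_set,
			      outbound_scratch_pad) != 0xB0);
	BUILD_BUG_ON(offsetof(struct megasas_register_set,
			      inbound_low_queue_port) != 0xC0);
}
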
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b17ee62dd1a9..92b3e13e9061 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12/* SYSFS attributes --------------------------------------------------------- */ 11/* SYSFS attributes --------------------------------------------------------- */
13 12
@@ -114,7 +113,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
114 struct device, kobj))); 113 struct device, kobj)));
115 unsigned long flags; 114 unsigned long flags;
116 115
117 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 116 if (!capable(CAP_SYS_ADMIN) || off != 0)
118 return 0; 117 return 0;
119 118
120 /* Read NVRAM. */ 119 /* Read NVRAM. */
@@ -123,7 +122,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
123 ha->nvram_size); 122 ha->nvram_size);
124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 123 spin_unlock_irqrestore(&ha->hardware_lock, flags);
125 124
126 return (count); 125 return ha->nvram_size;
127} 126}
128 127
129static ssize_t 128static ssize_t
@@ -175,19 +174,150 @@ static struct bin_attribute sysfs_nvram_attr = {
175 .mode = S_IRUSR | S_IWUSR, 174 .mode = S_IRUSR | S_IWUSR,
176 .owner = THIS_MODULE, 175 .owner = THIS_MODULE,
177 }, 176 },
178 .size = 0, 177 .size = 512,
179 .read = qla2x00_sysfs_read_nvram, 178 .read = qla2x00_sysfs_read_nvram,
180 .write = qla2x00_sysfs_write_nvram, 179 .write = qla2x00_sysfs_write_nvram,
181}; 180};
182 181
182static ssize_t
183qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
184 size_t count)
185{
186 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
187 struct device, kobj)));
188
189 if (ha->optrom_state != QLA_SREADING)
190 return 0;
191 if (off > ha->optrom_size)
192 return 0;
193 if (off + count > ha->optrom_size)
194 count = ha->optrom_size - off;
195
196 memcpy(buf, &ha->optrom_buffer[off], count);
197
198 return count;
199}
200
201static ssize_t
202qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off,
203 size_t count)
204{
205 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
206 struct device, kobj)));
207
208 if (ha->optrom_state != QLA_SWRITING)
209 return -EINVAL;
210 if (off > ha->optrom_size)
211 return -ERANGE;
212 if (off + count > ha->optrom_size)
213 count = ha->optrom_size - off;
214
215 memcpy(&ha->optrom_buffer[off], buf, count);
216
217 return count;
218}
219
220static struct bin_attribute sysfs_optrom_attr = {
221 .attr = {
222 .name = "optrom",
223 .mode = S_IRUSR | S_IWUSR,
224 .owner = THIS_MODULE,
225 },
226 .size = OPTROM_SIZE_24XX,
227 .read = qla2x00_sysfs_read_optrom,
228 .write = qla2x00_sysfs_write_optrom,
229};
230
231static ssize_t
232qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
233 size_t count)
234{
235 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
236 struct device, kobj)));
237 int val;
238
239 if (off)
240 return 0;
241
242 if (sscanf(buf, "%d", &val) != 1)
243 return -EINVAL;
244
245 switch (val) {
246 case 0:
247 if (ha->optrom_state != QLA_SREADING &&
248 ha->optrom_state != QLA_SWRITING)
249 break;
250
251 ha->optrom_state = QLA_SWAITING;
252 vfree(ha->optrom_buffer);
253 ha->optrom_buffer = NULL;
254 break;
255 case 1:
256 if (ha->optrom_state != QLA_SWAITING)
257 break;
258
259 ha->optrom_state = QLA_SREADING;
260 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
261 if (ha->optrom_buffer == NULL) {
262 qla_printk(KERN_WARNING, ha,
263 "Unable to allocate memory for optrom retrieval "
264 "(%x).\n", ha->optrom_size);
265
266 ha->optrom_state = QLA_SWAITING;
267 return count;
268 }
269
270 memset(ha->optrom_buffer, 0, ha->optrom_size);
271 ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
272 ha->optrom_size);
273 break;
274 case 2:
275 if (ha->optrom_state != QLA_SWAITING)
276 break;
277
278 ha->optrom_state = QLA_SWRITING;
279 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
280 if (ha->optrom_buffer == NULL) {
281 qla_printk(KERN_WARNING, ha,
282 "Unable to allocate memory for optrom update "
283 "(%x).\n", ha->optrom_size);
284
285 ha->optrom_state = QLA_SWAITING;
286 return count;
287 }
288 memset(ha->optrom_buffer, 0, ha->optrom_size);
289 break;
290 case 3:
291 if (ha->optrom_state != QLA_SWRITING)
292 break;
293
294 ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0,
295 ha->optrom_size);
296 break;
297 }
298 return count;
299}
300
301static struct bin_attribute sysfs_optrom_ctl_attr = {
302 .attr = {
303 .name = "optrom_ctl",
304 .mode = S_IWUSR,
305 .owner = THIS_MODULE,
306 },
307 .size = 0,
308 .write = qla2x00_sysfs_write_optrom_ctl,
309};
310
183void 311void
184qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 312qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
185{ 313{
186 struct Scsi_Host *host = ha->host; 314 struct Scsi_Host *host = ha->host;
187 315
188 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 316 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
189 sysfs_nvram_attr.size = ha->nvram_size;
190 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 317 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
318 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
319 sysfs_create_bin_file(&host->shost_gendev.kobj,
320 &sysfs_optrom_ctl_attr);
191} 321}
192 322
193void 323void
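
Editorial note: the optrom/optrom_ctl pair implements a small state machine around the option-ROM flash. Writing 1 to optrom_ctl allocates a staging buffer and reads the flash into it (QLA_SREADING), writing 2 prepares an empty buffer for an update (QLA_SWRITING), writing 3 burns the staged buffer via write_optrom, and writing 0 frees the buffer and returns to QLA_SWAITING; the optrom binary file is only readable or writable while the matching state is active. A rough userspace sketch of a flash read through this interface (the sysfs path and the 128 KB size are assumptions for illustration, not taken from the patch):

/* Sketch: read the option ROM through the new sysfs pair. */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define CTL "/sys/class/scsi_host/host0/device/optrom_ctl"
#define ROM "/sys/class/scsi_host/host0/device/optrom"

int main(void)
{
	char *buf = malloc(0x20000);		/* OPTROM_SIZE_2300, assumed */
	int ctl = open(CTL, O_WRONLY);
	int rom;

	if (!buf || ctl < 0)
		return 1;
	write(ctl, "1", 1);			/* snapshot flash into the staging buffer */
	rom = open(ROM, O_RDONLY);
	if (rom >= 0) {
		read(rom, buf, 0x20000);	/* copy the staged image to userspace */
		close(rom);
	}
	write(ctl, "0", 1);			/* release the staging buffer */
	close(ctl);
	free(buf);
	return 0;
}
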
@@ -197,6 +327,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
197 327
198 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 328 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
199 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 329 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
330 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
331 sysfs_remove_bin_file(&host->shost_gendev.kobj,
332 &sysfs_optrom_ctl_attr);
333
334 if (ha->beacon_blink_led == 1)
335 ha->isp_ops.beacon_off(ha);
200} 336}
201 337
202/* Scsi_Host attributes. */ 338/* Scsi_Host attributes. */
@@ -384,6 +520,50 @@ qla2x00_zio_timer_store(struct class_device *cdev, const char *buf,
384 return strlen(buf); 520 return strlen(buf);
385} 521}
386 522
523static ssize_t
524qla2x00_beacon_show(struct class_device *cdev, char *buf)
525{
526 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
527 int len = 0;
528
529 if (ha->beacon_blink_led)
530 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
531 else
532 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
533 return len;
534}
535
536static ssize_t
537qla2x00_beacon_store(struct class_device *cdev, const char *buf,
538 size_t count)
539{
540 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
541 int val = 0;
542 int rval;
543
544 if (IS_QLA2100(ha) || IS_QLA2200(ha))
545 return -EPERM;
546
547 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
548 qla_printk(KERN_WARNING, ha,
549 "Abort ISP active -- ignoring beacon request.\n");
550 return -EBUSY;
551 }
552
553 if (sscanf(buf, "%d", &val) != 1)
554 return -EINVAL;
555
556 if (val)
557 rval = ha->isp_ops.beacon_on(ha);
558 else
559 rval = ha->isp_ops.beacon_off(ha);
560
561 if (rval != QLA_SUCCESS)
562 count = 0;
563
564 return count;
565}
566
387static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 567static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
388 NULL); 568 NULL);
389static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 569static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
@@ -398,6 +578,8 @@ static CLASS_DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show,
398 qla2x00_zio_store); 578 qla2x00_zio_store);
399static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 579static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
400 qla2x00_zio_timer_store); 580 qla2x00_zio_timer_store);
581static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
582 qla2x00_beacon_store);
401 583
402struct class_device_attribute *qla2x00_host_attrs[] = { 584struct class_device_attribute *qla2x00_host_attrs[] = {
403 &class_device_attr_driver_version, 585 &class_device_attr_driver_version,
@@ -411,6 +593,7 @@ struct class_device_attribute *qla2x00_host_attrs[] = {
411 &class_device_attr_state, 593 &class_device_attr_state,
412 &class_device_attr_zio, 594 &class_device_attr_zio,
413 &class_device_attr_zio_timer, 595 &class_device_attr_zio_timer,
596 &class_device_attr_beacon,
414 NULL, 597 NULL,
415}; 598};
416 599
@@ -426,6 +609,49 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
426} 609}
427 610
428static void 611static void
612qla2x00_get_host_speed(struct Scsi_Host *shost)
613{
614 scsi_qla_host_t *ha = to_qla_host(shost);
615 uint32_t speed = 0;
616
617 switch (ha->link_data_rate) {
618 case LDR_1GB:
619 speed = 1;
620 break;
621 case LDR_2GB:
622 speed = 2;
623 break;
624 case LDR_4GB:
625 speed = 4;
626 break;
627 }
628 fc_host_speed(shost) = speed;
629}
630
631static void
632qla2x00_get_host_port_type(struct Scsi_Host *shost)
633{
634 scsi_qla_host_t *ha = to_qla_host(shost);
635 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
636
637 switch (ha->current_topology) {
638 case ISP_CFG_NL:
639 port_type = FC_PORTTYPE_LPORT;
640 break;
641 case ISP_CFG_FL:
642 port_type = FC_PORTTYPE_NLPORT;
643 break;
644 case ISP_CFG_N:
645 port_type = FC_PORTTYPE_PTP;
646 break;
647 case ISP_CFG_F:
648 port_type = FC_PORTTYPE_NPORT;
649 break;
650 }
651 fc_host_port_type(shost) = port_type;
652}
653
654static void
429qla2x00_get_starget_node_name(struct scsi_target *starget) 655qla2x00_get_starget_node_name(struct scsi_target *starget)
430{ 656{
431 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 657 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
@@ -512,6 +738,41 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
512 return 0; 738 return 0;
513} 739}
514 740
741static struct fc_host_statistics *
742qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
743{
744 scsi_qla_host_t *ha = to_qla_host(shost);
745 int rval;
746 uint16_t mb_stat[1];
747 link_stat_t stat_buf;
748 struct fc_host_statistics *pfc_host_stat;
749
750 pfc_host_stat = &ha->fc_host_stat;
751 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
752
753 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
754 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
755 sizeof(stat_buf) / 4, mb_stat);
756 } else {
757 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
758 mb_stat);
759 }
760 if (rval != 0) {
761 qla_printk(KERN_WARNING, ha,
762 "Unable to retrieve host statistics (%d).\n", mb_stat[0]);
763 return pfc_host_stat;
764 }
765
766 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
767 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
768 pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt;
769 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
770 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
771 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
772
773 return pfc_host_stat;
774}
775
515struct fc_function_template qla2xxx_transport_functions = { 776struct fc_function_template qla2xxx_transport_functions = {
516 777
517 .show_host_node_name = 1, 778 .show_host_node_name = 1,
@@ -520,6 +781,10 @@ struct fc_function_template qla2xxx_transport_functions = {
520 781
521 .get_host_port_id = qla2x00_get_host_port_id, 782 .get_host_port_id = qla2x00_get_host_port_id,
522 .show_host_port_id = 1, 783 .show_host_port_id = 1,
784 .get_host_speed = qla2x00_get_host_speed,
785 .show_host_speed = 1,
786 .get_host_port_type = qla2x00_get_host_port_type,
787 .show_host_port_type = 1,
523 788
524 .dd_fcrport_size = sizeof(struct fc_port *), 789 .dd_fcrport_size = sizeof(struct fc_port *),
525 .show_rport_supported_classes = 1, 790 .show_rport_supported_classes = 1,
@@ -536,6 +801,7 @@ struct fc_function_template qla2xxx_transport_functions = {
536 .show_rport_dev_loss_tmo = 1, 801 .show_rport_dev_loss_tmo = 1,
537 802
538 .issue_fc_host_lip = qla2x00_issue_lip, 803 .issue_fc_host_lip = qla2x00_issue_lip,
804 .get_fc_host_stats = qla2x00_get_fc_host_stats,
539}; 805};
540 806
541void 807void
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bad066e5772a..b31a03bbd14f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -29,6 +29,7 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
31#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_transport_fc.h>
32 33
33#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) 34#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
34#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) 35#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE)
@@ -181,6 +182,13 @@
181#define WRT_REG_DWORD(addr, data) writel(data,addr) 182#define WRT_REG_DWORD(addr, data) writel(data,addr)
182 183
183/* 184/*
185 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
186 * 133Mhz slot.
187 */
188#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
189#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
190
191/*
184 * Fibre Channel device definitions. 192 * Fibre Channel device definitions.
185 */ 193 */
186#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ 194#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
@@ -432,6 +440,9 @@ struct device_reg_2xxx {
432#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 440#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
433#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 441#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080
434#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 442#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0
443#define GPIO_LED_ALL_OFF 0x0000
444#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */
445#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */
435 446
436 union { 447 union {
437 struct { 448 struct {
@@ -2199,6 +2210,15 @@ struct isp_operations {
2199 2210
2200 void (*fw_dump) (struct scsi_qla_host *, int); 2211 void (*fw_dump) (struct scsi_qla_host *, int);
2201 void (*ascii_fw_dump) (struct scsi_qla_host *); 2212 void (*ascii_fw_dump) (struct scsi_qla_host *);
2213
2214 int (*beacon_on) (struct scsi_qla_host *);
2215 int (*beacon_off) (struct scsi_qla_host *);
2216 void (*beacon_blink) (struct scsi_qla_host *);
2217
2218 uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
2219 uint32_t, uint32_t);
2220 int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
2221 uint32_t);
2202}; 2222};
2203 2223
2204/* 2224/*
@@ -2331,6 +2351,10 @@ typedef struct scsi_qla_host {
2331 uint16_t min_external_loopid; /* First external loop Id */ 2351 uint16_t min_external_loopid; /* First external loop Id */
2332 2352
2333 uint16_t link_data_rate; /* F/W operating speed */ 2353 uint16_t link_data_rate; /* F/W operating speed */
2354#define LDR_1GB 0
2355#define LDR_2GB 1
2356#define LDR_4GB 3
2357#define LDR_UNKNOWN 0xFFFF
2334 2358
2335 uint8_t current_topology; 2359 uint8_t current_topology;
2336 uint8_t prev_topology; 2360 uint8_t prev_topology;
@@ -2486,12 +2510,26 @@ typedef struct scsi_qla_host {
2486 uint8_t *port_name; 2510 uint8_t *port_name;
2487 uint32_t isp_abort_cnt; 2511 uint32_t isp_abort_cnt;
2488 2512
2513 /* Option ROM information. */
2514 char *optrom_buffer;
2515 uint32_t optrom_size;
2516 int optrom_state;
2517#define QLA_SWAITING 0
2518#define QLA_SREADING 1
2519#define QLA_SWRITING 2
2520
2489 /* Needed for BEACON */ 2521 /* Needed for BEACON */
2490 uint16_t beacon_blink_led; 2522 uint16_t beacon_blink_led;
2491 uint16_t beacon_green_on; 2523 uint8_t beacon_color_state;
2524#define QLA_LED_GRN_ON 0x01
2525#define QLA_LED_YLW_ON 0x02
2526#define QLA_LED_ABR_ON 0x04
2527#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2528 /* ISP2322: red, green, amber. */
2492 2529
2493 uint16_t zio_mode; 2530 uint16_t zio_mode;
2494 uint16_t zio_timer; 2531 uint16_t zio_timer;
2532 struct fc_host_statistics fc_host_stat;
2495} scsi_qla_host_t; 2533} scsi_qla_host_t;
2496 2534
2497 2535
@@ -2557,7 +2595,9 @@ struct _qla2x00stats {
2557/* 2595/*
2558 * Flash support definitions 2596 * Flash support definitions
2559 */ 2597 */
2560#define FLASH_IMAGE_SIZE 131072 2598#define OPTROM_SIZE_2300 0x20000
2599#define OPTROM_SIZE_2322 0x100000
2600#define OPTROM_SIZE_24XX 0x100000
2561 2601
2562#include "qla_gbl.h" 2602#include "qla_gbl.h"
2563#include "qla_dbg.h" 2603#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 35266bd5d538..ffdc2680f049 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -75,12 +75,12 @@ extern void qla2x00_cmd_timeout(srb_t *);
75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
77 77
78extern void qla2x00_blink_led(scsi_qla_host_t *);
79
80extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 78extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
81 79
82extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 80extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
83 81
82extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
83
84/* 84/*
85 * Global Function Prototypes in qla_iocb.c source file. 85 * Global Function Prototypes in qla_iocb.c source file.
86 */ 86 */
@@ -185,6 +185,13 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
185extern int 185extern int
186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
187 187
188extern int
189qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *,
190 uint16_t *);
191
192extern int
193qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *);
194
188extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 195extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
189extern int qla24xx_abort_target(fc_port_t *); 196extern int qla24xx_abort_target(fc_port_t *);
190 197
@@ -228,6 +235,22 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
228extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 235extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
229 uint32_t); 236 uint32_t);
230 237
238extern int qla2x00_beacon_on(struct scsi_qla_host *);
239extern int qla2x00_beacon_off(struct scsi_qla_host *);
240extern void qla2x00_beacon_blink(struct scsi_qla_host *);
241extern int qla24xx_beacon_on(struct scsi_qla_host *);
242extern int qla24xx_beacon_off(struct scsi_qla_host *);
243extern void qla24xx_beacon_blink(struct scsi_qla_host *);
244
245extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
246 uint32_t, uint32_t);
247extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
248 uint32_t, uint32_t);
249extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
250 uint32_t, uint32_t);
251extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
252 uint32_t, uint32_t);
253
231/* 254/*
232 * Global Function Prototypes in qla_dbg.c source file. 255 * Global Function Prototypes in qla_dbg.c source file.
233 */ 256 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e67bb0997818..634ee174bff2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <scsi/scsi_transport_fc.h>
12 11
13#include "qla_devtbl.h" 12#include "qla_devtbl.h"
14 13
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7ec0b8d6f07b..6544b6d0891d 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -814,6 +814,7 @@ qla24xx_start_scsi(srb_t *sp)
814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
815 815
816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
817 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
817 818
818 /* Load SCSI command packet. */ 819 /* Load SCSI command packet. */
819 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 820 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71a46fcee8cc..42aa7a7c1a73 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -402,9 +402,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
402 break; 402 break;
403 403
404 case MBA_LOOP_UP: /* Loop Up Event */ 404 case MBA_LOOP_UP: /* Loop Up Event */
405 ha->link_data_rate = 0;
406 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
407 link_speed = link_speeds[0]; 406 link_speed = link_speeds[0];
407 ha->link_data_rate = LDR_1GB;
408 } else { 408 } else {
409 link_speed = link_speeds[LS_UNKNOWN]; 409 link_speed = link_speeds[LS_UNKNOWN];
410 if (mb[1] < 5) 410 if (mb[1] < 5)
@@ -436,7 +436,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
436 } 436 }
437 437
438 ha->flags.management_server_logged_in = 0; 438 ha->flags.management_server_logged_in = 0;
439 ha->link_data_rate = 0; 439 ha->link_data_rate = LDR_UNKNOWN;
440 if (ql2xfdmienable) 440 if (ql2xfdmienable)
441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
442 442
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3099b379de9d..363dfdd042b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12static void 11static void
13qla2x00_mbx_sem_timeout(unsigned long data) 12qla2x00_mbx_sem_timeout(unsigned long data)
@@ -1874,7 +1873,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1874 mcp->mb[3] = LSW(id_list_dma); 1873 mcp->mb[3] = LSW(id_list_dma);
1875 mcp->mb[6] = MSW(MSD(id_list_dma)); 1874 mcp->mb[6] = MSW(MSD(id_list_dma));
1876 mcp->mb[7] = LSW(MSD(id_list_dma)); 1875 mcp->mb[7] = LSW(MSD(id_list_dma));
1877 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; 1876 mcp->mb[8] = 0;
1877 mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1878 } else { 1878 } else {
1879 mcp->mb[1] = MSW(id_list_dma); 1879 mcp->mb[1] = MSW(id_list_dma);
1880 mcp->mb[2] = LSW(id_list_dma); 1880 mcp->mb[2] = LSW(id_list_dma);
@@ -2017,8 +2017,109 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2017 2017
2018 return rval; 2018 return rval;
2019} 2019}
2020#endif
2021
2022/*
2023 * qla2x00_get_link_status
2024 *
2025 * Input:
2026 * ha = adapter block pointer.
2027 * loop_id = device loop ID.
2028 * ret_buf = pointer to link status return buffer.
2029 *
2030 * Returns:
2031 * 0 = success.
2032 * BIT_0 = mem alloc error.
2033 * BIT_1 = mailbox error.
2034 */
2035int
2036qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2037 link_stat_t *ret_buf, uint16_t *status)
2038{
2039 int rval;
2040 mbx_cmd_t mc;
2041 mbx_cmd_t *mcp = &mc;
2042 link_stat_t *stat_buf;
2043 dma_addr_t stat_buf_dma;
2044
2045 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);)
2046
2047 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2048 if (stat_buf == NULL) {
2049 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
2050 __func__, ha->host_no));
2051 return BIT_0;
2052 }
2053 memset(stat_buf, 0, sizeof(link_stat_t));
2054
2055 mcp->mb[0] = MBC_GET_LINK_STATUS;
2056 mcp->mb[2] = MSW(stat_buf_dma);
2057 mcp->mb[3] = LSW(stat_buf_dma);
2058 mcp->mb[6] = MSW(MSD(stat_buf_dma));
2059 mcp->mb[7] = LSW(MSD(stat_buf_dma));
2060 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2061 mcp->in_mb = MBX_0;
2062 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
2063 mcp->mb[1] = loop_id;
2064 mcp->mb[4] = 0;
2065 mcp->mb[10] = 0;
2066 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2067 mcp->in_mb |= MBX_1;
2068 } else if (HAS_EXTENDED_IDS(ha)) {
2069 mcp->mb[1] = loop_id;
2070 mcp->mb[10] = 0;
2071 mcp->out_mb |= MBX_10|MBX_1;
2072 } else {
2073 mcp->mb[1] = loop_id << 8;
2074 mcp->out_mb |= MBX_1;
2075 }
2076 mcp->tov = 30;
2077 mcp->flags = IOCTL_CMD;
2078 rval = qla2x00_mailbox_command(ha, mcp);
2079
2080 if (rval == QLA_SUCCESS) {
2081 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2082 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2083 __func__, ha->host_no, mcp->mb[0]);)
2084 status[0] = mcp->mb[0];
2085 rval = BIT_1;
2086 } else {
2087 /* copy over data -- firmware data is LE. */
2088 ret_buf->link_fail_cnt =
2089 le32_to_cpu(stat_buf->link_fail_cnt);
2090 ret_buf->loss_sync_cnt =
2091 le32_to_cpu(stat_buf->loss_sync_cnt);
2092 ret_buf->loss_sig_cnt =
2093 le32_to_cpu(stat_buf->loss_sig_cnt);
2094 ret_buf->prim_seq_err_cnt =
2095 le32_to_cpu(stat_buf->prim_seq_err_cnt);
2096 ret_buf->inval_xmit_word_cnt =
2097 le32_to_cpu(stat_buf->inval_xmit_word_cnt);
2098 ret_buf->inval_crc_cnt =
2099 le32_to_cpu(stat_buf->inval_crc_cnt);
2100
2101 DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d "
2102 "loss_sync=%d loss_sig=%d seq_err=%d "
2103 "inval_xmt_word=%d inval_crc=%d.\n", __func__,
2104 ha->host_no, stat_buf->link_fail_cnt,
2105 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2106 stat_buf->prim_seq_err_cnt,
2107 stat_buf->inval_xmit_word_cnt,
2108 stat_buf->inval_crc_cnt);)
2109 }
2110 } else {
2111 /* Failed. */
2112 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2113 ha->host_no, rval);)
2114 rval = BIT_1;
2115 }
2116
2117 dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);
2020 2118
2021uint8_t 2119 return rval;
2120}
2121
2122int
2022qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2123qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2023 uint16_t *status) 2124 uint16_t *status)
2024{ 2125{
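
Editorial note: qla2x00_get_link_status above follows the driver's usual mailbox-with-DMA-buffer recipe — a scratch buffer comes from the s_dma_pool, its bus address is split across the 16-bit mailbox registers, out_mb/in_mb name which registers are significant, and on MBS_COMMAND_COMPLETE the little-endian firmware counters are converted before the buffer goes back to the pool. Condensed to its skeleton (error handling and the per-chip mb[1] variants trimmed; names as in the hunk, not a drop-in replacement):

/* Sketch: the shape of a mailbox command that hands the firmware a DMA
 * buffer and converts the little-endian results it writes back. */
static int example_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
				   link_stat_t *ret_buf, uint16_t *status)
{
	mbx_cmd_t mc, *mcp = &mc;
	dma_addr_t dma;
	link_stat_t *buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &dma);
	int rval = BIT_1;				/* assume mailbox failure */

	if (!buf)
		return BIT_0;				/* allocation failure */

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[1] = loop_id;
	mcp->mb[2] = MSW(dma);				/* buffer address, 16 bits at a time */
	mcp->mb[3] = LSW(dma);
	mcp->mb[6] = MSW(MSD(dma));
	mcp->mb[7] = LSW(MSD(dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = IOCTL_CMD;

	if (qla2x00_mailbox_command(ha, mcp) == QLA_SUCCESS &&
	    mcp->mb[0] == MBS_COMMAND_COMPLETE) {
		ret_buf->link_fail_cnt = le32_to_cpu(buf->link_fail_cnt);
		/* ... remaining counters converted the same way ... */
		rval = QLA_SUCCESS;
	} else
		status[0] = mcp->mb[0];			/* report firmware status */

	dma_pool_free(ha->s_dma_pool, buf, dma);
	return rval;
}
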
@@ -2080,7 +2181,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2080 2181
2081 return rval; 2182 return rval;
2082} 2183}
2083#endif
2084 2184
2085int 2185int
2086qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) 2186qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5866a7c706a8..9f91f1a20542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -366,6 +366,12 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
366 goto qc_fail_command; 366 goto qc_fail_command;
367 } 367 }
368 368
369 /* Close window on fcport/rport state-transitioning. */
370 if (!*(fc_port_t **)rport->dd_data) {
371 cmd->result = DID_IMM_RETRY << 16;
372 goto qc_fail_command;
373 }
374
369 if (atomic_read(&fcport->state) != FCS_ONLINE) { 375 if (atomic_read(&fcport->state) != FCS_ONLINE) {
370 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 376 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
371 atomic_read(&ha->loop_state) == LOOP_DEAD) { 377 atomic_read(&ha->loop_state) == LOOP_DEAD) {
@@ -421,6 +427,12 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
421 goto qc24_fail_command; 427 goto qc24_fail_command;
422 } 428 }
423 429
430 /* Close window on fcport/rport state-transitioning. */
431 if (!*(fc_port_t **)rport->dd_data) {
432 cmd->result = DID_IMM_RETRY << 16;
433 goto qc24_fail_command;
434 }
435
424 if (atomic_read(&fcport->state) != FCS_ONLINE) { 436 if (atomic_read(&fcport->state) != FCS_ONLINE) {
425 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 437 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
426 atomic_read(&ha->loop_state) == LOOP_DEAD) { 438 atomic_read(&ha->loop_state) == LOOP_DEAD) {
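
Editorial note: the queuecommand guards added above close a window during rport teardown. The fc_rport's dd_data slot holds the driver's fc_port_t pointer only while the remote port is fully bound; while the port is transitioning it is cleared, so a command arriving in that window is completed with DID_IMM_RETRY instead of dereferencing a stale fcport. In sketch form (helper name hypothetical; the done()/return 0 completion mirrors how the surrounding functions fail commands):

/* Sketch: returns non-zero when the command was bounced back because the
 * rport is mid-transition and its driver data has been unhooked. */
static int example_rport_in_transition(struct fc_rport *rport,
				       struct scsi_cmnd *cmd,
				       void (*done)(struct scsi_cmnd *))
{
	if (*(fc_port_t **)rport->dd_data)
		return 0;			/* fcport still valid */

	cmd->result = DID_IMM_RETRY << 16;	/* ask the midlayer to retry */
	done(cmd);
	return 1;
}
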
@@ -513,7 +525,7 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
513 * Success (Adapter is online) : 0 525 * Success (Adapter is online) : 0
514 * Failed (Adapter is offline/disabled) : 1 526 * Failed (Adapter is offline/disabled) : 1
515 */ 527 */
516static int 528int
517qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 529qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
518{ 530{
519 int return_status; 531 int return_status;
@@ -1312,6 +1324,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1312 ha->ports = MAX_BUSES; 1324 ha->ports = MAX_BUSES;
1313 ha->init_cb_size = sizeof(init_cb_t); 1325 ha->init_cb_size = sizeof(init_cb_t);
1314 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1326 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1327 ha->link_data_rate = LDR_UNKNOWN;
1328 ha->optrom_size = OPTROM_SIZE_2300;
1315 1329
1316 /* Assign ISP specific operations. */ 1330 /* Assign ISP specific operations. */
1317 ha->isp_ops.pci_config = qla2100_pci_config; 1331 ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1339,6 +1353,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1339 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1353 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1340 ha->isp_ops.fw_dump = qla2100_fw_dump; 1354 ha->isp_ops.fw_dump = qla2100_fw_dump;
1341 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1355 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
1356 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1357 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1342 if (IS_QLA2100(ha)) { 1358 if (IS_QLA2100(ha)) {
1343 host->max_id = MAX_TARGETS_2100; 1359 host->max_id = MAX_TARGETS_2100;
1344 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1360 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1364,7 +1380,12 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1364 ha->isp_ops.intr_handler = qla2300_intr_handler; 1380 ha->isp_ops.intr_handler = qla2300_intr_handler;
1365 ha->isp_ops.fw_dump = qla2300_fw_dump; 1381 ha->isp_ops.fw_dump = qla2300_fw_dump;
1366 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1382 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1383 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1384 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1385 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
1367 ha->gid_list_info_size = 6; 1386 ha->gid_list_info_size = 6;
1387 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1388 ha->optrom_size = OPTROM_SIZE_2322;
1368 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1389 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1369 host->max_id = MAX_TARGETS_2200; 1390 host->max_id = MAX_TARGETS_2200;
1370 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1391 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1400,7 +1421,13 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1400 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1421 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1401 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1422 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1402 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1423 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1424 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1425 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1426 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1427 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1428 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1403 ha->gid_list_info_size = 8; 1429 ha->gid_list_info_size = 8;
1430 ha->optrom_size = OPTROM_SIZE_24XX;
1404 } 1431 }
1405 host->can_queue = ha->request_q_length + 128; 1432 host->can_queue = ha->request_q_length + 128;
1406 1433
@@ -1657,11 +1684,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1657 spin_lock_irqsave(&fcport->rport_lock, flags); 1684 spin_lock_irqsave(&fcport->rport_lock, flags);
1658 fcport->drport = rport; 1685 fcport->drport = rport;
1659 fcport->rport = NULL; 1686 fcport->rport = NULL;
1687 *(fc_port_t **)rport->dd_data = NULL;
1660 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1688 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1661 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1689 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
1662 } else { 1690 } else {
1663 spin_lock_irqsave(&fcport->rport_lock, flags); 1691 spin_lock_irqsave(&fcport->rport_lock, flags);
1664 fcport->rport = NULL; 1692 fcport->rport = NULL;
1693 *(fc_port_t **)rport->dd_data = NULL;
1665 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1694 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1666 fc_remote_port_delete(rport); 1695 fc_remote_port_delete(rport);
1667 } 1696 }
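
For illustration only, not part of the patch: the hunk above and the new checks in both queuecommand paths rely on rport->dd_data caching the driver's fcport pointer. The sketch below assumes, hypothetically, that the registration path fills dd_data in when the remote port is (re)created; clearing it here is what closes the window.

/* Sketch, names invented: the assumed registration-side counterpart. */
static void example_bind_rport(fc_port_t *fcport, struct fc_rport *rport)
{
	*(fc_port_t **)rport->dd_data = fcport;	/* let the I/O path find us */
	fcport->rport = rport;
}

/* The I/O path can then refuse commands while the binding is torn down. */
static int example_rport_bound(struct fc_rport *rport)
{
	return *(fc_port_t **)rport->dd_data != NULL;
}
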
@@ -2066,6 +2095,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2066 ha->fw_dumped = 0; 2095 ha->fw_dumped = 0;
2067 ha->fw_dump_reading = 0; 2096 ha->fw_dump_reading = 0;
2068 ha->fw_dump_buffer = NULL; 2097 ha->fw_dump_buffer = NULL;
2098
2099 vfree(ha->optrom_buffer);
2069} 2100}
2070 2101
2071/* 2102/*
@@ -2314,6 +2345,9 @@ qla2x00_do_dpc(void *data)
2314 if (!ha->interrupts_on) 2345 if (!ha->interrupts_on)
2315 ha->isp_ops.enable_intrs(ha); 2346 ha->isp_ops.enable_intrs(ha);
2316 2347
2348 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2349 ha->isp_ops.beacon_blink(ha);
2350
2317 ha->dpc_active = 0; 2351 ha->dpc_active = 0;
2318 } /* End of while(1) */ 2352 } /* End of while(1) */
2319 2353
@@ -2491,6 +2525,12 @@ qla2x00_timer(scsi_qla_host_t *ha)
2491 atomic_read(&ha->loop_down_timer))); 2525 atomic_read(&ha->loop_down_timer)));
2492 } 2526 }
2493 2527
2528 /* Check if beacon LED needs to be blinked */
2529 if (ha->beacon_blink_led == 1) {
2530 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
2531 start_dpc++;
2532 }
2533
2494 /* Schedule the DPC routine if needed */ 2534 /* Schedule the DPC routine if needed */
2495 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2535 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
2496 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2536 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
@@ -2499,6 +2539,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2499 start_dpc || 2539 start_dpc ||
2500 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2540 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2501 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2541 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
2542 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
2502 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2543 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
2503 ha->dpc_wait && !ha->dpc_active) { 2544 ha->dpc_wait && !ha->dpc_active) {
2504 2545
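
A condensed sketch, for orientation only, of how the blink request added above travels from the timer to the DPC thread (simplified from the two hunks; not literal driver code):

/* Timer tick: note that a blink is due and ask for the DPC thread. */
static void example_beacon_timer_tick(scsi_qla_host_t *ha, int *start_dpc)
{
	if (ha->beacon_blink_led == 1) {
		set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
		(*start_dpc)++;
	}
}

/* DPC thread: consume the request and toggle the LED state once. */
static void example_beacon_dpc_pass(scsi_qla_host_t *ha)
{
	if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
		ha->isp_ops.beacon_blink(ha);
}
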
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 2c3342108dd8..b70bebe18c01 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -6,8 +6,6 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <scsi/scsi_transport_fc.h>
10
11/** 9/**
12 * IO descriptor handle definitions. 10 * IO descriptor handle definitions.
13 * 11 *
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f4d755a643e4..3866a5760f15 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -695,3 +695,966 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
695 695
696 return ret; 696 return ret;
697} 697}
698
699
700static inline void
701qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
702{
703 if (IS_QLA2322(ha)) {
704 /* Flip all colors. */
705 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
706 /* Turn off. */
707 ha->beacon_color_state = 0;
708 *pflags = GPIO_LED_ALL_OFF;
709 } else {
710 /* Turn on. */
711 ha->beacon_color_state = QLA_LED_ALL_ON;
712 *pflags = GPIO_LED_RGA_ON;
713 }
714 } else {
715 /* Flip green led only. */
716 if (ha->beacon_color_state == QLA_LED_GRN_ON) {
717 /* Turn off. */
718 ha->beacon_color_state = 0;
719 *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF;
720 } else {
721 /* Turn on. */
722 ha->beacon_color_state = QLA_LED_GRN_ON;
723 *pflags = GPIO_LED_GREEN_ON_AMBER_OFF;
724 }
725 }
726}
727
728void
729qla2x00_beacon_blink(struct scsi_qla_host *ha)
730{
731 uint16_t gpio_enable;
732 uint16_t gpio_data;
733 uint16_t led_color = 0;
734 unsigned long flags;
735 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
736
737 if (ha->pio_address)
738 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
739
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741
742 /* Save the Original GPIOE. */
743 if (ha->pio_address) {
744 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
745 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
746 } else {
747 gpio_enable = RD_REG_WORD(&reg->gpioe);
748 gpio_data = RD_REG_WORD(&reg->gpiod);
749 }
750
751 /* Set the modified gpio_enable values */
752 gpio_enable |= GPIO_LED_MASK;
753
754 if (ha->pio_address) {
755 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
756 } else {
757 WRT_REG_WORD(&reg->gpioe, gpio_enable);
758 RD_REG_WORD(&reg->gpioe);
759 }
760
761 qla2x00_flip_colors(ha, &led_color);
762
763 /* Clear out any previously set LED color. */
764 gpio_data &= ~GPIO_LED_MASK;
765
766 /* Set the new input LED color to GPIOD. */
767 gpio_data |= led_color;
768
769 /* Set the modified gpio_data values */
770 if (ha->pio_address) {
771 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
772 } else {
773 WRT_REG_WORD(&reg->gpiod, gpio_data);
774 RD_REG_WORD(&reg->gpiod);
775 }
776
777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
778}
779
780int
781qla2x00_beacon_on(struct scsi_qla_host *ha)
782{
783 uint16_t gpio_enable;
784 uint16_t gpio_data;
785 unsigned long flags;
786 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
787
788 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
789 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
790
791 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
792 qla_printk(KERN_WARNING, ha,
793 "Unable to update fw options (beacon on).\n");
794 return QLA_FUNCTION_FAILED;
795 }
796
797 if (ha->pio_address)
798 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
799
800 /* Turn off LEDs. */
801 spin_lock_irqsave(&ha->hardware_lock, flags);
802 if (ha->pio_address) {
803 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
804 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
805 } else {
806 gpio_enable = RD_REG_WORD(&reg->gpioe);
807 gpio_data = RD_REG_WORD(&reg->gpiod);
808 }
809 gpio_enable |= GPIO_LED_MASK;
810
811 /* Set the modified gpio_enable values. */
812 if (ha->pio_address) {
813 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
814 } else {
815 WRT_REG_WORD(&reg->gpioe, gpio_enable);
816 RD_REG_WORD(&reg->gpioe);
817 }
818
 819	/* Clear out previously set LED color. */
820 gpio_data &= ~GPIO_LED_MASK;
821 if (ha->pio_address) {
822 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
823 } else {
824 WRT_REG_WORD(&reg->gpiod, gpio_data);
825 RD_REG_WORD(&reg->gpiod);
826 }
827 spin_unlock_irqrestore(&ha->hardware_lock, flags);
828
829 /*
830 * Let the per HBA timer kick off the blinking process based on
831 * the following flags. No need to do anything else now.
832 */
833 ha->beacon_blink_led = 1;
834 ha->beacon_color_state = 0;
835
836 return QLA_SUCCESS;
837}
838
839int
840qla2x00_beacon_off(struct scsi_qla_host *ha)
841{
842 int rval = QLA_SUCCESS;
843
844 ha->beacon_blink_led = 0;
845
846 /* Set the on flag so when it gets flipped it will be off. */
847 if (IS_QLA2322(ha))
848 ha->beacon_color_state = QLA_LED_ALL_ON;
849 else
850 ha->beacon_color_state = QLA_LED_GRN_ON;
851
852 ha->isp_ops.beacon_blink(ha); /* This turns green LED off */
853
854 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
855 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
856
857 rval = qla2x00_set_fw_options(ha, ha->fw_options);
858 if (rval != QLA_SUCCESS)
859 qla_printk(KERN_WARNING, ha,
860 "Unable to update fw options (beacon off).\n");
861 return rval;
862}
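
For illustration, a minimal sketch of how a control path (for example a hypothetical sysfs store handler, not shown in this hunk) might drive the beacon hooks; the helper name is invented:

static int example_set_beacon(scsi_qla_host_t *ha, int wanted)
{
	if (wanted && !ha->beacon_blink_led)
		return ha->isp_ops.beacon_on(ha);	/* start blinking */
	if (!wanted && ha->beacon_blink_led)
		return ha->isp_ops.beacon_off(ha);	/* stop, restore fw */
	return QLA_SUCCESS;				/* already in that state */
}
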
863
864
865static inline void
866qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
867{
868 /* Flip all colors. */
869 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
870 /* Turn off. */
871 ha->beacon_color_state = 0;
872 *pflags = 0;
873 } else {
874 /* Turn on. */
875 ha->beacon_color_state = QLA_LED_ALL_ON;
876 *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON;
877 }
878}
879
880void
881qla24xx_beacon_blink(struct scsi_qla_host *ha)
882{
883 uint16_t led_color = 0;
884 uint32_t gpio_data;
885 unsigned long flags;
886 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
887
888 /* Save the Original GPIOD. */
889 spin_lock_irqsave(&ha->hardware_lock, flags);
890 gpio_data = RD_REG_DWORD(&reg->gpiod);
891
892 /* Enable the gpio_data reg for update. */
893 gpio_data |= GPDX_LED_UPDATE_MASK;
894
895 WRT_REG_DWORD(&reg->gpiod, gpio_data);
896 gpio_data = RD_REG_DWORD(&reg->gpiod);
897
898 /* Set the color bits. */
899 qla24xx_flip_colors(ha, &led_color);
900
901 /* Clear out any previously set LED color. */
902 gpio_data &= ~GPDX_LED_COLOR_MASK;
903
904 /* Set the new input LED color to GPIOD. */
905 gpio_data |= led_color;
906
907 /* Set the modified gpio_data values. */
908 WRT_REG_DWORD(&reg->gpiod, gpio_data);
909 gpio_data = RD_REG_DWORD(&reg->gpiod);
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911}
912
913int
914qla24xx_beacon_on(struct scsi_qla_host *ha)
915{
916 uint32_t gpio_data;
917 unsigned long flags;
918 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
919
920 if (ha->beacon_blink_led == 0) {
921 /* Enable firmware for update */
922 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
923
924 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS)
925 return QLA_FUNCTION_FAILED;
926
927 if (qla2x00_get_fw_options(ha, ha->fw_options) !=
928 QLA_SUCCESS) {
929 qla_printk(KERN_WARNING, ha,
930 "Unable to update fw options (beacon on).\n");
931 return QLA_FUNCTION_FAILED;
932 }
933
934 spin_lock_irqsave(&ha->hardware_lock, flags);
935 gpio_data = RD_REG_DWORD(&reg->gpiod);
936
937 /* Enable the gpio_data reg for update. */
938 gpio_data |= GPDX_LED_UPDATE_MASK;
939 WRT_REG_DWORD(&reg->gpiod, gpio_data);
940 RD_REG_DWORD(&reg->gpiod);
941
942 spin_unlock_irqrestore(&ha->hardware_lock, flags);
943 }
944
945 /* So all colors blink together. */
946 ha->beacon_color_state = 0;
947
948 /* Let the per HBA timer kick off the blinking process. */
949 ha->beacon_blink_led = 1;
950
951 return QLA_SUCCESS;
952}
953
954int
955qla24xx_beacon_off(struct scsi_qla_host *ha)
956{
957 uint32_t gpio_data;
958 unsigned long flags;
959 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
960
961 ha->beacon_blink_led = 0;
962 ha->beacon_color_state = QLA_LED_ALL_ON;
963
964 ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */
965
966 /* Give control back to firmware. */
967 spin_lock_irqsave(&ha->hardware_lock, flags);
968 gpio_data = RD_REG_DWORD(&reg->gpiod);
969
970 /* Disable the gpio_data reg for update. */
971 gpio_data &= ~GPDX_LED_UPDATE_MASK;
972 WRT_REG_DWORD(&reg->gpiod, gpio_data);
973 RD_REG_DWORD(&reg->gpiod);
974 spin_unlock_irqrestore(&ha->hardware_lock, flags);
975
976 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
977
978 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
979 qla_printk(KERN_WARNING, ha,
980 "Unable to update fw options (beacon off).\n");
981 return QLA_FUNCTION_FAILED;
982 }
983
984 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
985 qla_printk(KERN_WARNING, ha,
986 "Unable to get fw options (beacon off).\n");
987 return QLA_FUNCTION_FAILED;
988 }
989
990 return QLA_SUCCESS;
991}
992
993
994/*
995 * Flash support routines
996 */
997
998/**
999 * qla2x00_flash_enable() - Setup flash for reading and writing.
1000 * @ha: HA context
1001 */
1002static void
1003qla2x00_flash_enable(scsi_qla_host_t *ha)
1004{
1005 uint16_t data;
1006 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1007
1008 data = RD_REG_WORD(&reg->ctrl_status);
1009 data |= CSR_FLASH_ENABLE;
1010 WRT_REG_WORD(&reg->ctrl_status, data);
1011 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1012}
1013
1014/**
1015 * qla2x00_flash_disable() - Disable flash and allow RISC to run.
1016 * @ha: HA context
1017 */
1018static void
1019qla2x00_flash_disable(scsi_qla_host_t *ha)
1020{
1021 uint16_t data;
1022 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1023
1024 data = RD_REG_WORD(&reg->ctrl_status);
1025 data &= ~(CSR_FLASH_ENABLE);
1026 WRT_REG_WORD(&reg->ctrl_status, data);
1027 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1028}
1029
1030/**
1031 * qla2x00_read_flash_byte() - Reads a byte from flash
1032 * @ha: HA context
1033 * @addr: Address in flash to read
1034 *
1035 * A word is read from the chip, but only the lower byte is valid.
1036 *
1037 * Returns the byte read from flash @addr.
1038 */
1039static uint8_t
1040qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1041{
1042 uint16_t data;
1043 uint16_t bank_select;
1044 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1045
1046 bank_select = RD_REG_WORD(&reg->ctrl_status);
1047
1048 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1049 /* Specify 64K address range: */
1050 /* clear out Module Select and Flash Address bits [19:16]. */
1051 bank_select &= ~0xf8;
1052 bank_select |= addr >> 12 & 0xf0;
1053 bank_select |= CSR_FLASH_64K_BANK;
1054 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1055 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1056
1057 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1058 data = RD_REG_WORD(&reg->flash_data);
1059
1060 return (uint8_t)data;
1061 }
1062
1063 /* Setup bit 16 of flash address. */
1064 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1065 bank_select |= CSR_FLASH_64K_BANK;
1066 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1067 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1068 } else if (((addr & BIT_16) == 0) &&
1069 (bank_select & CSR_FLASH_64K_BANK)) {
1070 bank_select &= ~(CSR_FLASH_64K_BANK);
1071 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1072 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1073 }
1074
1075 /* Always perform IO mapped accesses to the FLASH registers. */
1076 if (ha->pio_address) {
1077 uint16_t data2;
1078
1079 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1080 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1081 do {
1082 data = RD_REG_WORD_PIO(&reg->flash_data);
1083 barrier();
1084 cpu_relax();
1085 data2 = RD_REG_WORD_PIO(&reg->flash_data);
1086 } while (data != data2);
1087 } else {
1088 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1089 data = qla2x00_debounce_register(&reg->flash_data);
1090 }
1091
1092 return (uint8_t)data;
1093}
1094
1095/**
1096 * qla2x00_write_flash_byte() - Write a byte to flash
1097 * @ha: HA context
1098 * @addr: Address in flash to write
1099 * @data: Data to write
1100 */
1101static void
1102qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1103{
1104 uint16_t bank_select;
1105 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1106
1107 bank_select = RD_REG_WORD(&reg->ctrl_status);
1108 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1109 /* Specify 64K address range: */
1110 /* clear out Module Select and Flash Address bits [19:16]. */
1111 bank_select &= ~0xf8;
1112 bank_select |= addr >> 12 & 0xf0;
1113 bank_select |= CSR_FLASH_64K_BANK;
1114 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1115 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1116
1117 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1118 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1119 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1120 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1121
1122 return;
1123 }
1124
1125 /* Setup bit 16 of flash address. */
1126 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1127 bank_select |= CSR_FLASH_64K_BANK;
1128 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1129 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1130 } else if (((addr & BIT_16) == 0) &&
1131 (bank_select & CSR_FLASH_64K_BANK)) {
1132 bank_select &= ~(CSR_FLASH_64K_BANK);
1133 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1134 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1135 }
1136
1137 /* Always perform IO mapped accesses to the FLASH registers. */
1138 if (ha->pio_address) {
1139 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1140 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1141 WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
1142 } else {
1143 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1144 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1145 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1146 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1147 }
1148}
1149
1150/**
1151 * qla2x00_poll_flash() - Polls flash for completion.
1152 * @ha: HA context
1153 * @addr: Address in flash to poll
1154 * @poll_data: Data to be polled
1155 * @man_id: Flash manufacturer ID
1156 * @flash_id: Flash ID
1157 *
1158 * This function polls the device until bit 7 of what is read matches data
1159 * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
1160 * out (a fatal error). The flash book recommends reading bit 7 again after
1161 * reading bit 5 as a 1.
1162 *
1163 * Returns 0 on success, else non-zero.
1164 */
1165static int
1166qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1167 uint8_t man_id, uint8_t flash_id)
1168{
1169 int status;
1170 uint8_t flash_data;
1171 uint32_t cnt;
1172
1173 status = 1;
1174
1175 /* Wait for 30 seconds for command to finish. */
1176 poll_data &= BIT_7;
1177 for (cnt = 3000000; cnt; cnt--) {
1178 flash_data = qla2x00_read_flash_byte(ha, addr);
1179 if ((flash_data & BIT_7) == poll_data) {
1180 status = 0;
1181 break;
1182 }
1183
1184 if (man_id != 0x40 && man_id != 0xda) {
1185 if ((flash_data & BIT_5) && cnt > 2)
1186 cnt = 2;
1187 }
1188 udelay(10);
1189 barrier();
1190 }
1191 return status;
1192}
1193
1194#define IS_OEM_001(ha) \
1195 ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \
1196 (ha)->pdev->subsystem_vendor == 0x1028 && \
1197 (ha)->pdev->subsystem_device == 0x0170)
1198
1199/**
1200 * qla2x00_program_flash_address() - Programs a flash address
1201 * @ha: HA context
1202 * @addr: Address in flash to program
1203 * @data: Data to be written in flash
1204 * @man_id: Flash manufacturer ID
1205 * @flash_id: Flash ID
1206 *
1207 * Returns 0 on success, else non-zero.
1208 */
1209static int
1210qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1211 uint8_t man_id, uint8_t flash_id)
1212{
1213 /* Write Program Command Sequence. */
1214 if (IS_OEM_001(ha)) {
1215 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1216 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1217 qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
1218 qla2x00_write_flash_byte(ha, addr, data);
1219 } else {
1220 if (man_id == 0xda && flash_id == 0xc1) {
1221 qla2x00_write_flash_byte(ha, addr, data);
1222 if (addr & 0x7e)
1223 return 0;
1224 } else {
1225 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1226 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1227 qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
1228 qla2x00_write_flash_byte(ha, addr, data);
1229 }
1230 }
1231
1232 udelay(150);
1233
1234 /* Wait for write to complete. */
1235 return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
1236}
1237
1238/**
1239 * qla2x00_erase_flash() - Erase the flash.
1240 * @ha: HA context
1241 * @man_id: Flash manufacturer ID
1242 * @flash_id: Flash ID
1243 *
1244 * Returns 0 on success, else non-zero.
1245 */
1246static int
1247qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1248{
1249 /* Individual Sector Erase Command Sequence */
1250 if (IS_OEM_001(ha)) {
1251 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1252 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1253 qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
1254 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1255 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1256 qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
1257 } else {
1258 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1259 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1260 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1261 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1262 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1263 qla2x00_write_flash_byte(ha, 0x5555, 0x10);
1264 }
1265
1266 udelay(150);
1267
1268 /* Wait for erase to complete. */
1269 return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
1270}
1271
1272/**
1273 * qla2x00_erase_flash_sector() - Erase a flash sector.
1274 * @ha: HA context
1275 * @addr: Flash sector to erase
1276 * @sec_mask: Sector address mask
1277 * @man_id: Flash manufacturer ID
1278 * @flash_id: Flash ID
1279 *
1280 * Returns 0 on success, else non-zero.
1281 */
1282static int
1283qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1284 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1285{
1286 /* Individual Sector Erase Command Sequence */
1287 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1288 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1289 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1290 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1291 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1292 if (man_id == 0x1f && flash_id == 0x13)
1293 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10);
1294 else
1295 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30);
1296
1297 udelay(150);
1298
1299 /* Wait for erase to complete. */
1300 return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id);
1301}
1302
1303/**
1304 * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
1305 * @man_id: Flash manufacturer ID
1306 * @flash_id: Flash ID
1307 */
1308static void
1309qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1310 uint8_t *flash_id)
1311{
1312 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1313 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1314 qla2x00_write_flash_byte(ha, 0x5555, 0x90);
1315 *man_id = qla2x00_read_flash_byte(ha, 0x0000);
1316 *flash_id = qla2x00_read_flash_byte(ha, 0x0001);
1317 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1318 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1319 qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
1320}
1321
1322
1323static inline void
1324qla2x00_suspend_hba(struct scsi_qla_host *ha)
1325{
1326 int cnt;
1327 unsigned long flags;
1328 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1329
1330 /* Suspend HBA. */
1331 scsi_block_requests(ha->host);
1332 ha->isp_ops.disable_intrs(ha);
1333 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1334
1335 /* Pause RISC. */
1336 spin_lock_irqsave(&ha->hardware_lock, flags);
1337 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1338 RD_REG_WORD(&reg->hccr);
1339 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1340 for (cnt = 0; cnt < 30000; cnt++) {
1341 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
1342 break;
1343 udelay(100);
1344 }
1345 } else {
1346 udelay(10);
1347 }
1348 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1349}
1350
1351static inline void
1352qla2x00_resume_hba(struct scsi_qla_host *ha)
1353{
1354 /* Resume HBA. */
1355 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1356 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1357 up(ha->dpc_wait);
1358 qla2x00_wait_for_hba_online(ha);
1359 scsi_unblock_requests(ha->host);
1360}
1361
1362uint8_t *
1363qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1364 uint32_t offset, uint32_t length)
1365{
1366 unsigned long flags;
1367 uint32_t addr, midpoint;
1368 uint8_t *data;
1369 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1370
1371 /* Suspend HBA. */
1372 qla2x00_suspend_hba(ha);
1373
1374 /* Go with read. */
1375 spin_lock_irqsave(&ha->hardware_lock, flags);
1376 midpoint = ha->optrom_size / 2;
1377
1378 qla2x00_flash_enable(ha);
1379 WRT_REG_WORD(&reg->nvram, 0);
1380 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1381 for (addr = offset, data = buf; addr < length; addr++, data++) {
1382 if (addr == midpoint) {
1383 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1384 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1385 }
1386
1387 *data = qla2x00_read_flash_byte(ha, addr);
1388 }
1389 qla2x00_flash_disable(ha);
1390 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1391
1392 /* Resume HBA. */
1393 qla2x00_resume_hba(ha);
1394
1395 return buf;
1396}
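
A sketch of a possible caller, assuming the option ROM contents are staged in ha->optrom_buffer (the buffer released in the qla_os.c hunk above); the helper name and error handling are invented:

static int example_dump_optrom(scsi_qla_host_t *ha)
{
	if (!ha->optrom_buffer) {
		ha->optrom_buffer = vmalloc(ha->optrom_size);
		if (!ha->optrom_buffer)
			return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_size);
	ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, ha->optrom_size);
	return 0;
}
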
1397
1398int
1399qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1400 uint32_t offset, uint32_t length)
1401{
1402
1403 int rval;
1404 unsigned long flags;
1405 uint8_t man_id, flash_id, sec_number, data;
1406 uint16_t wd;
1407 uint32_t addr, liter, sec_mask, rest_addr;
1408 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1409
1410 /* Suspend HBA. */
1411 qla2x00_suspend_hba(ha);
1412
1413 rval = QLA_SUCCESS;
1414 sec_number = 0;
1415
1416 /* Reset ISP chip. */
1417 spin_lock_irqsave(&ha->hardware_lock, flags);
1418 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1419 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1420
1421 /* Go with write. */
1422 qla2x00_flash_enable(ha);
1423 do { /* Loop once to provide quick error exit */
1424 /* Structure of flash memory based on manufacturer */
1425 if (IS_OEM_001(ha)) {
1426 /* OEM variant with special flash part. */
1427 man_id = flash_id = 0;
1428 rest_addr = 0xffff;
1429 sec_mask = 0x10000;
1430 goto update_flash;
1431 }
1432 qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
1433 switch (man_id) {
1434 case 0x20: /* ST flash. */
1435 if (flash_id == 0xd2 || flash_id == 0xe3) {
1436 /*
1437 * ST m29w008at part - 64kb sector size with
1438 * 32kb,8kb,8kb,16kb sectors at memory address
1439 * 0xf0000.
1440 */
1441 rest_addr = 0xffff;
1442 sec_mask = 0x10000;
1443 break;
1444 }
1445 /*
1446 * ST m29w010b part - 16kb sector size
1447 * Default to 16kb sectors
1448 */
1449 rest_addr = 0x3fff;
1450 sec_mask = 0x1c000;
1451 break;
1452 case 0x40: /* Mostel flash. */
1453 /* Mostel v29c51001 part - 512 byte sector size. */
1454 rest_addr = 0x1ff;
1455 sec_mask = 0x1fe00;
1456 break;
1457 case 0xbf: /* SST flash. */
1458 /* SST39sf10 part - 4kb sector size. */
1459 rest_addr = 0xfff;
1460 sec_mask = 0x1f000;
1461 break;
1462 case 0xda: /* Winbond flash. */
1463 /* Winbond W29EE011 part - 256 byte sector size. */
1464 rest_addr = 0x7f;
1465 sec_mask = 0x1ff80;
1466 break;
1467 case 0xc2: /* Macronix flash. */
1468 /* 64k sector size. */
1469 if (flash_id == 0x38 || flash_id == 0x4f) {
1470 rest_addr = 0xffff;
1471 sec_mask = 0x10000;
1472 break;
1473 }
1474 /* Fall through... */
1475
1476 case 0x1f: /* Atmel flash. */
1477 /* 512k sector size. */
1478 if (flash_id == 0x13) {
1479 rest_addr = 0x7fffffff;
1480 sec_mask = 0x80000000;
1481 break;
1482 }
1483 /* Fall through... */
1484
1485 case 0x01: /* AMD flash. */
1486 if (flash_id == 0x38 || flash_id == 0x40 ||
1487 flash_id == 0x4f) {
1488 /* Am29LV081 part - 64kb sector size. */
1489 /* Am29LV002BT part - 64kb sector size. */
1490 rest_addr = 0xffff;
1491 sec_mask = 0x10000;
1492 break;
1493 } else if (flash_id == 0x3e) {
1494 /*
1495 * Am29LV008b part - 64kb sector size with
1496 * 32kb,8kb,8kb,16kb sector at memory address
1497 * h0xf0000.
1498 */
1499 rest_addr = 0xffff;
1500 sec_mask = 0x10000;
1501 break;
1502 } else if (flash_id == 0x20 || flash_id == 0x6e) {
1503 /*
1504 * Am29LV010 part or AM29f010 - 16kb sector
1505 * size.
1506 */
1507 rest_addr = 0x3fff;
1508 sec_mask = 0x1c000;
1509 break;
1510 } else if (flash_id == 0x6d) {
1511 /* Am29LV001 part - 8kb sector size. */
1512 rest_addr = 0x1fff;
1513 sec_mask = 0x1e000;
1514 break;
1515 }
1516 default:
1517 /* Default to 16 kb sector size. */
1518 rest_addr = 0x3fff;
1519 sec_mask = 0x1c000;
1520 break;
1521 }
1522
1523update_flash:
1524 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1525 if (qla2x00_erase_flash(ha, man_id, flash_id)) {
1526 rval = QLA_FUNCTION_FAILED;
1527 break;
1528 }
1529 }
1530
1531 for (addr = offset, liter = 0; liter < length; liter++,
1532 addr++) {
1533 data = buf[liter];
1534 /* Are we at the beginning of a sector? */
1535 if ((addr & rest_addr) == 0) {
1536 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1537 if (addr >= 0x10000UL) {
1538 if (((addr >> 12) & 0xf0) &&
1539 ((man_id == 0x01 &&
1540 flash_id == 0x3e) ||
1541 (man_id == 0x20 &&
1542 flash_id == 0xd2))) {
1543 sec_number++;
1544 if (sec_number == 1) {
1545 rest_addr =
1546 0x7fff;
1547 sec_mask =
1548 0x18000;
1549 } else if (
1550 sec_number == 2 ||
1551 sec_number == 3) {
1552 rest_addr =
1553 0x1fff;
1554 sec_mask =
1555 0x1e000;
1556 } else if (
1557 sec_number == 4) {
1558 rest_addr =
1559 0x3fff;
1560 sec_mask =
1561 0x1c000;
1562 }
1563 }
1564 }
1565 } else if (addr == ha->optrom_size / 2) {
1566 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1567 RD_REG_WORD(&reg->nvram);
1568 }
1569
1570 if (flash_id == 0xda && man_id == 0xc1) {
1571 qla2x00_write_flash_byte(ha, 0x5555,
1572 0xaa);
1573 qla2x00_write_flash_byte(ha, 0x2aaa,
1574 0x55);
1575 qla2x00_write_flash_byte(ha, 0x5555,
1576 0xa0);
1577 } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
1578 /* Then erase it */
1579 if (qla2x00_erase_flash_sector(ha,
1580 addr, sec_mask, man_id,
1581 flash_id)) {
1582 rval = QLA_FUNCTION_FAILED;
1583 break;
1584 }
1585 if (man_id == 0x01 && flash_id == 0x6d)
1586 sec_number++;
1587 }
1588 }
1589
1590 if (man_id == 0x01 && flash_id == 0x6d) {
1591 if (sec_number == 1 &&
1592 addr == (rest_addr - 1)) {
1593 rest_addr = 0x0fff;
1594 sec_mask = 0x1f000;
1595 } else if (sec_number == 3 && (addr & 0x7ffe)) {
1596 rest_addr = 0x3fff;
1597 sec_mask = 0x1c000;
1598 }
1599 }
1600
1601 if (qla2x00_program_flash_address(ha, addr, data,
1602 man_id, flash_id)) {
1603 rval = QLA_FUNCTION_FAILED;
1604 break;
1605 }
1606 }
1607 } while (0);
1608 qla2x00_flash_disable(ha);
1609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1610
1611 /* Resume HBA. */
1612 qla2x00_resume_hba(ha);
1613
1614 return rval;
1615}
1616
1617uint8_t *
1618qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1619 uint32_t offset, uint32_t length)
1620{
1621 /* Suspend HBA. */
1622 scsi_block_requests(ha->host);
1623 ha->isp_ops.disable_intrs(ha);
1624 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1625
1626 /* Go with read. */
1627 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2);
1628
1629 /* Resume HBA. */
1630 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1631 ha->isp_ops.enable_intrs(ha);
1632 scsi_unblock_requests(ha->host);
1633
1634 return buf;
1635}
1636
1637int
1638qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1639 uint32_t offset, uint32_t length)
1640{
1641 int rval;
1642
1643 /* Suspend HBA. */
1644 scsi_block_requests(ha->host);
1645 ha->isp_ops.disable_intrs(ha);
1646 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1647
1648 /* Go with write. */
1649 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2,
1650 length >> 2);
1651
1652 /* Resume HBA -- RISC reset needed. */
1653 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1654 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1655 up(ha->dpc_wait);
1656 qla2x00_wait_for_hba_online(ha);
1657 scsi_unblock_requests(ha->host);
1658
1659 return rval;
1660}
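
Because the 24xx helpers shift offset and length right by two before calling the flash routines, callers are presumably expected to pass 4-byte-aligned, word-multiple values. A defensive wrapper sketch (name invented):

static int example_write_optrom_checked(struct scsi_qla_host *ha, uint8_t *buf,
    uint32_t offset, uint32_t length)
{
	/* The helpers operate on 32-bit words; reject anything unaligned. */
	if ((offset & 3) || (length & 3))
		return QLA_FUNCTION_FAILED;

	return qla24xx_write_optrom_data(ha, buf, offset, length);
}
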
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4a602853a98e..4362dcde74af 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/hardirq.h>
19 20
20#include <scsi/scsi.h> 21#include <scsi/scsi.h>
21#include <scsi/scsi_dbg.h> 22#include <scsi/scsi_dbg.h>
@@ -2248,3 +2249,61 @@ scsi_target_unblock(struct device *dev)
2248 device_for_each_child(dev, NULL, target_unblock); 2249 device_for_each_child(dev, NULL, target_unblock);
2249} 2250}
2250EXPORT_SYMBOL_GPL(scsi_target_unblock); 2251EXPORT_SYMBOL_GPL(scsi_target_unblock);
2252
2253
2254struct work_queue_work {
2255 struct work_struct work;
2256 void (*fn)(void *);
2257 void *data;
2258};
2259
2260static void execute_in_process_context_work(void *data)
2261{
2262 void (*fn)(void *data);
2263 struct work_queue_work *wqw = data;
2264
2265 fn = wqw->fn;
2266 data = wqw->data;
2267
2268 kfree(wqw);
2269
2270 fn(data);
2271}
2272
2273/**
2274 * scsi_execute_in_process_context - reliably execute the routine with user context
2275 * @fn: the function to execute
2276 * @data: data to pass to the function
2277 *
2278 * Executes the function immediately if process context is available,
2279 * otherwise schedules the function for delayed execution.
2280 *
2281 * Returns: 0 - function was executed
2282 * 1 - function was scheduled for execution
2283 * <0 - error
2284 */
2285int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
2286{
2287 struct work_queue_work *wqw;
2288
2289 if (!in_interrupt()) {
2290 fn(data);
2291 return 0;
2292 }
2293
2294 wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
2295
2296 if (unlikely(!wqw)) {
2297 printk(KERN_ERR "Failed to allocate memory\n");
2298 WARN_ON(1);
2299 return -ENOMEM;
2300 }
2301
2302 INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
2303 wqw->fn = fn;
2304 wqw->data = data;
2305 schedule_work(&wqw->work);
2306
2307 return 1;
2308}
2309EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
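
The scsi_scan.c and scsi_sysfs.c hunks below show the intended callers; as a standalone sketch with hypothetical names, a cleanup routine that must be able to sleep could be wired up like this:

static void example_cleanup_usercontext(void *data)
{
	/* Guaranteed process context here, so sleeping APIs are safe. */
	kfree(data);
}

static void example_cleanup(void *obj)
{
	/* May be called from any context; runs now or via schedule_work(). */
	scsi_execute_in_process_context(example_cleanup_usercontext, obj);
}
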
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 752fb5da3de4..5acb83ca5ae5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -387,19 +387,12 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
387 return found_target; 387 return found_target;
388} 388}
389 389
390struct work_queue_wrapper { 390static void scsi_target_reap_usercontext(void *data)
391 struct work_struct work; 391{
392 struct scsi_target *starget; 392 struct scsi_target *starget = data;
393};
394
395static void scsi_target_reap_work(void *data) {
396 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
397 struct scsi_target *starget = wqw->starget;
398 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 393 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
399 unsigned long flags; 394 unsigned long flags;
400 395
401 kfree(wqw);
402
403 spin_lock_irqsave(shost->host_lock, flags); 396 spin_lock_irqsave(shost->host_lock, flags);
404 397
405 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { 398 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
@@ -428,18 +421,7 @@ static void scsi_target_reap_work(void *data) {
428 */ 421 */
429void scsi_target_reap(struct scsi_target *starget) 422void scsi_target_reap(struct scsi_target *starget)
430{ 423{
431 struct work_queue_wrapper *wqw = 424 scsi_execute_in_process_context(scsi_target_reap_usercontext, starget);
432 kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);
433
434 if (!wqw) {
435 starget_printk(KERN_ERR, starget,
436 "Failed to allocate memory in scsi_reap_target()\n");
437 return;
438 }
439
440 INIT_WORK(&wqw->work, scsi_target_reap_work, wqw);
441 wqw->starget = starget;
442 schedule_work(&wqw->work);
443} 425}
444 426
445/** 427/**
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index a77b32deaf8f..902a5def8e62 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -217,8 +217,9 @@ static void scsi_device_cls_release(struct class_device *class_dev)
217 put_device(&sdev->sdev_gendev); 217 put_device(&sdev->sdev_gendev);
218} 218}
219 219
220static void scsi_device_dev_release(struct device *dev) 220static void scsi_device_dev_release_usercontext(void *data)
221{ 221{
222 struct device *dev = data;
222 struct scsi_device *sdev; 223 struct scsi_device *sdev;
223 struct device *parent; 224 struct device *parent;
224 struct scsi_target *starget; 225 struct scsi_target *starget;
@@ -237,6 +238,7 @@ static void scsi_device_dev_release(struct device *dev)
237 238
238 if (sdev->request_queue) { 239 if (sdev->request_queue) {
239 sdev->request_queue->queuedata = NULL; 240 sdev->request_queue->queuedata = NULL;
241 /* user context needed to free queue */
240 scsi_free_queue(sdev->request_queue); 242 scsi_free_queue(sdev->request_queue);
241 /* temporary expedient, try to catch use of queue lock 243 /* temporary expedient, try to catch use of queue lock
242 * after free of sdev */ 244 * after free of sdev */
@@ -252,6 +254,11 @@ static void scsi_device_dev_release(struct device *dev)
252 put_device(parent); 254 put_device(parent);
253} 255}
254 256
257static void scsi_device_dev_release(struct device *dev)
258{
259 scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev);
260}
261
255static struct class sdev_class = { 262static struct class sdev_class = {
256 .name = "scsi_device", 263 .name = "scsi_device",
257 .release = scsi_device_cls_release, 264 .release = scsi_device_cls_release,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 723f7acbeb12..71e54a64adca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -39,10 +39,6 @@ struct iscsi_internal {
39 struct iscsi_transport *iscsi_transport; 39 struct iscsi_transport *iscsi_transport;
40 struct list_head list; 40 struct list_head list;
41 /* 41 /*
42 * List of sessions for this transport
43 */
44 struct list_head sessions;
45 /*
46 * based on transport capabilities, at register time we set these 42 * based on transport capabilities, at register time we set these
47 * bits to tell the transport class it wants attributes displayed 43 * bits to tell the transport class it wants attributes displayed
48 * in sysfs or that it can support different iSCSI Data-Path 44 * in sysfs or that it can support different iSCSI Data-Path
@@ -164,9 +160,43 @@ static struct mempool_zone *z_reply;
164#define Z_MAX_ERROR 16 160#define Z_MAX_ERROR 16
165#define Z_HIWAT_ERROR 12 161#define Z_HIWAT_ERROR 12
166 162
163static LIST_HEAD(sesslist);
164static DEFINE_SPINLOCK(sesslock);
167static LIST_HEAD(connlist); 165static LIST_HEAD(connlist);
168static DEFINE_SPINLOCK(connlock); 166static DEFINE_SPINLOCK(connlock);
169 167
168static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle)
169{
170 unsigned long flags;
171 struct iscsi_cls_session *sess;
172
173 spin_lock_irqsave(&sesslock, flags);
174 list_for_each_entry(sess, &sesslist, sess_list) {
175 if (sess == iscsi_ptr(handle)) {
176 spin_unlock_irqrestore(&sesslock, flags);
177 return sess;
178 }
179 }
180 spin_unlock_irqrestore(&sesslock, flags);
181 return NULL;
182}
183
184static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle)
185{
186 unsigned long flags;
187 struct iscsi_cls_conn *conn;
188
189 spin_lock_irqsave(&connlock, flags);
190 list_for_each_entry(conn, &connlist, conn_list) {
191 if (conn == iscsi_ptr(handle)) {
192 spin_unlock_irqrestore(&connlock, flags);
193 return conn;
194 }
195 }
196 spin_unlock_irqrestore(&connlock, flags);
197 return NULL;
198}
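
The lookups above validate a 64-bit handle received over netlink before it is trusted. A sketch of the round trip, assuming the iscsi_handle()/iscsi_ptr() conversion macros already used in this file:

static uint64_t example_export_conn(struct iscsi_cls_conn *conn)
{
	return iscsi_handle(conn);		/* pointer -> opaque u64 */
}

static struct iscsi_cls_conn *example_import_conn(uint64_t handle)
{
	/* Never iscsi_ptr() a userspace-supplied value directly; the
	 * lookup confirms the object is still on the global list. */
	return iscsi_conn_lookup(handle);
}
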
199
170/* 200/*
171 * The following functions can be used by LLDs that allocate 201 * The following functions can be used by LLDs that allocate
172 * their own scsi_hosts or by software iscsi LLDs 202 * their own scsi_hosts or by software iscsi LLDs
@@ -365,6 +395,7 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
365{ 395{
366 struct iscsi_cls_session *session; 396 struct iscsi_cls_session *session;
367 struct Scsi_Host *shost; 397 struct Scsi_Host *shost;
398 unsigned long flags;
368 399
369 shost = scsi_host_alloc(transport->host_template, 400 shost = scsi_host_alloc(transport->host_template,
370 hostdata_privsize(transport)); 401 hostdata_privsize(transport));
@@ -389,6 +420,9 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
389 goto remove_host; 420 goto remove_host;
390 421
391 *(unsigned long*)shost->hostdata = (unsigned long)session; 422 *(unsigned long*)shost->hostdata = (unsigned long)session;
423 spin_lock_irqsave(&sesslock, flags);
424 list_add(&session->sess_list, &sesslist);
425 spin_unlock_irqrestore(&sesslock, flags);
392 return shost; 426 return shost;
393 427
394remove_host: 428remove_host:
@@ -410,9 +444,13 @@ EXPORT_SYMBOL_GPL(iscsi_transport_create_session);
410int iscsi_transport_destroy_session(struct Scsi_Host *shost) 444int iscsi_transport_destroy_session(struct Scsi_Host *shost)
411{ 445{
412 struct iscsi_cls_session *session; 446 struct iscsi_cls_session *session;
447 unsigned long flags;
413 448
414 scsi_remove_host(shost); 449 scsi_remove_host(shost);
415 session = hostdata_session(shost->hostdata); 450 session = hostdata_session(shost->hostdata);
451 spin_lock_irqsave(&sesslock, flags);
452 list_del(&session->sess_list);
453 spin_unlock_irqrestore(&sesslock, flags);
416 iscsi_destroy_session(session); 454 iscsi_destroy_session(session);
417 /* ref from host alloc */ 455 /* ref from host alloc */
418 scsi_host_put(shost); 456 scsi_host_put(shost);
@@ -424,22 +462,6 @@ EXPORT_SYMBOL_GPL(iscsi_transport_destroy_session);
424/* 462/*
425 * iscsi interface functions 463 * iscsi interface functions
426 */ 464 */
427static struct iscsi_cls_conn*
428iscsi_if_find_conn(uint64_t key)
429{
430 unsigned long flags;
431 struct iscsi_cls_conn *conn;
432
433 spin_lock_irqsave(&connlock, flags);
434 list_for_each_entry(conn, &connlist, conn_list)
435 if (conn->connh == key) {
436 spin_unlock_irqrestore(&connlock, flags);
437 return conn;
438 }
439 spin_unlock_irqrestore(&connlock, flags);
440 return NULL;
441}
442
443static struct iscsi_internal * 465static struct iscsi_internal *
444iscsi_if_transport_lookup(struct iscsi_transport *tt) 466iscsi_if_transport_lookup(struct iscsi_transport *tt)
445{ 467{
@@ -504,6 +526,12 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
504 if (!zp) 526 if (!zp)
505 return NULL; 527 return NULL;
506 528
529 zp->size = size;
530 zp->hiwat = hiwat;
531 INIT_LIST_HEAD(&zp->freequeue);
532 spin_lock_init(&zp->freelock);
533 atomic_set(&zp->allocated, 0);
534
507 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 535 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
508 mempool_zone_free_skb, zp); 536 mempool_zone_free_skb, zp);
509 if (!zp->pool) { 537 if (!zp->pool) {
@@ -511,13 +539,6 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
511 return NULL; 539 return NULL;
512 } 540 }
513 541
514 zp->size = size;
515 zp->hiwat = hiwat;
516
517 INIT_LIST_HEAD(&zp->freequeue);
518 spin_lock_init(&zp->freelock);
519 atomic_set(&zp->allocated, 0);
520
521 return zp; 542 return zp;
522} 543}
523 544
@@ -559,25 +580,21 @@ iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb)
559 return 0; 580 return 0;
560} 581}
561 582
562int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, 583int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
563 char *data, uint32_t data_size) 584 char *data, uint32_t data_size)
564{ 585{
565 struct nlmsghdr *nlh; 586 struct nlmsghdr *nlh;
566 struct sk_buff *skb; 587 struct sk_buff *skb;
567 struct iscsi_uevent *ev; 588 struct iscsi_uevent *ev;
568 struct iscsi_cls_conn *conn;
569 char *pdu; 589 char *pdu;
570 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 590 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) +
571 data_size); 591 data_size);
572 592
573 conn = iscsi_if_find_conn(connh);
574 BUG_ON(!conn);
575
576 mempool_zone_complete(conn->z_pdu); 593 mempool_zone_complete(conn->z_pdu);
577 594
578 skb = mempool_zone_get_skb(conn->z_pdu); 595 skb = mempool_zone_get_skb(conn->z_pdu);
579 if (!skb) { 596 if (!skb) {
580 iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); 597 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
581 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 598 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
582 "control PDU: OOM\n"); 599 "control PDU: OOM\n");
583 return -ENOMEM; 600 return -ENOMEM;
@@ -590,7 +607,7 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
590 ev->type = ISCSI_KEVENT_RECV_PDU; 607 ev->type = ISCSI_KEVENT_RECV_PDU;
591 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 608 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat)
592 ev->iferror = -ENOMEM; 609 ev->iferror = -ENOMEM;
593 ev->r.recv_req.conn_handle = connh; 610 ev->r.recv_req.conn_handle = iscsi_handle(conn);
594 pdu = (char*)ev + sizeof(*ev); 611 pdu = (char*)ev + sizeof(*ev);
595 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 612 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
596 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 613 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
@@ -599,17 +616,13 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
599} 616}
600EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 617EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
601 618
602void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) 619void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
603{ 620{
604 struct nlmsghdr *nlh; 621 struct nlmsghdr *nlh;
605 struct sk_buff *skb; 622 struct sk_buff *skb;
606 struct iscsi_uevent *ev; 623 struct iscsi_uevent *ev;
607 struct iscsi_cls_conn *conn;
608 int len = NLMSG_SPACE(sizeof(*ev)); 624 int len = NLMSG_SPACE(sizeof(*ev));
609 625
610 conn = iscsi_if_find_conn(connh);
611 BUG_ON(!conn);
612
613 mempool_zone_complete(conn->z_error); 626 mempool_zone_complete(conn->z_error);
614 627
615 skb = mempool_zone_get_skb(conn->z_error); 628 skb = mempool_zone_get_skb(conn->z_error);
@@ -626,7 +639,7 @@ void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error)
626 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 639 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat)
627 ev->iferror = -ENOMEM; 640 ev->iferror = -ENOMEM;
628 ev->r.connerror.error = error; 641 ev->r.connerror.error = error;
629 ev->r.connerror.conn_handle = connh; 642 ev->r.connerror.conn_handle = iscsi_handle(conn);
630 643
631 iscsi_unicast_skb(conn->z_error, skb); 644 iscsi_unicast_skb(conn->z_error, skb);
632 645
@@ -662,8 +675,7 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
662} 675}
663 676
664static int 677static int
665iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, 678iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
666 struct nlmsghdr *nlh)
667{ 679{
668 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 680 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
669 struct iscsi_stats *stats; 681 struct iscsi_stats *stats;
@@ -677,7 +689,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
677 ISCSI_STATS_CUSTOM_MAX); 689 ISCSI_STATS_CUSTOM_MAX);
678 int err = 0; 690 int err = 0;
679 691
680 conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); 692 conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle);
681 if (!conn) 693 if (!conn)
682 return -EEXIST; 694 return -EEXIST;
683 695
@@ -707,14 +719,14 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
707 ((char*)evstat + sizeof(*evstat)); 719 ((char*)evstat + sizeof(*evstat));
708 memset(stats, 0, sizeof(*stats)); 720 memset(stats, 0, sizeof(*stats));
709 721
710 transport->get_stats(ev->u.get_stats.conn_handle, stats); 722 transport->get_stats(conn, stats);
711 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 723 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) +
712 sizeof(struct iscsi_stats) + 724 sizeof(struct iscsi_stats) +
713 sizeof(struct iscsi_stats_custom) * 725 sizeof(struct iscsi_stats_custom) *
714 stats->custom_length); 726 stats->custom_length);
715 actual_size -= sizeof(*nlhstat); 727 actual_size -= sizeof(*nlhstat);
716 actual_size = NLMSG_LENGTH(actual_size); 728 actual_size = NLMSG_LENGTH(actual_size);
717 skb_trim(skb, NLMSG_ALIGN(actual_size)); 729 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
718 nlhstat->nlmsg_len = actual_size; 730 nlhstat->nlmsg_len = actual_size;
719 731
720 err = iscsi_unicast_skb(conn->z_pdu, skbstat); 732 err = iscsi_unicast_skb(conn->z_pdu, skbstat);
@@ -727,58 +739,34 @@ static int
727iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 739iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
728{ 740{
729 struct iscsi_transport *transport = priv->iscsi_transport; 741 struct iscsi_transport *transport = priv->iscsi_transport;
730 struct Scsi_Host *shost; 742 struct iscsi_cls_session *session;
731 743 uint32_t sid;
732 if (!transport->create_session)
733 return -EINVAL;
734 744
735 shost = transport->create_session(&priv->t, 745 session = transport->create_session(&priv->t,
736 ev->u.c_session.initial_cmdsn); 746 ev->u.c_session.initial_cmdsn,
737 if (!shost) 747 &sid);
748 if (!session)
738 return -ENOMEM; 749 return -ENOMEM;
739 750
740 ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); 751 ev->r.c_session_ret.session_handle = iscsi_handle(session);
741 ev->r.c_session_ret.sid = shost->host_no; 752 ev->r.c_session_ret.sid = sid;
742 return 0; 753 return 0;
743} 754}
744 755
745static int 756static int
746iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 757iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
747{ 758{
748 struct iscsi_transport *transport = priv->iscsi_transport;
749
750 struct Scsi_Host *shost;
751
752 if (!transport->destroy_session)
753 return -EINVAL;
754
755 shost = scsi_host_lookup(ev->u.d_session.sid);
756 if (shost == ERR_PTR(-ENXIO))
757 return -EEXIST;
758
759 if (transport->destroy_session)
760 transport->destroy_session(shost);
761 /* ref from host lookup */
762 scsi_host_put(shost);
763 return 0;
764}
765
766static int
767iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){
768 struct Scsi_Host *shost;
769 struct iscsi_cls_conn *conn; 759 struct iscsi_cls_conn *conn;
760 struct iscsi_cls_session *session;
770 unsigned long flags; 761 unsigned long flags;
771 762
772 if (!transport->create_conn) 763 session = iscsi_session_lookup(ev->u.c_conn.session_handle);
764 if (!session)
773 return -EINVAL; 765 return -EINVAL;
774 766
775 shost = scsi_host_lookup(ev->u.c_conn.sid); 767 conn = transport->create_conn(session, ev->u.c_conn.cid);
776 if (shost == ERR_PTR(-ENXIO))
777 return -EEXIST;
778
779 conn = transport->create_conn(shost, ev->u.c_conn.cid);
780 if (!conn) 768 if (!conn)
781 goto release_ref; 769 return -ENOMEM;
782 770
783 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 771 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
784 NLMSG_SPACE(sizeof(struct iscsi_uevent) + 772 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
@@ -800,14 +788,13 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
800 goto free_pdu_pool; 788 goto free_pdu_pool;
801 } 789 }
802 790
803 ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); 791 ev->r.handle = iscsi_handle(conn);
804 792
805 spin_lock_irqsave(&connlock, flags); 793 spin_lock_irqsave(&connlock, flags);
806 list_add(&conn->conn_list, &connlist); 794 list_add(&conn->conn_list, &connlist);
807 conn->active = 1; 795 conn->active = 1;
808 spin_unlock_irqrestore(&connlock, flags); 796 spin_unlock_irqrestore(&connlock, flags);
809 797
810 scsi_host_put(shost);
811 return 0; 798 return 0;
812 799
813free_pdu_pool: 800free_pdu_pool:
@@ -815,8 +802,6 @@ free_pdu_pool:
815destroy_conn: 802destroy_conn:
816 if (transport->destroy_conn) 803 if (transport->destroy_conn)
817 transport->destroy_conn(conn->dd_data); 804 transport->destroy_conn(conn->dd_data);
818release_ref:
819 scsi_host_put(shost);
820 return -ENOMEM; 805 return -ENOMEM;
821} 806}
822 807
@@ -827,13 +812,9 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
827 struct iscsi_cls_conn *conn; 812 struct iscsi_cls_conn *conn;
828 struct mempool_zone *z_error, *z_pdu; 813 struct mempool_zone *z_error, *z_pdu;
829 814
830 conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); 815 conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle);
831 if (!conn) 816 if (!conn)
832 return -EEXIST;
833
834 if (!transport->destroy_conn)
835 return -EINVAL; 817 return -EINVAL;
836
837 spin_lock_irqsave(&connlock, flags); 818 spin_lock_irqsave(&connlock, flags);
838 conn->active = 0; 819 conn->active = 0;
839 list_del(&conn->conn_list); 820 list_del(&conn->conn_list);
@@ -858,23 +839,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
 	struct iscsi_transport *transport = NULL;
 	struct iscsi_internal *priv;
-
-	if (NETLINK_CREDS(skb)->uid)
-		return -EPERM;
+	struct iscsi_cls_session *session;
+	struct iscsi_cls_conn *conn;
 
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
 		return -EINVAL;
 	transport = priv->iscsi_transport;
 
-	daemon_pid = NETLINK_CREDS(skb)->pid;
+	if (!try_module_get(transport->owner))
+		return -EINVAL;
 
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ev);
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
-		err = iscsi_if_destroy_session(priv, ev);
+		session = iscsi_session_lookup(ev->u.d_session.session_handle);
+		if (session)
+			transport->destroy_session(session);
+		else
+			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_CREATE_CONN:
 		err = iscsi_if_create_conn(transport, ev);
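Note: the try_module_get()/module_put() pair added in this function pins the transport module for the whole netlink request, so the LLD cannot be unloaded while one of its callbacks is still running. A minimal sketch of that pattern follows; my_transport/my_event are invented stand-ins, not structures from this patch.

#include <linux/module.h>

/* Illustrative stand-ins only; not code from this patch. */
struct my_event;
struct my_transport {
	struct module *owner;
	int (*handle_event)(struct my_event *ev);
};

/* Pin the owning module across the callback, as iscsi_if_recv_msg() now
 * does with try_module_get()/module_put() around its switch statement. */
static int dispatch_event(struct my_transport *t, struct my_event *ev)
{
	int err;

	if (!try_module_get(t->owner))
		return -EINVAL;		/* module is unloading; refuse */

	err = t->handle_event(ev);
	module_put(t->owner);
	return err;
}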
@@ -883,56 +868,64 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		err = iscsi_if_destroy_conn(transport, ev);
 		break;
 	case ISCSI_UEVENT_BIND_CONN:
-		if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle))
-			return -EEXIST;
-		ev->r.retcode = transport->bind_conn(
-			ev->u.b_conn.session_handle,
-			ev->u.b_conn.conn_handle,
-			ev->u.b_conn.transport_fd,
-			ev->u.b_conn.is_leading);
+		session = iscsi_session_lookup(ev->u.b_conn.session_handle);
+		conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle);
+
+		if (session && conn)
+			ev->r.retcode = transport->bind_conn(session, conn,
+					ev->u.b_conn.transport_fd,
+					ev->u.b_conn.is_leading);
+		else
+			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_SET_PARAM:
-		if (!iscsi_if_find_conn(ev->u.set_param.conn_handle))
-			return -EEXIST;
-		ev->r.retcode = transport->set_param(
-			ev->u.set_param.conn_handle,
-			ev->u.set_param.param, ev->u.set_param.value);
+		conn = iscsi_conn_lookup(ev->u.set_param.conn_handle);
+		if (conn)
+			ev->r.retcode = transport->set_param(conn,
+				ev->u.set_param.param, ev->u.set_param.value);
+		else
+			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_START_CONN:
-		if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle))
-			return -EEXIST;
-		ev->r.retcode = transport->start_conn(
-			ev->u.start_conn.conn_handle);
+		conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle);
+		if (conn)
+			ev->r.retcode = transport->start_conn(conn);
+		else
+			err = -EINVAL;
+
 		break;
 	case ISCSI_UEVENT_STOP_CONN:
-		if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle))
-			return -EEXIST;
-		transport->stop_conn(ev->u.stop_conn.conn_handle,
-			ev->u.stop_conn.flag);
+		conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle);
+		if (conn)
+			transport->stop_conn(conn, ev->u.stop_conn.flag);
+		else
+			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_SEND_PDU:
-		if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle))
-			return -EEXIST;
-		ev->r.retcode = transport->send_pdu(
-			ev->u.send_pdu.conn_handle,
-			(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
-			(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
-			ev->u.send_pdu.data_size);
+		conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle);
+		if (conn)
+			ev->r.retcode = transport->send_pdu(conn,
+				(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
+				(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+				ev->u.send_pdu.data_size);
+		else
+			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_GET_STATS:
-		err = iscsi_if_get_stats(transport, skb, nlh);
+		err = iscsi_if_get_stats(transport, nlh);
 		break;
 	default:
 		err = -EINVAL;
 		break;
 	}
 
+	module_put(transport->owner);
 	return err;
 }
 
 /* Get message from skb (based on rtnetlink_rcv_skb). Each message is
  * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are
- * discarded silently. */
+ * or invalid creds discarded silently. */
 static void
 iscsi_if_rx(struct sock *sk, int len)
 {
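Note: userspace still addresses sessions and connections by opaque 64-bit handles in the uevent payload; what changes in this hunk is that the kernel resolves those handles back to class objects via iscsi_session_lookup()/iscsi_conn_lookup() and answers with -EINVAL when the object is gone, instead of handing a raw handle to the transport. A plausible shape for such a lookup is sketched below; connlist/connlock mirror names used elsewhere in this file, but the body is illustrative rather than the patch's code.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch: map a 64-bit handle (minted with iscsi_handle(conn)) back to the
 * connection it refers to, or NULL if it has gone away. */
static struct iscsi_cls_conn *conn_lookup_sketch(uint64_t handle)
{
	struct iscsi_cls_conn *conn;
	unsigned long flags;

	spin_lock_irqsave(&connlock, flags);
	list_for_each_entry(conn, &connlist, conn_list) {
		if (iscsi_handle(conn) == handle) {
			spin_unlock_irqrestore(&connlock, flags);
			return conn;
		}
	}
	spin_unlock_irqrestore(&connlock, flags);
	return NULL;	/* caller turns this into -EINVAL */
}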
@@ -940,6 +933,12 @@ iscsi_if_rx(struct sock *sk, int len)
 
 	mutex_lock(&rx_queue_mutex);
 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		if (NETLINK_CREDS(skb)->uid) {
+			skb_pull(skb, skb->len);
+			goto free_skb;
+		}
+		daemon_pid = NETLINK_CREDS(skb)->pid;
+
 		while (skb->len >= NLMSG_SPACE(0)) {
 			int err;
 			uint32_t rlen;
@@ -951,10 +950,12 @@ iscsi_if_rx(struct sock *sk, int len)
 			    skb->len < nlh->nlmsg_len) {
 				break;
 			}
+
 			ev = NLMSG_DATA(nlh);
 			rlen = NLMSG_ALIGN(nlh->nlmsg_len);
 			if (rlen > skb->len)
 				rlen = skb->len;
+
 			err = iscsi_if_recv_msg(skb, nlh);
 			if (err) {
 				ev->type = ISCSI_KEVENT_IF_ERROR;
@@ -978,6 +979,7 @@ iscsi_if_rx(struct sock *sk, int len)
 			} while (err < 0 && err != -ECONNREFUSED);
 			skb_pull(skb, rlen);
 		}
+free_skb:
 		kfree_skb(skb);
 	}
 	mutex_unlock(&rx_queue_mutex);
@@ -997,7 +999,7 @@ show_conn_int_param_##param(struct class_device *cdev, char *buf) \
 	struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
 	struct iscsi_transport *t = conn->transport; \
 	\
-	t->get_conn_param(conn->dd_data, param, &value); \
+	t->get_conn_param(conn, param, &value); \
 	return snprintf(buf, 20, format"\n", value); \
 }
 
@@ -1024,10 +1026,9 @@ show_session_int_param_##param(struct class_device *cdev, char *buf) \
 { \
 	uint32_t value = 0; \
 	struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
-	struct Scsi_Host *shost = iscsi_session_to_shost(session); \
 	struct iscsi_transport *t = session->transport; \
 	\
-	t->get_session_param(shost, param, &value); \
+	t->get_session_param(session, param, &value); \
 	return snprintf(buf, 20, format"\n", value); \
 }
 
@@ -1121,7 +1122,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
 		return NULL;
 	memset(priv, 0, sizeof(*priv));
 	INIT_LIST_HEAD(&priv->list);
-	INIT_LIST_HEAD(&priv->sessions);
 	priv->iscsi_transport = tt;
 
 	priv->cdev.class = &iscsi_transport_class;
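Note: the two sysfs hunks above complete the same conversion for the attribute path: get_conn_param() and get_session_param() are now handed the class objects rather than conn->dd_data or the Scsi_Host. A hedged sketch of what an LLD-side get_conn_param() might look like under the new prototype; my_conn and its field are invented, and ISCSI_PARAM_MAX_RECV_DLENGTH is one of the enum iscsi_param values.

#include <scsi/scsi_transport_iscsi.h>

struct my_conn {				/* hypothetical LLD private data */
	uint32_t max_recv_dlength;
};

static int my_get_conn_param(struct iscsi_cls_conn *cls_conn,
			     enum iscsi_param param, uint32_t *value)
{
	struct my_conn *conn = cls_conn->dd_data;	/* LLD private area */

	switch (param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		*value = conn->max_recv_dlength;
		return 0;
	default:
		return -EINVAL;
	}
}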
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 8260f040d39c..f4854c33f48d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3588,7 +3588,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int
 
 	if (pm) {
 		dp_scr = scr_to_cpu(pm->ret);
-		dp_ofs -= scr_to_cpu(pm->sg.size);
+		dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
 	}
 
 	/*
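Note: the sym2 one-liner masks the scatter-gather size word down to its low 24 bits before using it in the saved-data-pointer arithmetic; the top byte of that word can carry bits that are not part of the byte count (for example upper DMA address bits when 64-bit addressing is in use), so leaving it unmasked skews dp_ofs. A trivial illustration of the masking, with an invented helper name:

#include <stdint.h>

/* Only the low 24 bits of the descriptor word are the transfer length. */
static inline uint32_t sge_byte_count(uint32_t size_word)
{
	return size_word & 0x00ffffff;
}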
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 3e5cb5ab2d34..e5618b90996e 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -163,9 +163,6 @@ enum iscsi_param {
 };
 #define ISCSI_PARAM_MAX 14
 
-typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */
-typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */
-
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
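Note: with the iscsi_sessionh_t/iscsi_connh_t typedefs removed, the 64-bit values exchanged over netlink are produced and consumed directly with the two macros that remain: a kernel pointer is widened with iscsi_handle() when reported to userspace and mapped back (normally via the lookup helpers) when a request arrives. A trivial round-trip illustration using those macros; the function names are invented:

#include <stdint.h>

#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)

struct iscsi_cls_conn;

/* Export: ev->r.handle = iscsi_handle(conn), as in the transport class. */
static uint64_t export_handle(struct iscsi_cls_conn *conn)
{
	return iscsi_handle(conn);
}

/* Import: in practice the kernel validates the handle with
 * iscsi_conn_lookup() rather than trusting the raw cast. */
static struct iscsi_cls_conn *import_handle(uint64_t handle)
{
	return iscsi_ptr(handle);
}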
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index c60b8ff2f5e4..9c331258bc27 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -433,4 +433,6 @@ struct scsi_lun {
 /* Used to obtain the PCI location of a device */
 #define SCSI_IOCTL_GET_PCI 0x5387
 
+int scsi_execute_in_process_context(void (*fn)(void *data), void *data);
+
 #endif /* _SCSI_SCSI_H */
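Note: the new declaration lets SCSI drivers hand a callback to the midlayer to be run in process context (the implementation lands in drivers/scsi/scsi_lib.c in this same series); the intended use is deferring work that may sleep out of interrupt or completion context. A hedged usage sketch, with all my_* names invented and the return-value handling assumed rather than taken from the patch:

#include <scsi/scsi.h>

struct my_dev;					/* hypothetical driver state */
extern void my_dev_release_resources(struct my_dev *dev);

/* Callback that may sleep; run from process context by the midlayer. */
static void my_dev_teardown(void *data)
{
	struct my_dev *dev = data;

	my_dev_release_resources(dev);
}

static int my_completion_noticed_dead_device(struct my_dev *dev)
{
	/* Assumed: a nonzero return means the work could not be deferred. */
	return scsi_execute_in_process_context(my_dev_teardown, dev);
}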
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 16602a547a63..b41cf077e54b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -63,25 +63,28 @@ struct iscsi_transport {
 	int max_lun;
 	unsigned int max_conn;
 	unsigned int max_cmd_len;
-	struct Scsi_Host *(*create_session) (struct scsi_transport_template *t,
-					     uint32_t initial_cmdsn);
-	void (*destroy_session) (struct Scsi_Host *shost);
-	struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost,
+	struct iscsi_cls_session *(*create_session)
+		(struct scsi_transport_template *t, uint32_t sn, uint32_t *sid);
+	void (*destroy_session) (struct iscsi_cls_session *session);
+	struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
 				uint32_t cid);
-	int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn,
+	int (*bind_conn) (struct iscsi_cls_session *session,
+			  struct iscsi_cls_conn *cls_conn,
 			  uint32_t transport_fd, int is_leading);
-	int (*start_conn) (iscsi_connh_t conn);
-	void (*stop_conn) (iscsi_connh_t conn, int flag);
+	int (*start_conn) (struct iscsi_cls_conn *conn);
+	void (*stop_conn) (struct iscsi_cls_conn *conn, int flag);
 	void (*destroy_conn) (struct iscsi_cls_conn *conn);
-	int (*set_param) (iscsi_connh_t conn, enum iscsi_param param,
+	int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
 			  uint32_t value);
-	int (*get_conn_param) (void *conndata, enum iscsi_param param,
+	int (*get_conn_param) (struct iscsi_cls_conn *conn,
+			       enum iscsi_param param,
 			       uint32_t *value);
-	int (*get_session_param) (struct Scsi_Host *shost,
+	int (*get_session_param) (struct iscsi_cls_session *session,
 				  enum iscsi_param param, uint32_t *value);
-	int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr,
+	int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			 char *data, uint32_t data_size);
-	void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats);
+	void (*get_stats) (struct iscsi_cls_conn *conn,
+			   struct iscsi_stats *stats);
 };
 
 /*
@@ -93,15 +96,14 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt);
 /*
  * control plane upcalls
  */
-extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error);
-extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr,
+extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
+extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			  char *data, uint32_t data_size);
 
 struct iscsi_cls_conn {
 	struct list_head conn_list;	/* item in connlist */
 	void *dd_data;			/* LLD private data */
 	struct iscsi_transport *transport;
-	iscsi_connh_t connh;
 	int active;			/* must be accessed with the connlock */
 	struct device dev;		/* sysfs transport/container device */
 	struct mempool_zone *z_error;
@@ -113,7 +115,7 @@ struct iscsi_cls_conn {
 	container_of(_dev, struct iscsi_cls_conn, dev)
 
 struct iscsi_cls_session {
-	struct list_head list;		/* item in session_list */
+	struct list_head sess_list;	/* item in session_list */
 	struct iscsi_transport *transport;
 	struct device dev;	/* sysfs transport/container device */
 };
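Note: the net effect for an LLD is that every entry point in struct iscsi_transport, and the two upcalls above it, now traffic in iscsi_cls_session/iscsi_cls_conn pointers instead of the removed handle typedefs. A hedged sketch of a driver's template filled in against the new prototypes; every my_* symbol is a placeholder assumed to be defined elsewhere in the driver, not code from this patch.

#include <scsi/scsi_transport_iscsi.h>

/* Sketch only: the my_* callbacks use the prototypes shown in the hunks
 * above and are assumed to exist in the driver. */
static struct iscsi_transport my_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "my_iscsi",
	.create_session		= my_create_session,	/* returns iscsi_cls_session * */
	.destroy_session	= my_destroy_session,
	.create_conn		= my_create_conn,	/* takes the cls_session */
	.bind_conn		= my_bind_conn,		/* session + cls_conn + transport_fd */
	.start_conn		= my_start_conn,
	.stop_conn		= my_stop_conn,
	.destroy_conn		= my_destroy_conn,
	.set_param		= my_set_param,
	.get_conn_param		= my_get_conn_param,
	.get_session_param	= my_get_session_param,
	.send_pdu		= my_send_pdu,
	.get_stats		= my_get_stats,
};

/* Upcalls follow the same rule: on a connection failure the LLD now calls
 * iscsi_conn_error(cls_conn, ISCSI_ERR_CONN_FAILED) with the object itself
 * rather than with a 64-bit connh handle. */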